From 6818f25c51612ce141005a834feef47e87e5c416 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 8 Mar 2026 16:00:26 -0500 Subject: [PATCH 01/15] feat(influxdb3): rename API specs and add download links Rename Core and Enterprise OpenAPI spec files from ref.yml to descriptive names (influxdb3-core-openapi.yaml, influxdb3-enterprise-openapi.yaml). Copy specs to static/openapi/ and add download links in the API description. --- api-docs/generate-api-docs.sh | 4 +- api-docs/influxdb3/core/.config.yml | 2 +- api-docs/influxdb3/core/v3/content/info.yml | 5 +- .../v3/influxdb3-core-openapi.yaml} | 3699 ++++++++-------- api-docs/influxdb3/enterprise/.config.yml | 2 +- .../influxdb3/enterprise/v3/content/info.yml | 5 +- .../v3/influxdb3-enterprise-openapi.yaml | 3799 +++++++++++++++++ .../openapi/influxdb3-core-openapi.yaml | 3697 +++++++++------- .../openapi/influxdb3-enterprise-openapi.yaml | 3799 +++++++++++++++++ 9 files changed, 11673 insertions(+), 3339 deletions(-) rename api-docs/influxdb3/{enterprise/v3/ref.yml => core/v3/influxdb3-core-openapi.yaml} (77%) create mode 100644 api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml rename api-docs/influxdb3/core/v3/ref.yml => static/openapi/influxdb3-core-openapi.yaml (72%) create mode 100644 static/openapi/influxdb3-enterprise-openapi.yaml diff --git a/api-docs/generate-api-docs.sh b/api-docs/generate-api-docs.sh index 1e7820a7e7..f603bb2af1 100755 --- a/api-docs/generate-api-docs.sh +++ b/api-docs/generate-api-docs.sh @@ -70,7 +70,7 @@ function generateHtml { local specbundle=redoc-static_index.html # Define the temporary file for the Hugo template and Redoc HTML. local tmpfile="${productVersion}-${api}_index.tmp" - + echo "Bundling $specPath" # Use npx to install and run the specified version of redoc-cli. 
@@ -83,9 +83,9 @@ function generateHtml { --title="$title" \ --options.sortPropsAlphabetically \ --options.menuToggle \ - --options.hideDownloadButton \ --options.hideHostname \ --options.noAutoAuth \ + --options.hideDownloadButton \ --output=$specbundle \ --templateOptions.description="$shortDescription" \ --templateOptions.product="$productVersion" \ diff --git a/api-docs/influxdb3/core/.config.yml b/api-docs/influxdb3/core/.config.yml index 14792e219a..d492b29edd 100644 --- a/api-docs/influxdb3/core/.config.yml +++ b/api-docs/influxdb3/core/.config.yml @@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Core apis: v3@3: - root: v3/ref.yml + root: v3/influxdb3-core-openapi.yaml x-influxdata-docs-aliases: - /influxdb3/core/api/ - /influxdb3/core/api/v1/ diff --git a/api-docs/influxdb3/core/v3/content/info.yml b/api-docs/influxdb3/core/v3/content/info.yml index 34e55186eb..107c08b130 100644 --- a/api-docs/influxdb3/core/v3/content/info.yml +++ b/api-docs/influxdb3/core/v3/content/info.yml @@ -21,10 +21,7 @@ description: | - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - + [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml similarity index 77% rename from api-docs/influxdb3/enterprise/v3/ref.yml rename to api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml index 8a813ac3e8..f413341474 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml @@ -1,27 +1,24 @@ openapi: 3.0.3 info: - title: InfluxDB 3 Enterprise API Service + title: InfluxDB 3 Core API Service description: | - The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for - interacting with InfluxDB 3 Enterprise databases 
and resources. + The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for + interacting with InfluxDB 3 Core databases and resources. Use this API to: - - Write data to InfluxDB 3 Enterprise databases + - Write data to InfluxDB 3 Core databases - Query data using SQL or InfluxQL - Process data using Processing engine plugins - Manage databases, tables, and Processing engine triggers - Perform administrative tasks and access system information The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/api/v3`: InfluxDB 3 Core native endpoints - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - version: '3.7.0' + [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,15 +26,16 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 servers: - url: https://{baseurl} - description: InfluxDB 3 Enterprise API URL + description: InfluxDB 3 Core API URL variables: baseurl: enum: - localhost:8181 default: localhost:8181 - description: InfluxDB 3 Enterprise URL + description: InfluxDB 3 Core URL security: - BearerAuthentication: [] - TokenAuthentication: [] @@ -56,8 +54,13 @@ tags: | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/core/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/core/admin/tokens/ - name: Cache data - description: | + description: |- Manage the in-memory cache. 
#### Distinct Value Cache @@ -84,76 +87,126 @@ tags: what fields to cache, what tags to use to identify each series, and the number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. - - #### Related guides - - - [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/) + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/core/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/core/admin/last-value-cache/ - name: Compatibility endpoints - description: | + description: > InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + ### Write data using v1- or v2-compatible endpoints + - [`/api/v2/write` endpoint](#operation/PostV2Write) for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + All endpoints accept the same line protocol format. + ### Query data - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + For new workloads, use one of the following: + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. 
- - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + ### Server information - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ - name: Database description: Manage databases - - description: | + - description: > Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + ### Common parameters + The following table shows common parameters used by many InfluxDB API endpoints. + Many endpoints may require other parameters in the query string or in the + request body that perform functions specific to those endpoints. + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `db` | string | The database name | + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + The following table shows common headers used by many InfluxDB API endpoints. 
+ Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The authorization scheme and credential. | + | `Content-Length` | integer | The size of the entity-body, in bytes. | + | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: | + description: > Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and + trigger Python plugins in response to events in your database. + Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/core/processing-engine/) guide. 
+ x-related: + - title: Processing engine and Python plugins + href: /influxdb3/core/plugins/ - name: Query data description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | 1. [Create an admin token](#section/Authentication) to authorize API requests. @@ -195,7 +248,7 @@ tags: {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} ``` - For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) guide. + For more information about using InfluxDB 3 Core, see the [Get started](/influxdb3/core/get-started/) guide. x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information @@ -219,157 +272,79 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | - | **Minutes** | ✅ `m` | ❌ No | ❌ No | - | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /write: - post: - operationId: PostV1Write - summary: Write line protocol (v1-compatible) + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) description: | - Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. + Checks the status of the service. 
- #### Related + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/compatibilityPrecisionParam' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency - in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. 
responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + "200": + description: Service is running. Returns `OK`. content: - application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. tags: + - Server information - Compatibility endpoints - - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. 
+ headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. summary: Write line protocol (v2-compatible) - description: | + description: > Writes line protocol to the specified database. - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + Use query parameters to specify options for writing data. + #### Related - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type in: header description: | The content type of the request payload. schema: - $ref: '#/components/schemas/LineProtocol' + $ref: "#/components/schemas/LineProtocol" required: false - description: | The compression applied to the line protocol in the request payload. @@ -404,848 +379,618 @@ paths: enum: - application/json type: string - - name: db + - name: bucket in: query required: true schema: type: string - description: | + description: |- A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. 
+ + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. - name: accept_partial in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' - - $ref: '#/components/parameters/compatibilityPrecisionParam' + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' - responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - Compatibility endpoints - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ - /api/v3/write_lp: - post: - operationId: PostWriteLP - summary: Write line protocol - description: | - Writes line protocol to the specified database. - - This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. 
- - #### Features - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - #### Auto precision detection - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - the timestamp precision based on the magnitude of the timestamp value: - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - Larger timestamps → Nanosecond precision (no conversion needed) - - #### Related - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/accept_partial' - - $ref: '#/components/parameters/precisionParam' - - name: no_sync + - $ref: "#/components/parameters/db" + - name: data_only in: query + required: false schema: - $ref: '#/components/schemas/NoSync' - - name: Content-Type - in: header + type: boolean + default: false description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. 
+ - name: remove_tables + in: query required: false - - name: Accept - in: header + schema: + type: boolean + default: false description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false schema: type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. - '422': - description: Unprocessable entity. 
- x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. 
tags: - - Write data - /api/v3/query_sql: + - Database get: - operationId: GetExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/db' - - $ref: '#/components/parameters/querySqlParam' - - $ref: '#/components/parameters/format' - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: GetConfigureDatabase responses: - '200': - description: Success. The response body contains query results. + "200": + description: Success. The response body contains the list of databases. content: application/json: schema: - $ref: '#/components/schemas/QueryResponse' - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - '2024-02-02T12:00:00Z' - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. tags: - - Query data + - Database post: - operationId: PostExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. 
- parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + "200": + description: Success. The database has been updated. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" tags: - - Query data - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. 
+ - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. parameters: - - $ref: '#/components/parameters/dbQueryParam' - - name: q + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table in: query required: true schema: type: string - - name: format + description: The name of the table containing the distinct cache. + - name: name in: query - required: false + required: true schema: type: string - - $ref: '#/components/parameters/AcceptQueryHeader' - responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + description: The name of the distinct cache to delete. 
tags: - - Query data + - Cache data + - Table post: - operationId: PostExecuteQueryInfluxQL - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' - requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + operationId: PostConfigureDistinctCache responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. - tags: - - Query data - /query: - get: - operationId: GetV1ExecuteQuery - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. - #### Related - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. parameters: - - name: Accept - in: header + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true schema: type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. - schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. 
- required: true - schema: - type: string - - name: epoch - description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - schema: - $ref: '#/components/schemas/EpochCompatibility' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp + description: The name of the table containing the last cache. + - name: name in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false + required: true schema: type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. 
- '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: post: - operationId: PostExecuteV1Query - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - #### Related + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: | - A unix timestamp precision. - - - `h` for hours - - `m` for minutes - - `s` for seconds - - `ms` for milliseconds - - `u` or `µ` for microseconds - - `ns` for nanoseconds - - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: + packages: + type: array + items: + type: string description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests required: - - q - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. 
- - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': + "200": + description: Success. The packages are installed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ - /health: - get: - operationId: GetHealth - summary: Health check - description: | - Checks the status of the service. + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - responses: - '200': - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - '401': - description: Unauthorized. Authentication is required. - '500': - description: Service is unavailable. - tags: - - Server information - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: Checks the status of the service. 
- responses: - '200': - description: Service is running. - '500': - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /ping: - get: - operationId: GetPing - tags: - - Server information - summary: Ping the server - description: | - Returns version information for the server. + This endpoint is synchronous and blocks until the requirements are installed. - **Important**: Use a GET request. HEAD requests return `404 Not Found`. - The response includes version information in both headers and the JSON body: + ### Related - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - responses: - '200': - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: '3.8.0' - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Enterprise - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: '3.8.0' - revision: - type: string - description: The git revision hash for the build. - example: '83b589b883' - process_id: - type: string - description: A unique identifier for the server process. - example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' - '401': - description: Unauthorized. Authentication is required. - '404': - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - /metrics: - get: - operationId: GetMetrics - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. - responses: - '200': - description: Success. The response body contains Prometheus-compatible server metrics. 
- tags: - - Server information - /api/v3/configure/database: - get: - operationId: GetConfigureDatabase - summary: List databases - description: Retrieves a list of databases. + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - - $ref: '#/components/parameters/formatRequired' - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt responses: - '200': - description: Success. The response body contains the list of databases. - content: - application/json: - schema: - $ref: '#/components/schemas/ShowDatabasesResponse' - '400': + "200": + description: Success. The requirements have been installed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Database + - Processing engine + /api/v3/configure/processing_engine_trigger: post: - operationId: PostConfigureDatabase - summary: Create a database - description: Creates a new database in the system. + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. 
requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/CreateDatabaseRequest' + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. 
+ value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log responses: - '201': - description: Success. Database created. - '400': + "200": + description: Success. Processing engine trigger created. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: Database already exists. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database + - Processing engine delete: - operationId: DeleteConfigureDatabase - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the database schema and resources. + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. parameters: - - $ref: '#/components/parameters/db' - - name: data_only + - $ref: "#/components/parameters/db" + - name: trigger_name in: query - required: false + required: true schema: - type: boolean - default: false - description: | - Delete only data while preserving the database schema and all associated resources - (tokens, triggers, last value caches, distinct value caches, processing engine configurations). - When `false` (default), the entire database is deleted. - - name: remove_tables + type: string + - name: force in: query required: false schema: type: boolean default: false description: | - Used with `data_only=true` to remove table resources (caches) while preserving - database-level resources (tokens, triggers, processing engine configurations). - Has no effect when `data_only=false`. - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: | - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. 
+ Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. responses: - '200': - description: Success. Database deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. parameters: - - $ref: '#/components/parameters/db' + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '204': - description: Success. The database retention period has been removed. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - /api/v3/configure/table: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: post: - operationId: PostConfigureTable - summary: Create a table - description: Creates a new table within a database. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTableRequest' + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '201': - description: Success. The table has been created. - '400': + "200": + description: Success. The processing engine trigger is enabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Table + - Processing engine + /api/v3/configure/table: delete: operationId: DeleteConfigureTable - summary: Delete a table - description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the table schema and resources. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. parameters: - - $ref: '#/components/parameters/db' + - $ref: "#/components/parameters/db" - name: table in: query required: true @@ -1267,804 +1012,1315 @@ paths: schema: type: string format: date-time - description: | + description: |- Schedule the table for hard deletion at the specified time. If not provided, the table will be soft deleted. Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). 
+ + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '200': + "200": description: Success (no content). The table has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. tags: - Table - patch: - operationId: PatchConfigureTable - summary: Update a table - description: | - Updates table configuration, such as retention period. + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/UpdateTableRequest' - responses: - '200': - description: Success. The table has been updated. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Table not found. + $ref: "#/components/schemas/CreateTableRequest" tags: - Table - /api/v3/configure/database/{db}: - patch: - operationId: PatchConfigureDatabase - summary: Update a database - description: | - Updates database configuration, such as retention period. 
+ /api/v3/configure/token: + delete: + operationId: DeleteToken parameters: - - name: db - in: path + - name: token_name + in: query required: true schema: type: string - description: The name of the database to update. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateDatabaseRequest' + description: The name of the token to delete. responses: - '200': - description: Success. The database has been updated. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. tags: - - Database - /api/v3/show/license: - get: - operationId: GetShowLicense - summary: Show license information + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token description: | - Retrieves information about the current InfluxDB 3 Enterprise license. + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] responses: - '200': - description: Success. The response body contains license information. + "201": + description: Success. 
The admin token has been regenerated. content: application/json: schema: - $ref: '#/components/schemas/LicenseResponse' - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Server information - /api/v3/configure/distinct_cache: + - Authentication + - Token + /api/v3/configure/token/named_admin: post: - operationId: PostConfigureDistinctCache - summary: Create distinct cache - description: Creates a distinct cache for a table. + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. tags: - - Cache data - - Table + - Authentication + - Token requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/DistinctCacheCreateRequest' + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest responses: - '201': - description: Success. The distinct cache has been created. - '204': - description: Not created. A distinct cache with this configuration already exists. - '400': - description: | - Bad request. + "200": + description: Success. 
The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The server responds with status `400` if the request would overwrite an existing cache with a different configuration. - delete: - operationId: DeleteConfigureDistinctCache - summary: Delete distinct cache - description: Deletes a distinct cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the distinct cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the distinct cache to delete. - responses: - '200': - description: Success. The distinct cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. tags: - - Cache data - - Table - /api/v3/configure/last_cache: + - Processing engine post: - operationId: PostConfigureLastCache - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/LastCacheCreateRequest' + operationId: PostProcessingEnginePluginRequest responses: - '201': - description: Success. Last cache created. 
- '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - '409': - description: Cache already exists. - tags: - - Cache data - - Table - delete: - operationId: DeleteConfigureLastCache - summary: Delete last cache - description: Deletes a last cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the last cache to delete. - responses: - '200': - description: Success. The last cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - tags: - - Cache data - - Table - /api/v3/configure/processing_engine_trigger: - post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger - description: | - Creates a processing engine trigger with the specified plugin file and trigger specification. + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. 
+ + + An On Request plugin implements the following signature: - ### Related guides - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: - required: true + required: false content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - examples: - schedule_cron: - summary: Schedule trigger using cron - description: | - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). - value: - db: DATABASE_NAME - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. - value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. 
- value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - responses: - '200': - description: Success. Processing engine trigger created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + type: object + additionalProperties: true tags: - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. - parameters: - - $ref: '#/components/parameters/db' - - name: trigger_name - in: query - required: true - schema: - type: string - - name: force - in: query - required: false - schema: - type: boolean - default: false - description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin responses: - '200': - description: Success. The processing engine trigger has been deleted. - '400': + "200": + description: Success. The plugin test has been executed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. 
+ summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" tags: - Processing engine - /api/v3/configure/processing_engine_trigger/disable: + /api/v3/plugin_test/wal: post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - responses: - '200': - description: Success. The processing engine trigger has been disabled. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + $ref: "#/components/schemas/WALPluginTestRequest" tags: - Processing engine - /api/v3/configure/processing_engine_trigger/enable: - post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + $ref: "#/components/schemas/PluginDirectoryRequest" responses: - '200': - description: Success. The processing engine trigger is enabled. - '400': - description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. tags: - Processing engine - /api/v3/configure/plugin_environment/install_packages: + x-security-note: Requires an admin token + /api/v3/plugins/files: post: - operationId: PostInstallPluginPackages - summary: Install plugin packages + operationId: create_plugin_file + summary: Create a plugin file description: | - Installs the specified Python packages into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the packages are installed. - - ### Related guides - - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) - parameters: - - $ref: '#/components/parameters/ContentType' + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. requestBody: required: true content: application/json: schema: - type: object - properties: - packages: - type: array - items: - type: string - description: | - A list of Python package names to install. 
- Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests + $ref: "#/components/schemas/PluginFileRequest" responses: - '200': - description: Success. The packages are installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. tags: - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file description: | - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the requirements are installed. - - ### Related - - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: '#/components/parameters/ContentType' + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. 
requestBody: required: true content: application/json: schema: - type: object - properties: - requirements_location: - type: string - description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt - required: - - requirements_location - example: - requirements_location: requirements.txt + $ref: "#/components/schemas/PluginFileRequest" responses: - '200': - description: Success. The requirements have been installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. tags: - Processing engine - /api/v3/plugin_test/wal: + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+      parameters:
+        - $ref: "#/components/parameters/dbQueryParam"
+        - name: q
+          in: query
+          required: true
+          schema:
+            type: string
+        - name: format
+          in: query
+          required: false
+          schema:
+            type: string
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+            description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+          description: JSON-encoded query parameters for parameterized queries.
+      tags:
+        - Query data
     post:
-      operationId: PostTestWALPlugin
-      summary: Test WAL plugin
-      description: Executes a test of a write-ahead logging (WAL) plugin.
+      operationId: PostExecuteQueryInfluxQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute InfluxQL query
+      description: Executes an InfluxQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
       requestBody:
-        required: true
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/WALPluginTestRequest'
+        $ref: "#/components/requestBodies/queryRequestBody"
+      tags:
+        - Query data
+  /api/v3/query_sql:
+    get:
+      operationId: GetExecuteQuerySQL
       responses:
-        '200':
-          description: Success. The plugin test has been executed.
-        '400':
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+              example:
+                results:
+                  - series:
+                      - name: mytable
+                        columns:
+                          - time
+                          - value
+                        values:
+                          - - "2024-02-02T12:00:00Z"
+                            - 42
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
           description: Bad request.
-        '401':
-          $ref: '#/components/responses/Unauthorized'
-        '404':
-          description: Plugin not enabled.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute SQL query
+      description: Executes an SQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/db"
+        - $ref: "#/components/parameters/querySqlParam"
+        - $ref: "#/components/parameters/format"
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+            description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+          description: JSON-encoded query parameters for parameterized queries.
       tags:
-        - Processing engine
-  /api/v3/plugin_test/schedule:
+        - Query data
     post:
-      operationId: PostTestSchedulingPlugin
-      summary: Test scheduling plugin
-      description: Executes a test of a scheduling plugin.
+      operationId: PostExecuteQuerySQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. 
+ + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + 
--header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. 
The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. 
+ example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. 
+ + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the + specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. 
InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + tags: + - Query data + - Compatibility endpoints + post: + operationId: PostExecuteV1Query + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. 
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. requestBody: - required: true content: application/json: schema: - $ref: '#/components/schemas/SchedulePluginTestRequest' - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/engine/{request_path}: - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - For example, if you define a trigger with the following: - ```json - trigger_specification: "request:hello-world" - ``` + - `h` for hours - then, the HTTP API exposes the following plugin endpoint: + - `m` for minutes - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - get: - operationId: GetProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. 
+ - `s` for seconds - An On Request plugin implements the following signature: + - `ms` for milliseconds - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + - `u` or `µ` for microseconds - The response depends on the plugin implementation. - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `ns` for nanoseconds - An On Request plugin implements the following signature: - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision - The response depends on the plugin implementation. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: false - content: - application/json: + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: schema: type: object - additionalProperties: true - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - /api/v3/configure/enterprise/token: - post: - operationId: PostCreateResourceToken - summary: Create a resource token - description: | - Creates a resource (fine-grained permissions) token. - A resource token is a token that has access to specific resources in the system. + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - This endpoint is only available in InfluxDB 3 Enterprise. - responses: - '201': - description: | - Success. The resource token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/ResourceTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean + required: + - q + application/vnd.influxql: + schema: + type: string + description: InfluxQL query string sent as the request body. tags: - - Authentication - - Token - /api/v3/configure/token/admin: + - Query data + - Compatibility endpoints + /write: post: - operationId: PostCreateAdminToken - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. + operationId: PostV1Write responses: - '201': + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] - responses: - '201': - description: Success. The admin token has been regenerated. + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
content: application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token: - delete: - operationId: DeleteToken - summary: Delete token - description: | - Deletes a token. + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. 
+ + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - - name: id + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp in: query - required: true + required: false schema: type: string - description: The ID of the token to delete. - responses: - '204': - description: Success. The token has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Token not found. - tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: - post: - operationId: PostCreateNamedAdminToken - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. - parameters: - - name: name + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency in: query - required: true + required: false schema: type: string - description: The name for the admin token. - responses: - '201': description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: A token with this name already exists. - tags: - - Authentication - - Token - /api/v3/plugins/files: - put: - operationId: PutPluginFile - summary: Update plugin file - description: | - Updates a plugin file in the plugin directory. 
- x-security-note: Requires an admin token - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PluginFileRequest' - responses: - '204': - description: Success. The plugin file has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - summary: Update plugin directory - description: | - Updates the plugin directory configuration. - x-security-note: Requires an admin token + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PluginDirectoryRequest' - responses: - '204': - description: Success. The plugin directory has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. 
+ $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - - Processing engine + - Compatibility endpoints + - Write data components: parameters: AcceptQueryHeader: @@ -2088,7 +2344,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. schema: - $ref: '#/components/schemas/ContentEncoding' + $ref: "#/components/schemas/ContentEncoding" required: false ContentLength: name: Content-Length @@ -2096,7 +2352,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: '#/components/schemas/ContentLength' + $ref: "#/components/schemas/ContentLength" ContentType: name: Content-Type description: | @@ -2140,20 +2396,20 @@ components: in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' + $ref: "#/components/schemas/AcceptPartial" compatibilityPrecisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWriteCompatibility' + $ref: "#/components/schemas/PrecisionWriteCompatibility" description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWrite' + $ref: "#/components/schemas/PrecisionWrite" description: The precision for unix timestamps in the line protocol batch. querySqlParam: name: q @@ -2169,22 +2425,24 @@ components: in: query required: false schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" formatRequired: name: format in: query required: true schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" v1UsernameParam: name: u in: query required: false schema: type: string - description: | + description: > Username for v1 compatibility authentication. 
- When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. + + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. v1PasswordParam: name: p in: query @@ -2217,7 +2475,7 @@ components: content: application/json: schema: - $ref: '#/components/schemas/QueryRequestObject' + $ref: "#/components/schemas/QueryRequestObject" schemas: AdminTokenObject: type: object @@ -2240,61 +2498,31 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: '2025-04-18T14:02:45.331Z' + created_at: "2025-04-18T14:02:45.331Z" expiry: null - ResourceTokenObject: - type: object - properties: - token_name: - type: string - permissions: - type: array - items: - type: object - properties: - resource_type: - type: string - enum: - - system - - db - resource_identifier: - type: array - items: - type: string - actions: - type: array - items: - type: string - enum: - - read - - write - expiry_secs: - type: integer - description: The expiration time in seconds. - example: - token_name: All system information - permissions: - - resource_type: system - resource_identifier: - - '*' - actions: - - read - expiry_secs: 300000 ContentEncoding: type: string enum: - gzip - identity - description: | + description: > Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + #### Multi-member gzip support - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). 
+ This allows you to: + - Concatenate multiple gzip files and send them in a single request + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2308,8 +2536,6 @@ components: ContentLength: type: integer description: The length in decimal number of octets. - Database: - type: string AcceptPartial: type: boolean default: true @@ -2320,9 +2546,12 @@ components: - json - csv - parquet + - json_lines - jsonl - description: | + - pretty + description: |- The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. NoSync: type: boolean default: false @@ -2331,18 +2560,21 @@ components: #### Related - - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) - - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + - [Use the HTTP API and client libraries to write data](/influxdb3/core/write-data/api-client-libraries/) + - [Data durability](/influxdb3/core/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms - s - us + - u - ns + - "n" type: string - description: | + description: |- The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. + Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. PrecisionWrite: enum: - auto @@ -2378,6 +2610,7 @@ components: - json - csv - parquet + - json_lines - jsonl - pretty params: @@ -2458,8 +2691,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. 
@@ -2492,8 +2723,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. @@ -2538,63 +2767,99 @@ components: The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. The plugin file must implement the trigger interface associated with the trigger's specification. - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' trigger_name: type: string trigger_settings: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: '#/components/schemas/TriggerSettings' + - $ref: "#/components/schemas/TriggerSettings" trigger_specification: - description: | + description: > Specifies when and how the processing engine trigger should be invoked. + ## Supported trigger specifications: + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + ### Interval-based scheduling + Format: `every:DURATION` - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + **Maximum 
interval**: 1 year + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + ### On-demand triggers + Format: `request:REQUEST_PATH` + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2640,22 +2905,6 @@ components: required: - run_async - error_behavior - ApiNodeSpec: - type: object - description: | - Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. - Use this to control which node(s) should handle the cache or trigger. - properties: - node_id: - type: string - description: | - The ID of a specific node in the cluster. - If specified, the cache or trigger will only be created on this node. - node_group: - type: string - description: | - The name of a node group in the cluster. - If specified, the cache or trigger will be created on all nodes in this group. WALPluginTestRequest: type: object description: | @@ -2745,7 +2994,7 @@ components: files: type: array items: - $ref: '#/components/schemas/PluginFileEntry' + $ref: "#/components/schemas/PluginFileEntry" description: | List of plugin files to include in the directory. required: @@ -2756,16 +3005,15 @@ components: description: | Represents a single file in a plugin directory. properties: - filename: - type: string - description: | - The name of the file within the plugin directory. content: type: string description: | The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. 
required: - - filename + - relative_path - content ShowDatabasesResponse: type: object @@ -2789,7 +3037,7 @@ components: - time - value values: - - - '2024-02-02T12:00:00Z' + - - "2024-02-02T12:00:00Z" - 42 ErrorMessage: type: object @@ -2799,38 +3047,6 @@ components: data: type: object nullable: true - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - readOnly: true - type: string - err: - description: Stack of errors that occurred during processing of the request. Useful for debugging. - readOnly: true - type: string - line: - description: First line in the request body that contains malformed data. - format: int32 - readOnly: true - type: integer - message: - description: Human-readable message. - readOnly: true - type: string - op: - description: Describes the logical code operation when the error occurred. Useful for debugging. - readOnly: true - type: string - required: - - code EpochCompatibility: description: | A unix timestamp precision. @@ -2859,62 +3075,13 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. - UpdateTableRequest: - type: object - properties: - db: - type: string - description: The name of the database containing the table. - table: - type: string - description: The name of the table to update. - retention_period: - type: string - description: | - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - description: Request schema for updating table configuration. - LicenseResponse: - type: object - properties: - license_type: - type: string - description: The type of license (for example, "enterprise", "trial"). 
- example: enterprise - expires_at: - type: string - format: date-time - description: The expiration date of the license in ISO 8601 format. - example: '2025-12-31T23:59:59Z' - features: - type: array - items: - type: string - description: List of features enabled by the license. - example: - - clustering - - processing_engine - - advanced_auth - status: - type: string - enum: - - active - - expired - - invalid - description: The current status of the license. - example: active - description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" BadRequest: description: | Request failed. Possible reasons: @@ -2925,19 +3092,19 @@ components: content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" Forbidden: description: Access denied. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" NotFound: description: Resource not found. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" headers: ClusterUUID: description: | @@ -2954,94 +3121,126 @@ components: BasicAuthentication: type: http scheme: basic - description: | + description: >- Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. - When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. 
+ + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + and ignores the `username` part of the decoded credential. + ### Syntax + ```http + Authorization: Basic + ``` + ### Example + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ --user "":"AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - #### Related guides + - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: | + description: >- Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ ### Syntax + ```http + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + ``` + ### Examples + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: + # ?p=AUTH_TOKEN + ####################################### + curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` + Replace the following: - - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides + - **`DATABASE_NAME`**: the database to query - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -3069,7 +3268,7 @@ components: --header "Authorization: Bearer AUTH_TOKEN" ``` TokenAuthentication: - description: | + description: |- Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. 
@@ -3096,10 +3295,6 @@ components: --header "Authorization: Token AUTH_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` - - ### Related guides - - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/influxdb3/enterprise/.config.yml b/api-docs/influxdb3/enterprise/.config.yml index 4b8210b97c..d39bc413c8 100644 --- a/api-docs/influxdb3/enterprise/.config.yml +++ b/api-docs/influxdb3/enterprise/.config.yml @@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Enterprise apis: v3@3: - root: v3/ref.yml + root: v3/influxdb3-enterprise-openapi.yaml x-influxdata-docs-aliases: - /influxdb3/enterprise/api/ - /influxdb3/enterprise/api/v1/ diff --git a/api-docs/influxdb3/enterprise/v3/content/info.yml b/api-docs/influxdb3/enterprise/v3/content/info.yml index e4ec8ef609..cd2e5acdf3 100644 --- a/api-docs/influxdb3/enterprise/v3/content/info.yml +++ b/api-docs/influxdb3/enterprise/v3/content/info.yml @@ -21,10 +21,7 @@ description: | - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - + [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml new file mode 100644 index 0000000000..5ff481f9d0 --- /dev/null +++ b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml @@ -0,0 +1,3799 @@ +openapi: 3.0.3 +info: + title: InfluxDB 3 Enterprise API Service + description: | + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. 
+ Use this API to: + + - Write data to InfluxDB 3 Enterprise databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) + version: v3.8.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 +servers: + - url: https://{baseurl} + description: InfluxDB 3 Enterprise API URL + variables: + baseurl: + enum: + - localhost:8181 + default: localhost:8181 + description: InfluxDB 3 Enterprise URL +security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] +tags: + - name: Authentication + description: | + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + + x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/enterprise/guides/api-compatibility/v1/ + - title: Manage tokens + 
href: /influxdb3/enterprise/admin/tokens/ + - name: Cache data + description: |- + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create an DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field. + + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/enterprise/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/enterprise/admin/last-value-cache/ + - name: Compatibility endpoints + description: > + InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + + + ### Write data using v1- or v2-compatible endpoints + + + - [`/api/v2/write` endpoint](#operation/PostV2Write) + for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. 
+ - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + + + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + + + All endpoints accept the same line protocol format. + + + ### Query data + + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + + + For new workloads, use one of the following: + + + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + + ### Server information + + + Server information endpoints such as `/health` and `/metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + - name: Database + description: Manage databases + - description: > + Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API endpoints. + + Many endpoints may require other parameters in the query string or in the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response headers.
+ + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + + + | Header | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `Accept` | string | The content type that the client can understand. | + + | `Authorization` | string | The authorization scheme and credential. | + + | `Content-Length` | integer | The size of the entity-body, in bytes. | + + | `Content-Type` | string | The format of the data in the request body. | + name: Headers and parameters + x-traitTag: true + - name: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. + + + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load + and trigger Python plugins in response to events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks for different database events. + + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/enterprise/processing-engine/) guide. + x-related: + - title: Processing engine and Python plugins + href: /influxdb3/enterprise/plugins/ + - name: Query data + description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - name: Quick start + description: > + 1. [Create an admin token](#section/Authentication) to authorize API requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. 
[Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) + guide. + x-traitTag: true + - name: Server information + description: Retrieve server metrics, status, and version information + - name: Table + description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: | + Write data to InfluxDB 3 using line protocol format. + + #### Timestamp precision across write APIs + + InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.
+ The following table compares timestamp precision support across v1, v2, and v3 write APIs: + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | + |-----------|---------------|----------------------|-------------------------| + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + All timestamps are stored internally as nanoseconds. +paths: + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + tags: + - Server information + - Compatibility endpoints + /api/v2/write: + post: + operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v2-compatible) + description: > + Writes line protocol to the specified database. 
+ + + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + in: header + name: Content-Encoding + schema: + default: identity + description: | + Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + enum: + - gzip + - identity + type: string + - description: | + The size of the entity-body, in bytes, sent to InfluxDB. + in: header + name: Content-Length + schema: + description: The length in decimal number of octets. + type: integer + - description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + in: header + name: Accept + schema: + default: application/json + description: Error content type. + enum: + - application/json + type: string + - name: bucket + in: query + required: true + schema: + type: string + description: |- + A database name. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. 
+ - name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Compatibility endpoints + - Write data + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase + parameters: + - $ref: "#/components/parameters/db" + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. + - name: remove_tables + in: query + required: false + schema: + type: boolean + default: false + description: | + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. 
+ summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. + tags: + - Database + get: + operationId: GetConfigureDatabase + responses: + "200": + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. + tags: + - Database + post: + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database + responses: + "200": + description: Success. The database has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Update a database + description: | + Updates database configuration, such as retention period. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. + parameters: + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureDistinctCache + responses: + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. + + + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the last cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache + responses: + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. 
+ parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. + + + This endpoint is synchronous and blocks until the requirements are installed. + + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + "200": + description: Success. The requirements have been installed. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. 
+ value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. 
+ value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false + description: | + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. + responses: + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. 
+ tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger is enabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/table: + delete: + operationId: DeleteConfigureTable + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success (no content). The table has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. 
+ The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. + tags: + - Table + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + put: + operationId: PatchConfigureTable + responses: + "200": + description: Success. The table has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Update a table + description: | + Updates table configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateTableRequest" + tags: + - Table + x-enterprise-only: true + /api/v3/configure/token: + delete: + operationId: DeleteToken + parameters: + - name: token_name + in: query + required: true + schema: + type: string + description: The name of the token to delete. + responses: + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. 
The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. 
+ nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. 
+ parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/enterprise/configure/file_index: + post: + operationId: configure_file_index_create + summary: Create a file index + description: >- + Creates a file index for a database or table. + + + A file index improves query performance by indexing data files based on specified columns, enabling the query + engine to skip irrelevant files during query execution. + + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexCreateRequest" + responses: + "200": + description: Success. The file index has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Database + - Table + delete: + operationId: configure_file_index_delete + summary: Delete a file index + description: |- + Deletes a file index from a database or table. + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexDeleteRequest" + responses: + "200": + description: Success. The file index has been deleted. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database, table, or file index not found. + tags: + - Database + - Table + /api/v3/enterprise/configure/node/stop: + post: + operationId: stop_node + summary: Mark a node as stopped + description: >- + Marks a node as stopped in the catalog, freeing up the licensed cores it was using for other nodes. + + + Use this endpoint after you have already stopped the physical instance (for example, using `kill` or stopping + the container). This endpoint does not shut down the running process — you must stop the instance first. + + + When the node is marked as stopped: + + 1. Licensed cores from the stopped node are freed for reuse + + 2. Other nodes in the cluster see the update after their catalog sync interval + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 stop node](/influxdb3/enterprise/reference/cli/influxdb3/stop/node/) + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/StopNodeRequest" + responses: + "200": + description: Success. The node has been marked as stopped. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Node not found. + tags: + - Server information + /api/v3/enterprise/configure/table/retention_period: + post: + operationId: create_or_update_retention_period_for_table + summary: Set table retention period + description: >- + Sets or updates the retention period for a specific table. + + + Use this endpoint to control how long data in a table is retained independently of the database-level retention + period. + + + This endpoint is only available in InfluxDB 3 Enterprise. 
+ + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + - name: duration + in: query + required: true + schema: + type: string + description: The retention period as a human-readable duration (for example, "30d", "24h", "1y"). + responses: + "204": + description: Success. The table retention period has been set. + "400": + description: Bad request. Invalid duration format. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + delete: + operationId: delete_retention_period_for_table + summary: Clear table retention period + description: >- + Removes the retention period from a specific table, reverting to the database-level retention period (or + infinite retention if no database-level retention is set). + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + responses: + "204": + description: Success. The table retention period has been cleared. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + /api/v3/enterprise/configure/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. 
+ + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + "201": + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/ResourceTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTokenWithPermissionsRequest" + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/plugins/files: + post: + operationId: create_plugin_file + summary: Create a plugin file + description: | + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + tags: + - Processing engine + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file + description: | + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. 
To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/dbQueryParam" + - name: q + in: query + required: true + schema: + type: string + - name: format + in: query + required: false + schema: + type: string + - $ref: "#/components/parameters/AcceptQueryHeader" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQueryInfluxQL + responses: + "200": + description: Success. 
The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. 
+ parameters: + - $ref: "#/components/parameters/db" + - $ref: "#/components/parameters/querySqlParam" + - $ref: "#/components/parameters/format" + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. 
+ schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. 
+ + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header 
"Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). 
+ schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. 
Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. 
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + tags: + - Query data + - Compatibility endpoints + post: + operationId: PostExecuteV1Query + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. 
+ requestBody: + content: + application/json: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. 
+ + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/vnd.influxql: + schema: + type: string + description: InfluxQL query string sent as the request body. + tags: + - Query data + - Compatibility endpoints + /write: + post: + operationId: PostV1Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Compatibility endpoints + - Write data +components: + parameters: + AcceptQueryHeader: + name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/jsonl + - application/vnd.apache.parquet + - text/csv + required: false + description: | + The content type that the client can understand. + ContentEncoding: + name: Content-Encoding + in: header + description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + schema: + $ref: "#/components/schemas/ContentEncoding" + required: false + ContentLength: + name: Content-Length + in: header + description: | + The size of the entity-body, in bytes, sent to InfluxDB. 
+ schema: + $ref: "#/components/schemas/ContentLength" + ContentType: + name: Content-Type + description: | + The format of the data in the request body. + in: header + schema: + type: string + enum: + - application/json + required: false + db: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + dbWriteParam: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + dbQueryParam: + name: db + in: query + required: false + schema: + type: string + description: | + The name of the database. + + If you provide a query that specifies the database, you can omit the 'db' parameter from your request. + accept_partial: + name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + compatibilityPrecisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWriteCompatibility" + description: The precision for unix timestamps in the line protocol batch. + precisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWrite" + description: The precision for unix timestamps in the line protocol batch. + querySqlParam: + name: q + in: query + required: true + schema: + type: string + format: SQL + description: | + The query to execute. + format: + name: format + in: query + required: false + schema: + $ref: "#/components/schemas/Format" + formatRequired: + name: format + in: query + required: true + schema: + $ref: "#/components/schemas/Format" + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: > + Username for v1 compatibility authentication. 
+ + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. + v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass a database token with write permissions as this parameter. + InfluxDB 3 checks that the `p` value is an authorized token. + requestBodies: + lineProtocolRequestBody: + required: true + content: + text/plain: + schema: + type: string + examples: + line: + summary: Example line protocol + value: measurement,tag=value field=1 1234567890 + multiline: + summary: Example line protocol with UTF-8 characters + value: | + measurement,tag=value field=1 1234567890 + measurement,tag=value field=2 1234567900 + measurement,tag=value field=3 1234568000 + queryRequestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/QueryRequestObject" + schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: "2025-04-18T14:02:45.331Z" + expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + actions: + type: array + items: + type: string + enum: + - read + - write + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + expiry_secs: + type: integer + description: The expiration time in seconds. 
+ example: + token_name: All system information + permissions: + - resource_type: system + actions: + - read + resource_names: + - "*" + expiry_secs: 300000 + ContentEncoding: + type: string + enum: + - gzip + - identity + description: > + Content coding. + + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + + + #### Multi-member gzip support + + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + This allows you to: + + - Concatenate multiple gzip files and send them in a single request + + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + + - Simplify batch operations using standard compression tools + default: identity + LineProtocol: + type: string + enum: + - text/plain + - text/plain; charset=utf-8 + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. + default: text/plain; charset=utf-8 + ContentLength: + type: integer + description: The length in decimal number of octets. + Database: + type: string + AcceptPartial: + type: boolean + default: true + description: Accept partial writes. + Format: + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + description: |- + The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. + NoSync: + type: boolean + default: false + description: | + Acknowledges a successful write without waiting for WAL persistence. + + #### Related + + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + PrecisionWriteCompatibility: + enum: + - ms + - s + - us + - u + - ns + - "n" + type: string + description: |- + The precision for unix timestamps in the line protocol batch. 
+ Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. + PrecisionWrite: + enum: + - auto + - nanosecond + - microsecond + - millisecond + - second + type: string + description: | + The precision for unix timestamps in the line protocol batch. + + Supported values: + - `auto` (default): Automatically detects precision based on timestamp magnitude + - `nanosecond`: Nanoseconds + - `microsecond`: Microseconds + - `millisecond`: Milliseconds + - `second`: Seconds + QueryRequestObject: + type: object + properties: + db: + description: | + The name of the database to query. + Required if the query (`q`) doesn't specify the database. + type: string + q: + description: The query to execute. + type: string + format: + description: The format of the query results. + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + params: + description: | + Additional parameters for the query. + Use this field to pass query parameters. + type: object + additionalProperties: true + required: + - db + - q + example: + db: mydb + q: SELECT * FROM mytable + format: json + params: {} + CreateDatabaseRequest: + type: object + properties: + db: + type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: 7d + required: + - db + CreateTableRequest: + type: object + properties: + db: + type: string + table: + type: string + tags: + type: array + items: + type: string + fields: + type: array + items: + type: object + properties: + name: + type: string + type: + type: string + enum: + - utf8 + - int64 + - uint64 + - float64 + - bool + required: + - name + - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + - tags + - fields + DistinctCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + columns: + type: array + items: + type: string + max_cardinality: + type: integer + description: Optional maximum cardinality. + max_age: + type: integer + description: Optional maximum age in seconds. + required: + - db + - table + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + max_cardinality: 1000 + max_age: 3600 + LastCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + key_columns: + type: array + items: + type: string + description: Optional list of key columns. + value_columns: + type: array + items: + type: string + description: Optional list of value columns. + count: + type: integer + description: Optional count. + ttl: + type: integer + description: Optional time-to-live in seconds. 
+ required: + - db + - table + example: + db: mydb + table: mytable + key_columns: + - tag1 + value_columns: + - field1 + count: 100 + ttl: 3600 + ProcessingEngineTriggerRequest: + type: object + properties: + db: + type: string + plugin_filename: + type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + trigger_name: + type: string + trigger_settings: + description: | + Configuration for trigger error handling and execution behavior. + allOf: + - $ref: "#/components/schemas/TriggerSettings" + trigger_specification: + description: > + Specifies when and how the processing engine trigger should be invoked. + + + ## Supported trigger specifications: + + + ### Cron-based scheduling + + Format: `cron:CRON_EXPRESSION` + + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + + ``` + + ┌───────────── second (0-59) + + │ ┌───────────── minute (0-59) + + │ │ ┌───────────── hour (0-23) + + │ │ │ ┌───────────── day of month (1-31) + + │ │ │ │ ┌───────────── month (1-12) + + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + + │ │ │ │ │ │ + + * * * * * * + + ``` + + Examples: + + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + + - `cron:0 0 0 1 * *` - First day of every month at midnight + + + ### Interval-based scheduling + + Format: `every:DURATION` + + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + + - `every:30s` - Every 30 seconds + + - `every:5m` - Every 5 minutes + + - `every:1h` - Every hour + + - `every:1d` - Every day + + - `every:1w` - Every week + + - 
`every:1M` - Every month + + - `every:1y` - Every year + + + **Maximum interval**: 1 year + + + ### Table-based triggers + + - `all_tables` - Triggers on write events to any table in the database + + - `table:TABLE_NAME` - Triggers on write events to a specific table + + + ### On-demand triggers + + Format: `request:REQUEST_PATH` + + + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ + example: cron:0 0 6 * * 1-5 + trigger_arguments: + type: object + additionalProperties: true + description: Optional arguments passed to the plugin. + disabled: + type: boolean + default: false + description: Whether the trigger is disabled. + required: + - db + - plugin_filename + - trigger_name + - trigger_settings + - trigger_specification + - disabled + TriggerSettings: + type: object + description: | + Configuration settings for processing engine trigger error handling and execution behavior. + properties: + run_async: + type: boolean + default: false + description: | + Whether to run the trigger asynchronously. + When `true`, the trigger executes in the background without blocking. + When `false`, the trigger executes synchronously. + error_behavior: + type: string + enum: + - Log + - Retry + - Disable + description: | + Specifies how to handle errors that occur during trigger execution: + - `Log`: Log the error and continue (default) + - `Retry`: Retry the trigger execution + - `Disable`: Disable the trigger after an error + default: Log + required: + - run_async + - error_behavior + ApiNodeSpec: + x-enterprise-only: true + type: object + description: | + Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. 
+ Use this to control which node(s) should handle the cache or trigger. + properties: + node_id: + type: string + description: | + The ID of a specific node in the cluster. + If specified, the cache or trigger will only be created on this node. + node_group: + type: string + description: | + The name of a node group in the cluster. + If specified, the cache or trigger will be created on all nodes in this group. + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. 
+ properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. + required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content + ShowDatabasesResponse: + type: object + properties: + databases: + type: array + items: + type: string + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + ErrorMessage: + type: object + properties: + error: + type: string + data: + type: object + nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. 
+ readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code + EpochCompatibility: + description: | + A unix timestamp precision. + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. 
+ example: active + description: Response schema for license information. + CreateTokenWithPermissionsRequest: + type: object + properties: + token_name: + type: string + description: The name for the resource token. + permissions: + type: array + items: + $ref: "#/components/schemas/PermissionDetailsApi" + description: List of permissions to grant to the token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. + nullable: true + required: + - token_name + - permissions + PermissionDetailsApi: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + description: The type of resource. + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + actions: + type: array + items: + type: string + enum: + - read + - write + description: List of actions to grant. + required: + - resource_type + - resource_names + - actions + FileIndexCreateRequest: + type: object + description: Request body for creating a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, the file index applies to the database. + nullable: true + columns: + type: array + items: + type: string + description: The columns to use for the file index. + required: + - db + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + FileIndexDeleteRequest: + type: object + description: Request body for deleting a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, deletes the database-level file index. + nullable: true + required: + - db + example: + db: mydb + table: mytable + StopNodeRequest: + type: object + description: Request body for marking a node as stopped in the catalog. + properties: + node_id: + type: string + description: The ID of the node to mark as stopped. 
+ required: + - node_id + example: + node_id: node-1 + responses: + Unauthorized: + description: Unauthorized access. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + BadRequest: + description: | + Request failed. Possible reasons: + + - Invalid database name + - Malformed request body + - Invalid timestamp precision + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + Forbidden: + description: Access denied. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + NotFound: + description: Resource not found. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + headers: + ClusterUUID: + description: | + The catalog UUID of the InfluxDB instance. + This header is included in all HTTP API responses and enables you to: + - Identify which cluster instance handled the request + - Monitor deployments across multiple InfluxDB instances + - Debug and troubleshoot distributed systems + schema: + type: string + format: uuid + example: 01234567-89ab-cdef-0123-456789abcdef + securitySchemes: + BasicAuthentication: + type: http + scheme: basic + description: >- + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + + + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. + + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + + and ignores the `username` part of the decoded credential. 
+ + + ### Syntax + + + ```http + + Authorization: Basic + + ``` + + + ### Example + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: >- + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + + and ignores the `u` (_username_) query parameter. + + + ### Syntax + + + ```http + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + + ``` + + + ### Examples + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + + ```bash + + ####################################### + + # Use an InfluxDB 1.x compatible username and password + + # to query the InfluxDB v1 HTTP API + + ####################################### + + # Use authentication query parameters: + + # ?p=AUTH_TOKEN + + ####################################### + + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + 
--data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: the database to query + + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + BearerAuthentication: + type: http + scheme: bearer + bearerFormat: JWT + description: | + + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and a database token. + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example + + ```bash + curl http://localhost:8181/api/v3/query_influxql \ + --header "Authorization: Bearer AUTH_TOKEN" + ``` + TokenAuthentication: + description: |- + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. 
+ ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + in: header + name: Authorization + type: apiKey +x-tagGroups: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/core/v3/ref.yml b/static/openapi/influxdb3-core-openapi.yaml similarity index 72% rename from api-docs/influxdb3/core/v3/ref.yml rename to static/openapi/influxdb3-core-openapi.yaml index 32778f807e..bd0d928430 100644 --- a/api-docs/influxdb3/core/v3/ref.yml +++ b/static/openapi/influxdb3-core-openapi.yaml @@ -17,11 +17,8 @@ info: - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - version: '3.7.0' + To download the OpenAPI specification for this API, use the **Download** button above. + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,6 +26,7 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 servers: - url: https://{baseurl} description: InfluxDB 3 Core API URL @@ -56,8 +54,13 @@ tags: | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/core/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/core/admin/tokens/ - name: Cache data - description: | + description: |- Manage the in-memory cache. 
#### Distinct Value Cache @@ -84,76 +87,126 @@ tags: what fields to cache, what tags to use to identify each series, and the number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. - - #### Related guides - - - [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/core/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/core/admin/last-value-cache/ - name: Compatibility endpoints - description: | + description: > InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + ### Write data using v1- or v2-compatible endpoints + - [`/api/v2/write` endpoint](#operation/PostV2Write) for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + All endpoints accept the same line protocol format. + ### Query data - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + For new workloads, use one of the following: + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. 
- - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + ### Server information - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ - name: Database description: Manage databases - - description: | + - description: > Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + ### Common parameters + The following table shows common parameters used by many InfluxDB API endpoints. + Many endpoints may require other parameters in the query string or in the + request body that perform functions specific to those endpoints. + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `db` | string | The database name | + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + The following table shows common headers used by many InfluxDB API endpoints. 
+ Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The authorization scheme and credential. | + | `Content-Length` | integer | The size of the entity-body, in bytes. | + | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: | + description: > Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and + trigger Python plugins in response to events in your database. + Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/core/processing-engine/) guide. 
+ x-related: + - title: Processing engine and Python plugins + href: /influxdb3/core/plugins/ - name: Query data description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | 1. [Create an admin token](#section/Authentication) to authorize API requests. @@ -219,149 +272,71 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | - | **Minutes** | ✅ `m` | ❌ No | ❌ No | - | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /write: - post: - operationId: PostV1Write - summary: Write line protocol (v1-compatible) + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) description: | - Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. + Checks the status of the service. - #### Related + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. 
- - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) - parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/compatibilityPrecisionParam' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency - in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. 
- headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + "200": + description: Service is running. Returns `OK`. content: - application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. tags: + - Server information - Compatibility endpoints - - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. summary: Write line protocol (v2-compatible) - description: | + description: > Writes line protocol to the specified database. - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + Use query parameters to specify options for writing data. + #### Related + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type @@ -369,7 +344,7 @@ paths: description: | The content type of the request payload. schema: - $ref: '#/components/schemas/LineProtocol' + $ref: "#/components/schemas/LineProtocol" required: false - description: | The compression applied to the line protocol in the request payload. @@ -404,1500 +379,1948 @@ paths: enum: - application/json type: string - - name: db + - name: bucket in: query required: true schema: type: string - description: | + description: |- A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. 
- name: accept_partial in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' - - $ref: '#/components/parameters/compatibilityPrecisionParam' + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' - responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - Compatibility endpoints - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ - /api/v3/write_lp: - post: - operationId: PostWriteLP - summary: Write line protocol - description: | - Writes line protocol to the specified database. - - This is the native InfluxDB 3 Core write endpoint that provides enhanced control - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. 
- - #### Features - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - #### Auto precision detection - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - the timestamp precision based on the magnitude of the timestamp value: - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - Larger timestamps → Nanosecond precision (no conversion needed) - - #### Related - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/accept_partial' - - $ref: '#/components/parameters/precisionParam' - - name: no_sync + - $ref: "#/components/parameters/db" + - name: data_only in: query + required: false schema: - $ref: '#/components/schemas/NoSync' - - name: Content-Type - in: header + type: boolean + default: false description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. 
+ - name: remove_tables + in: query required: false - - name: Accept - in: header + schema: + type: boolean + default: false description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false schema: type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. - '422': - description: Unprocessable entity. 
- x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. 
tags: - - Write data - /api/v3/query_sql: + - Database get: - operationId: GetExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/db' - - $ref: '#/components/parameters/querySqlParam' - - $ref: '#/components/parameters/format' - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: GetConfigureDatabase responses: - '200': - description: Success. The response body contains query results. + "200": + description: Success. The response body contains the list of databases. content: application/json: schema: - $ref: '#/components/schemas/QueryResponse' - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - '2024-02-02T12:00:00Z' - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. tags: - - Query data + - Database post: - operationId: PostExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. 
- parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + "200": + description: Success. The database has been updated. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" tags: - - Query data - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. 
+ - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. parameters: - - $ref: '#/components/parameters/dbQueryParam' - - name: q + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table in: query required: true schema: type: string - - name: format + description: The name of the table containing the distinct cache. + - name: name in: query - required: false + required: true schema: type: string - - $ref: '#/components/parameters/AcceptQueryHeader' - responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + description: The name of the distinct cache to delete. 
tags: - - Query data + - Cache data + - Table post: - operationId: PostExecuteQueryInfluxQL - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' - requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + operationId: PostConfigureDistinctCache responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. - tags: - - Query data - /query: - get: - operationId: GetV1ExecuteQuery - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. - #### Related - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. parameters: - - name: Accept - in: header + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true schema: type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. - schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. 
- required: true - schema: - type: string - - name: epoch - description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - schema: - $ref: '#/components/schemas/EpochCompatibility' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false + description: The name of the table containing the last cache. + - name: name + in: query + required: true schema: type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. 
- '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: post: - operationId: PostExecuteV1Query - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - #### Related + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: + packages: + type: array + items: + type: string description: | - A unix timestamp precision. + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. - - `h` for hours - - `m` for minutes - - `s` for seconds - - `ms` for milliseconds - - `u` or `µ` for microseconds - - `ns` for nanoseconds - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h + This endpoint is synchronous and blocks until the requirements are installed. 
+ + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: type: string - pretty: description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt required: - - q - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. + - requirements_location + example: + requirements_location: requirements.txt responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': + "200": + description: Success. The requirements have been installed. + "400": description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - /health: - get: - operationId: GetHealth - summary: Health check - description: | - Checks the status of the service. + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - responses: - '200': - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - '500': - description: Service is unavailable. - tags: - - Server information - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: Checks the status of the service. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). 
+ value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. 
+ value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log responses: - '200': - description: Service is running. - '500': - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /ping: - get: - operationId: GetPing + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Server information - summary: Ping the server - description: | - Returns version information for the server. - - **Important**: Use a GET request. HEAD requests return `404 Not Found`. 
- - The response includes version information in both headers and the JSON body: - - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - responses: - '200': - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: '3.8.0' - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: '3.8.0' - revision: - type: string - description: The git revision hash for the build. - example: '5276213d5b' - process_id: - type: string - description: A unique identifier for the server process. - example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' - '404': + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - /metrics: - get: - operationId: GetMetrics - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. responses: - '200': - description: Success. The response body contains Prometheus-compatible server metrics. + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Server information - /api/v3/configure/database: - get: - operationId: GetConfigureDatabase - summary: List databases - description: Retrieves a list of databases. + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. parameters: - - $ref: '#/components/parameters/formatRequired' + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '200': - description: Success. The response body contains the list of databases. - content: - application/json: - schema: - $ref: '#/components/schemas/ShowDatabasesResponse' - '400': + "200": + description: Success. The processing engine trigger has been disabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: post: - operationId: PostConfigureDatabase - summary: Create a database - description: Creates a new database in the system. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateDatabaseRequest' + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. 
+ - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '201': - description: Success. Database created. - '400': + "200": + description: Success. The processing engine trigger is enabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: Database already exists. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - delete: - operationId: DeleteConfigureDatabase - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - parameters: - - $ref: '#/components/parameters/db' - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: | - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. - responses: - '200': - description: Success. Database deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. - Data in the database will not expire based on time. - parameters: - - $ref: '#/components/parameters/db' - responses: - '200': - description: Success. Retention period removed from database. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Database + - Processing engine /api/v3/configure/table: - post: - operationId: PostConfigureTable - summary: Create a table - description: Creates a new table within a database. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTableRequest' - responses: - '201': - description: Success. The table has been created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Table delete: operationId: DeleteConfigureTable - summary: Delete a table - description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. parameters: - - $ref: '#/components/parameters/db' + - $ref: "#/components/parameters/db" - name: table in: query required: true schema: type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. - name: hard_delete_at in: query required: false schema: type: string format: date-time - description: | + description: |- Schedule the table for hard deletion at the specified time. If not provided, the table will be soft deleted. Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). 
+ + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '200': + "200": description: Success (no content). The table has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. tags: - Table - /api/v3/configure/distinct_cache: post: - operationId: PostConfigureDistinctCache - summary: Create distinct cache - description: Creates a distinct cache for a table. - tags: - - Cache data - - Table + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/DistinctCacheCreateRequest' - responses: - '201': - description: Success. The distinct cache has been created. - '204': - description: Not created. A distinct cache with this configuration already exists. - '400': - description: | - Bad request. - - The server responds with status `400` if the request would overwrite an existing cache with a different configuration. 
+ $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + /api/v3/configure/token: delete: - operationId: DeleteConfigureDistinctCache - summary: Delete distinct cache - description: Deletes a distinct cache. + operationId: DeleteToken parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the distinct cache. - - name: name + - name: token_name in: query required: true schema: type: string - description: The name of the distinct cache to delete. + description: The name of the token to delete. responses: - '200': - description: Success. The distinct cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. tags: - - Cache data - - Table - /api/v3/configure/last_cache: + - Authentication + - Token + /api/v3/configure/token/admin: post: - operationId: PostConfigureLastCache - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/LastCacheCreateRequest' + operationId: PostCreateAdminToken responses: - '201': - description: Success. Last cache created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - '409': - description: Cache already exists. + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. tags: - - Cache data - - Table - delete: - operationId: DeleteConfigureLastCache - summary: Delete last cache - description: Deletes a last cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the last cache to delete. + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] responses: - '200': - description: Success. The last cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Cache data - - Table - /api/v3/configure/processing_engine_trigger: + - Authentication + - Token + /api/v3/configure/token/named_admin: post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token description: | - Creates a processing engine trigger with the specified plugin file and trigger specification. - - ### Related guides - - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - examples: - schedule_cron: - summary: Schedule trigger using cron - description: | - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. 
- value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. - value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest responses: - '200': - description: Success. Processing engine trigger created. - '400': + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. 
The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" tags: - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. 
+      tags:
+        - Processing engine
+      x-security-note: Requires an admin token
+  /api/v3/plugins/files:
+    post:
+      operationId: PostCreatePluginFile
+      summary: Create a plugin file
+      description: |
+        Creates a single plugin file in the plugin directory. Writes the
+        `content` to a file named after `plugin_name`. Does not require an
+        existing trigger—use this to upload plugin files before creating
+        triggers that reference them.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/PluginFileRequest"
+      responses:
+        "200":
+          description: Success. The plugin file has been created.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Forbidden. Admin token required.
+      tags:
+        - Processing engine
+      x-security-note: Requires an admin token
+    put:
+      operationId: PutPluginFile
+      summary: Update a plugin file
+      description: |
+        Updates a single plugin file for an existing trigger. The
+        `plugin_name` must match a registered trigger name—the server
+        resolves the trigger's `plugin_filename` and overwrites that file
+        with the provided `content`.
+
+        To upload a new plugin file before creating a trigger, use
+        `POST /api/v3/plugins/files` instead. To update a multi-file
+        plugin directory, use `PUT /api/v3/plugins/directory`.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/PluginFileRequest"
+      responses:
+        "200":
+          description: Success. The plugin file has been updated.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Forbidden. Admin token required.
+        "500":
+          description: Plugin not found. The `plugin_name` does not match any registered trigger.
+      tags:
+        - Processing engine
+      x-security-note: Requires an admin token
+  /api/v3/query_influxql:
+    get:
+      operationId: GetExecuteInfluxQLQuery
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+ content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/dbQueryParam" + - name: q + in: query + required: true + schema: + type: string + - name: format + in: query + required: false + schema: + type: string + - $ref: "#/components/parameters/AcceptQueryHeader" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQueryInfluxQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+ parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/db" + - $ref: "#/components/parameters/querySqlParam" + - $ref: "#/components/parameters/format" + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer 
DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. 
+ tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). 
+ schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. 
+ + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) parameters: - - $ref: '#/components/parameters/db' - - name: trigger_name - in: query - required: true + - name: Accept + in: header schema: type: string - - name: force - in: query + default: application/json + enum: + - application/json + - application/csv + - text/csv required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. schema: type: boolean default: false - responses: - '200': - description: Success. The processing engine trigger has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/disable: - post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - responses: - '200': - description: Success. The processing engine trigger has been disabled. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. 
+ - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the + specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/enable: + - Query data + - Compatibility endpoints post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. 
- parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + operationId: PostExecuteV1Query responses: - '200': - description: Success. The processing engine trigger is enabled. - '400': + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_packages: - post: - operationId: PostInstallPluginPackages - summary: Install plugin packages - description: | - Installs the specified Python packages into the processing engine plugin environment. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. - This endpoint is synchronous and blocks until the packages are installed. 
- ### Related guides + #### Related + - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) parameters: - - $ref: '#/components/parameters/ContentType' + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. requestBody: - required: true content: application/json: schema: type: object properties: - packages: - type: array - items: - type: string + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: description: | - A list of Python package names to install. - Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - responses: - '200': - description: Success. The packages are installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements - description: | - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + If true, the response is divided into chunks of size `chunk_size`. 
+ type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - This endpoint is synchronous and blocks until the requirements are installed. - ### Related + - `h` for hours - - [Processing engine and Python plugins](/influxdb3/core/plugins/) - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: schema: type: object properties: - requirements_location: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. type: string + chunked: description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt - required: - - requirements_location - example: - requirements_location: requirements.txt - responses: - '200': - description: Success. The requirements have been installed. - '400': - description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Processing engine - /api/v3/plugin_test/wal: - post: - operationId: PostTestWALPlugin - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/plugin_test/schedule: - post: - operationId: PostTestSchedulingPlugin - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/engine/{request_path}: - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - For example, if you define a trigger with the following: - ```json - trigger_specification: "request:hello-world" - ``` + - `h` for hours - then, the HTTP API exposes the following plugin endpoint: + - `m` for minutes - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - get: - operationId: GetProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. 
- The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `s` for seconds - An On Request plugin implements the following signature: + - `ms` for milliseconds - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + - `u` or `µ` for microseconds - The response depends on the plugin implementation. - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `ns` for nanoseconds - An On Request plugin implements the following signature: - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision - The response depends on the plugin implementation. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: false - content: - application/json: + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean + required: + - q + application/vnd.influxql: schema: - type: object - additionalProperties: true - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. + type: string + description: InfluxQL query string sent as the request body. tags: - - Processing engine - /api/v3/configure/token/admin: + - Query data + - Compatibility endpoints + /write: post: - operationId: PostCreateAdminToken - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. + operationId: PostV1Write responses: - '201': + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] - responses: - '201': - description: Success. The admin token has been regenerated. + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. 
+            - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
+          content:
+            application/json:
+              examples:
+                rejectedAllPoints:
+                  summary: Rejected all points in the batch
+                  value: |
+                    {
+                      "error": "write of line protocol failed",
+                      "data": [
+                        {
+                          "original_line": "home,room=Kitchen temp=hi",
+                          "line_number": 2,
+                          "error_message": "No fields were provided"
+                        }
+                      ]
+                    }
+                partialWriteErrorWithRejectedPoints:
+                  summary: Partial write rejected some points in the batch
+                  value: |
+                    {
+                      "error": "partial write of line protocol occurred",
+                      "data": [
+                        {
+                          "original_line": "home,room=Kitchen temp=hi",
+                          "line_number": 2,
+                          "error_message": "No fields were provided"
+                        }
+                      ]
+                    }
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "413":
+          description: Request entity too large.
+      summary: Write line protocol (v1-compatible)
+      description: >
+        Writes line protocol to the specified database.
+
+
+        This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x
+        client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools.
+
+
+        Use this endpoint to send data in [line
+        protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB.
+
+        Use query parameters to specify options for writing data.
+
+
+ #### Related
+
+
+ - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/)
 parameters:
- - name: id
+ - $ref: "#/components/parameters/dbWriteParam"
+ - $ref: "#/components/parameters/compatibilityPrecisionParam"
+ - $ref: "#/components/parameters/v1UsernameParam"
+ - $ref: "#/components/parameters/v1PasswordParam"
+ - name: rp
 in: query
- required: true
+ required: false
 schema:
 type: string
 description: |
- The ID of the token to delete.
- responses:
- '204':
- description: Success. The token has been deleted.
- '401':
- $ref: '#/components/responses/Unauthorized'
- '404':
- description: Token not found.
- tags:
- - Authentication
- - Token
- /api/v3/configure/token/named_admin:
- post:
- operationId: PostCreateNamedAdminToken
- summary: Create named admin token
- description: |
- Creates a named admin token.
- A named admin token is an admin token with a specific name identifier.
- parameters:
- - name: name
+ Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies.
+ - name: consistency
 in: query
- required: true
+ required: false
+ schema:
+ type: string
+ description: |
+ Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients.
+ - name: Authorization
+ in: header
+ required: false
 schema:
 type: string
 description: |
- The name for the admin token.
- responses:
- '201':
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic BASE64_CREDENTIALS` - Basic authentication (username is ignored)
+ - name: Content-Type
+ in: header
 description: |
- Success. The named admin token has been created.
- The response body contains the token string and metadata.
- content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: A token with this name already exists. - tags: - - Authentication - - Token - /api/v3/plugins/files: - put: - operationId: PutPluginFile - summary: Update plugin file - description: | - Updates a plugin file in the plugin directory. - x-security-note: Requires an admin token - responses: - '204': - description: Success. The plugin file has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - summary: Update plugin directory - description: | - Updates the plugin directory configuration. - x-security-note: Requires an admin token - responses: - '204': - description: Success. The plugin directory has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - - Processing engine + - Compatibility endpoints + - Write data components: parameters: AcceptQueryHeader: @@ -1921,7 +2344,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. 
schema: - $ref: '#/components/schemas/ContentEncoding' + $ref: "#/components/schemas/ContentEncoding" required: false ContentLength: name: Content-Length @@ -1929,7 +2352,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: '#/components/schemas/ContentLength' + $ref: "#/components/schemas/ContentLength" ContentType: name: Content-Type description: | @@ -1973,20 +2396,20 @@ components: in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' + $ref: "#/components/schemas/AcceptPartial" compatibilityPrecisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWriteCompatibility' + $ref: "#/components/schemas/PrecisionWriteCompatibility" description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWrite' + $ref: "#/components/schemas/PrecisionWrite" description: The precision for unix timestamps in the line protocol batch. querySqlParam: name: q @@ -2002,22 +2425,24 @@ components: in: query required: false schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" formatRequired: name: format in: query required: true schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" v1UsernameParam: name: u in: query required: false schema: type: string - description: | + description: > Username for v1 compatibility authentication. - When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. + + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. 
v1PasswordParam: name: p in: query @@ -2026,7 +2451,7 @@ components: type: string description: | Password for v1 compatibility authentication. - For query string authentication, pass an admin token. + For query string authentication, pass a database token with write permissions as this parameter. InfluxDB 3 checks that the `p` value is an authorized token. requestBodies: lineProtocolRequestBody: @@ -2050,7 +2475,7 @@ components: content: application/json: schema: - $ref: '#/components/schemas/QueryRequestObject' + $ref: "#/components/schemas/QueryRequestObject" schemas: AdminTokenObject: type: object @@ -2073,23 +2498,31 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: '2025-04-18T14:02:45.331Z' + created_at: "2025-04-18T14:02:45.331Z" expiry: null ContentEncoding: type: string enum: - gzip - identity - description: | + description: > Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + #### Multi-member gzip support - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + This allows you to: + - Concatenate multiple gzip files and send them in a single request + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2103,8 +2536,6 @@ components: ContentLength: type: integer description: The length in decimal number of octets. - Database: - type: string AcceptPartial: type: boolean default: true @@ -2115,9 +2546,12 @@ components: - json - csv - parquet + - json_lines - jsonl - description: | + - pretty + description: |- The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. 
NoSync: type: boolean default: false @@ -2133,11 +2567,14 @@ components: - ms - s - us + - u - ns + - "n" type: string - description: | + description: |- The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. + Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. PrecisionWrite: enum: - auto @@ -2173,6 +2610,7 @@ components: - json - csv - parquet + - json_lines - jsonl - pretty params: @@ -2194,9 +2632,13 @@ components: properties: db: type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. retention_period: type: string - description: | + description: |- The retention period for the database. Specifies how long data should be retained. Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d @@ -2231,6 +2673,12 @@ components: required: - name - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d required: - db - table @@ -2325,56 +2773,93 @@ components: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: '#/components/schemas/TriggerSettings' + - $ref: "#/components/schemas/TriggerSettings" trigger_specification: - type: string - description: | + description: > Specifies when and how the processing engine trigger should be invoked. 
+ ## Supported trigger specifications: + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + ### Interval-based scheduling + Format: `every:DURATION` - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + **Maximum interval**: 1 year + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + ### On-demand triggers + Format: `request:REQUEST_PATH` + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2420,6 +2905,116 @@ components: required: - run_async - error_behavior + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. 
+ properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. + properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. 
+ required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content ShowDatabasesResponse: type: object properties: @@ -2442,7 +3037,7 @@ components: - time - value values: - - - '2024-02-02T12:00:00Z' + - - "2024-02-02T12:00:00Z" - 42 ErrorMessage: type: object @@ -2452,38 +3047,6 @@ components: data: type: object nullable: true - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - readOnly: true - type: string - err: - description: Stack of errors that occurred during processing of the request. Useful for debugging. - readOnly: true - type: string - line: - description: First line in the request body that contains malformed data. - format: int32 - readOnly: true - type: integer - message: - description: Human-readable message. - readOnly: true - type: string - op: - description: Describes the logical code operation when the error occurred. Useful for debugging. - readOnly: true - type: string - required: - - code EpochCompatibility: description: | A unix timestamp precision. @@ -2512,62 +3075,13 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. - UpdateTableRequest: - type: object - properties: - db: - type: string - description: The name of the database containing the table. - table: - type: string - description: The name of the table to update. - retention_period: - type: string - description: | - The retention period for the table. Specifies how long data in this table should be retained. 
- Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - description: Request schema for updating table configuration. - LicenseResponse: - type: object - properties: - license_type: - type: string - description: The type of license (for example, "enterprise", "trial"). - example: enterprise - expires_at: - type: string - format: date-time - description: The expiration date of the license in ISO 8601 format. - example: '2025-12-31T23:59:59Z' - features: - type: array - items: - type: string - description: List of features enabled by the license. - example: - - clustering - - processing_engine - - advanced_auth - status: - type: string - enum: - - active - - expired - - invalid - description: The current status of the license. - example: active - description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" BadRequest: description: | Request failed. Possible reasons: @@ -2578,19 +3092,19 @@ components: content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" Forbidden: description: Access denied. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" NotFound: description: Resource not found. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" headers: ClusterUUID: description: | @@ -2607,88 +3121,126 @@ components: BasicAuthentication: type: http scheme: basic - description: | + description: >- Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. 
- When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token
+ Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints
+ in InfluxDB 3.
+
+
+ When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an
+ authorized token
+
 and ignores the `username` part of the decoded credential.
+
+ ### Syntax
+
+
+ ```http
+
+ Authorization: Basic BASE64_ENCODED_CREDENTIALS
+
+ ```
+
+ ### Example
+ ```bash
+
 curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \
 --user "":"AUTH_TOKEN" \
 --header "Content-type: text/plain; charset=utf-8" \
 --data-binary 'home,room=kitchen temp=72 1641024000'
 ```
+
 Replace the following:
- - **`DATABASE_NAME`**: your InfluxDB 3 Core database
- - **`AUTH_TOKEN`**: an admin token
- #### Related guides
+ - **`DATABASE_NAME`**: your InfluxDB 3 Core database
- - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/)
- - [Manage tokens](/influxdb3/core/admin/tokens/)
+ - **`AUTH_TOKEN`**: an admin token or database token authorized for the database
 QuerystringAuthentication:
 type: apiKey
 in: query
 name: u=&p=
- description: |
+ description: >-
 Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests.
- Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints.
+
+ Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and
+ [`/query`](#operation/GetV1Query) endpoints.
+
 When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token
+
+ and ignores the `u` (_username_) query parameter.
+ ### Syntax + ```http - http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN - http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + ``` + ### Examples + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: + - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - **`AUTH_TOKEN`**: an admin token + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: - # ?p=DATABASE_TOKEN + + # ?p=AUTH_TOKEN + ####################################### + curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` + Replace the following: - - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: an [admin token](/influxdb3/core/admin/tokens/) - #### Related guides + - **`DATABASE_NAME`**: the database to query - - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/core/admin/tokens/) + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -2701,8 +3253,7 @@ components: Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. - For the header value, provide the word `Bearer` followed by a space and an admin token. - + For the header value, provide the word `Bearer` followed by a space and a database token. 
### Syntax @@ -2717,7 +3268,7 @@ components: --header "Authorization: Bearer AUTH_TOKEN" ``` TokenAuthentication: - description: | + description: |- Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. @@ -2744,10 +3295,6 @@ components: --header "Authorization: Token AUTH_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` - - ### Related guides - - - [Manage tokens](/influxdb3/core/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/static/openapi/influxdb3-enterprise-openapi.yaml b/static/openapi/influxdb3-enterprise-openapi.yaml new file mode 100644 index 0000000000..3d6c70af5f --- /dev/null +++ b/static/openapi/influxdb3-enterprise-openapi.yaml @@ -0,0 +1,3799 @@ +openapi: 3.0.3 +info: + title: InfluxDB 3 Enterprise API Service + description: | + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. + Use this API to: + + - Write data to InfluxDB 3 Enterprise databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + To download the OpenAPI specification for this API, use the **Download** button above. 
+ version: v3.8.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 +servers: + - url: https://{baseurl} + description: InfluxDB 3 Enterprise API URL + variables: + baseurl: + enum: + - localhost:8181 + default: localhost:8181 + description: InfluxDB 3 Enterprise URL +security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] +tags: + - name: Authentication + description: | + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + + x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/enterprise/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/enterprise/admin/tokens/ + - name: Cache data + description: |- + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create an DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. 
A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field. + + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/enterprise/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/enterprise/admin/last-value-cache/ + - name: Compatibility endpoints + description: > + InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + + + ### Write data using v1- or v2-compatible endpoints + + + - [`/api/v2/write` endpoint](#operation/PostV2Write) + for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + + + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + + + All endpoints accept the same line protocol format. + + + ### Query data + + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + + + For new workloads, use one of the following: + + + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. 
+ + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + + ### Server information + + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + - name: Database + description: Manage databases + - description: > + Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API endpoints. + + Many endpoints may require other parameters in the query string or in the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + + + | Header | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `Accept` | string | The content type that the client can understand. | + + | `Authorization` | string | The authorization scheme and credential. | + + | `Content-Length` | integer | The size of the entity-body, in bytes. 
| + + | `Content-Type` | string | The format of the data in the request body. | + name: Headers and parameters + x-traitTag: true + - name: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. + + + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load + and trigger Python plugins in response to events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks for different database events. + + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/enterprise/processing-engine/) guide. + x-related: + - title: Processing engine and Python plugins + href: /influxdb3/enterprise/plugins/ + - name: Query data + description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - name: Quick start + description: > + 1. [Create an admin token](#section/Authentication) to authorize API requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. 
+ + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) + guide. + x-traitTag: true + - name: Server information + description: Retrieve server metrics, status, and version information + - name: Table + description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: | + Write data to InfluxDB 3 using line protocol format. + + #### Timestamp precision across write APIs + + InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions. + The following table compares timestamp precision support across v1, v2, and v3 write APIs: + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | + |-----------|---------------|----------------------|-------------------------| + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + All timestamps are stored internally as nanoseconds. +paths: + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. 
+ + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + tags: + - Server information + - Compatibility endpoints + /api/v2/write: + post: + operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v2-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + in: header + name: Content-Encoding + schema: + default: identity + description: | + Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. 
+ enum: + - gzip + - identity + type: string + - description: | + The size of the entity-body, in bytes, sent to InfluxDB. + in: header + name: Content-Length + schema: + description: The length in decimal number of octets. + type: integer + - description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + in: header + name: Accept + schema: + default: application/json + description: Error content type. + enum: + - application/json + type: string + - name: bucket + in: query + required: true + schema: + type: string + description: |- + A database name. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. + - name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Compatibility endpoints + - Write data + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase + parameters: + - $ref: "#/components/parameters/db" + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. + - name: remove_tables + in: query + required: false + schema: + type: boolean + default: false + description: | + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). 
+ Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. + tags: + - Database + get: + operationId: GetConfigureDatabase + responses: + "200": + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. 
+ tags: + - Database + post: + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database + responses: + "200": + description: Success. The database has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. + parameters: + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. 
+ parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureDistinctCache + responses: + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. + + + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the last cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache + responses: + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. + + + This endpoint is synchronous and blocks until the requirements are installed. 
+ + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + "200": + description: Success. The requirements have been installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. 
+ The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. 
+ value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false + description: | + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. + responses: + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. 
+ tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger is enabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/table: + delete: + operationId: DeleteConfigureTable + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. 
+ - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success (no content). The table has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. + tags: + - Table + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + put: + operationId: PatchConfigureTable + responses: + "200": + description: Success. The table has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Update a table + description: | + Updates table configuration, such as retention period. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateTableRequest" + tags: + - Table + x-enterprise-only: true + /api/v3/configure/token: + delete: + operationId: DeleteToken + parameters: + - name: token_name + in: query + required: true + schema: + type: string + description: The name of the token to delete. + responses: + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. 
+ "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/enterprise/configure/file_index: + post: + operationId: configure_file_index_create + summary: Create a file index + description: >- + Creates a file index for a database or table. + + + A file index improves query performance by indexing data files based on specified columns, enabling the query + engine to skip irrelevant files during query execution. + + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexCreateRequest" + responses: + "200": + description: Success. The file index has been created. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Database + - Table + delete: + operationId: configure_file_index_delete + summary: Delete a file index + description: |- + Deletes a file index from a database or table. + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexDeleteRequest" + responses: + "200": + description: Success. The file index has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database, table, or file index not found. + tags: + - Database + - Table + /api/v3/enterprise/configure/node/stop: + post: + operationId: stop_node + summary: Mark a node as stopped + description: >- + Marks a node as stopped in the catalog, freeing up the licensed cores it was using for other nodes. + + + Use this endpoint after you have already stopped the physical instance (for example, using `kill` or stopping + the container). This endpoint does not shut down the running process — you must stop the instance first. + + + When the node is marked as stopped: + + 1. Licensed cores from the stopped node are freed for reuse + + 2. Other nodes in the cluster see the update after their catalog sync interval + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 stop node](/influxdb3/enterprise/reference/cli/influxdb3/stop/node/) + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/StopNodeRequest" + responses: + "200": + description: Success. The node has been marked as stopped. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Node not found. 
+ tags: + - Server information + /api/v3/enterprise/configure/table/retention_period: + post: + operationId: create_or_update_retention_period_for_table + summary: Set table retention period + description: >- + Sets or updates the retention period for a specific table. + + + Use this endpoint to control how long data in a table is retained independently of the database-level retention + period. + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + - name: duration + in: query + required: true + schema: + type: string + description: The retention period as a human-readable duration (for example, "30d", "24h", "1y"). + responses: + "204": + description: Success. The table retention period has been set. + "400": + description: Bad request. Invalid duration format. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + delete: + operationId: delete_retention_period_for_table + summary: Clear table retention period + description: >- + Removes the retention period from a specific table, reverting to the database-level retention period (or + infinite retention if no database-level retention is set). + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. 
+ responses: + "204": + description: Success. The table retention period has been cleared. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + /api/v3/enterprise/configure/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. + + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + "201": + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/ResourceTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTokenWithPermissionsRequest" + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/plugins/files: + post: + operationId: create_plugin_file + summary: Create a plugin file + description: | + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. 
+ tags: + - Processing engine + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file + description: | + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+ parameters:
+ - $ref: "#/components/parameters/dbQueryParam"
+ - name: q
+ in: query
+ required: true
+ schema:
+ type: string
+ - name: format
+ in: query
+ required: false
+ schema:
+ type: string
+ - $ref: "#/components/parameters/AcceptQueryHeader"
+ - name: params
+ in: query
+ required: false
+ schema:
+ type: string
+ description: JSON-encoded query parameters for parameterized queries. Use this to pass bind parameters.
+ tags:
+ - Query data
+ post:
+ operationId: PostExecuteQueryInfluxQL
+ responses:
+ "200":
+ description: Success. The response body contains query results.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/QueryResponse"
+ text/csv:
+ schema:
+ type: string
+ application/vnd.apache.parquet:
+ schema:
+ type: string
+ application/jsonl:
+ schema:
+ type: string
+ "400":
+ description: Bad request.
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ description: Access denied.
+ "404":
+ description: Database not found.
+ "405":
+ description: Method not allowed.
+ "422":
+ description: Unprocessable entity.
+ summary: Execute InfluxQL query
+ description: Executes an InfluxQL query to retrieve data from the specified database.
+ parameters:
+ - $ref: "#/components/parameters/AcceptQueryHeader"
+ - $ref: "#/components/parameters/ContentType"
+ requestBody:
+ $ref: "#/components/requestBodies/queryRequestBody"
+ tags:
+ - Query data
+ /api/v3/query_sql:
+ get:
+ operationId: GetExecuteQuerySQL
+ responses:
+ "200":
+ description: Success. The response body contains query results.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/QueryResponse"
+ example:
+ results:
+ - series:
+ - name: mytable
+ columns:
+ - time
+ - value
+ values:
+ - - "2024-02-02T12:00:00Z"
+ - 42
+ text/csv:
+ schema:
+ type: string
+ application/vnd.apache.parquet:
+ schema:
+ type: string
+ application/jsonl:
+ schema:
+ type: string
+ "400":
+ description: Bad request.
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ description: Access denied.
+ "404":
+ description: Database not found.
+ "405":
+ description: Method not allowed.
+ "422":
+ description: Unprocessable entity.
+ summary: Execute SQL query
+ description: Executes an SQL query to retrieve data from the specified database.
+ parameters:
+ - $ref: "#/components/parameters/db"
+ - $ref: "#/components/parameters/querySqlParam"
+ - $ref: "#/components/parameters/format"
+ - $ref: "#/components/parameters/AcceptQueryHeader"
+ - $ref: "#/components/parameters/ContentType"
+ - name: params
+ in: query
+ required: false
+ schema:
+ type: string
+ description: JSON-encoded query parameters for parameterized queries. Use this to pass bind parameters.
+ tags:
+ - Query data
+ post:
+ operationId: PostExecuteQuerySQL
+ responses:
+ "200":
+ description: Success. The response body contains query results.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/QueryResponse"
+ text/csv:
+ schema:
+ type: string
+ application/vnd.apache.parquet:
+ schema:
+ type: string
+ application/jsonl:
+ schema:
+ type: string
+ "400":
+ description: Bad request.
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ description: Access denied.
+ "404":
+ description: Database not found.
+ "405":
+ description: Method not allowed.
+ "422":
+ description: Unprocessable entity.
+ summary: Execute SQL query
+ description: Executes an SQL query to retrieve data from the specified database.
+ parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. 
+ + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + 
"http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). 
+ schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. 
+ example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. 
+ + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. 
Honored but discouraged. InfluxDB 3 doesn't use retention policies.
+ - name: Authorization
+ in: header
+ required: false
+ schema:
+ type: string
+ description: |
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic BASE64_CREDENTIALS` - Basic authentication with base64-encoded `username:token` credentials (the username is ignored)
+ tags:
+ - Query data
+ - Compatibility endpoints
+ post:
+ operationId: PostExecuteV1Query
+ responses:
+ "200":
+ description: |
+ Success. The response body contains query results.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/QueryResponse"
+ application/csv:
+ schema:
+ type: string
+ headers:
+ Content-Type:
+ description: >
+ The content type of the response.
+
+ Default is `application/json`.
+
+
+ If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is
+ `application/csv`
+
+ and the response is formatted as CSV.
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/csv
+ "400":
+ description: Bad request.
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ description: Access denied.
+ "404":
+ description: Database not found.
+ "405":
+ description: Method not allowed.
+ "422":
+ description: Unprocessable entity.
+ summary: Execute InfluxQL query (v1-compatible)
+ description: >
+ Executes an InfluxQL query to retrieve data from the specified database.
+
+
+ #### Related
+
+
+ - [Use the InfluxDB v1 HTTP query API and InfluxQL to query
+ data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/)
+ parameters:
+ - name: Accept
+ in: header
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/csv
+ - text/csv
+ required: false
+ description: >
+ The content type that the client can understand.
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + requestBody: + content: + application/json: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. 
+ type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/vnd.influxql: + schema: + type: string + description: InfluxQL query string sent as the request body. + tags: + - Query data + - Compatibility endpoints + /write: + post: + operationId: PostV1Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients.
+ - name: Authorization
+ in: header
+ required: false
+ schema:
+ type: string
+ description: |
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic BASE64_CREDENTIALS` - Basic authentication with base64-encoded `username:token` credentials (the username is ignored)
+ - name: Content-Type
+ in: header
+ description: |
+ The content type of the request payload.
+ schema:
+ $ref: "#/components/schemas/LineProtocol"
+ required: false
+ - name: Accept
+ in: header
+ description: |
+ The content type that the client can understand.
+ Writes only return a response body if they fail (partially or completely)--for example,
+ due to a syntax problem or type mismatch.
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ required: false
+ - $ref: "#/components/parameters/ContentEncoding"
+ - $ref: "#/components/parameters/ContentLength"
+ requestBody:
+ $ref: "#/components/requestBodies/lineProtocolRequestBody"
+ tags:
+ - Compatibility endpoints
+ - Write data
+components:
+ parameters:
+ AcceptQueryHeader:
+ name: Accept
+ in: header
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/jsonl
+ - application/vnd.apache.parquet
+ - text/csv
+ required: false
+ description: |
+ The content type that the client can understand.
+ ContentEncoding:
+ name: Content-Encoding
+ in: header
+ description: |
+ The compression applied to the line protocol in the request payload.
+ To send a gzip payload, pass the `Content-Encoding: gzip` header.
+ schema:
+ $ref: "#/components/schemas/ContentEncoding"
+ required: false
+ ContentLength:
+ name: Content-Length
+ in: header
+ description: |
+ The size of the entity-body, in bytes, sent to InfluxDB.
+ schema: + $ref: "#/components/schemas/ContentLength" + ContentType: + name: Content-Type + description: | + The format of the data in the request body. + in: header + schema: + type: string + enum: + - application/json + required: false + db: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + dbWriteParam: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + dbQueryParam: + name: db + in: query + required: false + schema: + type: string + description: | + The name of the database. + + If you provide a query that specifies the database, you can omit the 'db' parameter from your request. + accept_partial: + name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + compatibilityPrecisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWriteCompatibility" + description: The precision for unix timestamps in the line protocol batch. + precisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWrite" + description: The precision for unix timestamps in the line protocol batch. + querySqlParam: + name: q + in: query + required: true + schema: + type: string + format: SQL + description: | + The query to execute. + format: + name: format + in: query + required: false + schema: + $ref: "#/components/schemas/Format" + formatRequired: + name: format + in: query + required: true + schema: + $ref: "#/components/schemas/Format" + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: > + Username for v1 compatibility authentication. 
+ + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. + v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass a database token with write permissions as this parameter. + InfluxDB 3 checks that the `p` value is an authorized token. + requestBodies: + lineProtocolRequestBody: + required: true + content: + text/plain: + schema: + type: string + examples: + line: + summary: Example line protocol + value: measurement,tag=value field=1 1234567890 + multiline: + summary: Example line protocol with UTF-8 characters + value: | + measurement,tag=value field=1 1234567890 + measurement,tag=value field=2 1234567900 + measurement,tag=value field=3 1234568000 + queryRequestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/QueryRequestObject" + schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: "2025-04-18T14:02:45.331Z" + expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + actions: + type: array + items: + type: string + enum: + - read + - write + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + expiry_secs: + type: integer + description: The expiration time in seconds. 
+ example: + token_name: All system information + permissions: + - resource_type: system + actions: + - read + resource_names: + - "*" + expiry_secs: 300000 + ContentEncoding: + type: string + enum: + - gzip + - identity + description: > + Content coding. + + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + + + #### Multi-member gzip support + + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + This allows you to: + + - Concatenate multiple gzip files and send them in a single request + + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + + - Simplify batch operations using standard compression tools + default: identity + LineProtocol: + type: string + enum: + - text/plain + - text/plain; charset=utf-8 + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. + default: text/plain; charset=utf-8 + ContentLength: + type: integer + description: The length in decimal number of octets. + Database: + type: string + AcceptPartial: + type: boolean + default: true + description: Accept partial writes. + Format: + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + description: |- + The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. + NoSync: + type: boolean + default: false + description: | + Acknowledges a successful write without waiting for WAL persistence. + + #### Related + + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + PrecisionWriteCompatibility: + enum: + - ms + - s + - us + - u + - ns + - "n" + type: string + description: |- + The precision for unix timestamps in the line protocol batch. 
+ Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. + PrecisionWrite: + enum: + - auto + - nanosecond + - microsecond + - millisecond + - second + type: string + description: | + The precision for unix timestamps in the line protocol batch. + + Supported values: + - `auto` (default): Automatically detects precision based on timestamp magnitude + - `nanosecond`: Nanoseconds + - `microsecond`: Microseconds + - `millisecond`: Milliseconds + - `second`: Seconds + QueryRequestObject: + type: object + properties: + db: + description: | + The name of the database to query. + Required if the query (`q`) doesn't specify the database. + type: string + q: + description: The query to execute. + type: string + format: + description: The format of the query results. + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + params: + description: | + Additional parameters for the query. + Use this field to pass query parameters. + type: object + additionalProperties: true + required: + - db + - q + example: + db: mydb + q: SELECT * FROM mytable + format: json + params: {} + CreateDatabaseRequest: + type: object + properties: + db: + type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: 7d + required: + - db + CreateTableRequest: + type: object + properties: + db: + type: string + table: + type: string + tags: + type: array + items: + type: string + fields: + type: array + items: + type: object + properties: + name: + type: string + type: + type: string + enum: + - utf8 + - int64 + - uint64 + - float64 + - bool + required: + - name + - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + - tags + - fields + DistinctCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + columns: + type: array + items: + type: string + max_cardinality: + type: integer + description: Optional maximum cardinality. + max_age: + type: integer + description: Optional maximum age in seconds. + required: + - db + - table + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + max_cardinality: 1000 + max_age: 3600 + LastCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + key_columns: + type: array + items: + type: string + description: Optional list of key columns. + value_columns: + type: array + items: + type: string + description: Optional list of value columns. + count: + type: integer + description: Optional count. + ttl: + type: integer + description: Optional time-to-live in seconds. 
+ required: + - db + - table + example: + db: mydb + table: mytable + key_columns: + - tag1 + value_columns: + - field1 + count: 100 + ttl: 3600 + ProcessingEngineTriggerRequest: + type: object + properties: + db: + type: string + plugin_filename: + type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + trigger_name: + type: string + trigger_settings: + description: | + Configuration for trigger error handling and execution behavior. + allOf: + - $ref: "#/components/schemas/TriggerSettings" + trigger_specification: + description: > + Specifies when and how the processing engine trigger should be invoked. + + + ## Supported trigger specifications: + + + ### Cron-based scheduling + + Format: `cron:CRON_EXPRESSION` + + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + + ``` + + ┌───────────── second (0-59) + + │ ┌───────────── minute (0-59) + + │ │ ┌───────────── hour (0-23) + + │ │ │ ┌───────────── day of month (1-31) + + │ │ │ │ ┌───────────── month (1-12) + + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + + │ │ │ │ │ │ + + * * * * * * + + ``` + + Examples: + + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + + - `cron:0 0 0 1 * *` - First day of every month at midnight + + + ### Interval-based scheduling + + Format: `every:DURATION` + + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + + - `every:30s` - Every 30 seconds + + - `every:5m` - Every 5 minutes + + - `every:1h` - Every hour + + - `every:1d` - Every day + + - `every:1w` - Every week + + - 
`every:1M` - Every month + + - `every:1y` - Every year + + + **Maximum interval**: 1 year + + + ### Table-based triggers + + - `all_tables` - Triggers on write events to any table in the database + + - `table:TABLE_NAME` - Triggers on write events to a specific table + + + ### On-demand triggers + + Format: `request:REQUEST_PATH` + + + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ + example: cron:0 0 6 * * 1-5 + trigger_arguments: + type: object + additionalProperties: true + description: Optional arguments passed to the plugin. + disabled: + type: boolean + default: false + description: Whether the trigger is disabled. + required: + - db + - plugin_filename + - trigger_name + - trigger_settings + - trigger_specification + - disabled + TriggerSettings: + type: object + description: | + Configuration settings for processing engine trigger error handling and execution behavior. + properties: + run_async: + type: boolean + default: false + description: | + Whether to run the trigger asynchronously. + When `true`, the trigger executes in the background without blocking. + When `false`, the trigger executes synchronously. + error_behavior: + type: string + enum: + - Log + - Retry + - Disable + description: | + Specifies how to handle errors that occur during trigger execution: + - `Log`: Log the error and continue (default) + - `Retry`: Retry the trigger execution + - `Disable`: Disable the trigger after an error + default: Log + required: + - run_async + - error_behavior + ApiNodeSpec: + x-enterprise-only: true + type: object + description: | + Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. 
+ Use this to control which node(s) should handle the cache or trigger. + properties: + node_id: + type: string + description: | + The ID of a specific node in the cluster. + If specified, the cache or trigger will only be created on this node. + node_group: + type: string + description: | + The name of a node group in the cluster. + If specified, the cache or trigger will be created on all nodes in this group. + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. 
+ properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. + required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content + ShowDatabasesResponse: + type: object + properties: + databases: + type: array + items: + type: string + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + ErrorMessage: + type: object + properties: + error: + type: string + data: + type: object + nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. 
+ readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code + EpochCompatibility: + description: | + A unix timestamp precision. + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. 
+ example: active + description: Response schema for license information. + CreateTokenWithPermissionsRequest: + type: object + properties: + token_name: + type: string + description: The name for the resource token. + permissions: + type: array + items: + $ref: "#/components/schemas/PermissionDetailsApi" + description: List of permissions to grant to the token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. + nullable: true + required: + - token_name + - permissions + PermissionDetailsApi: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + description: The type of resource. + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + actions: + type: array + items: + type: string + enum: + - read + - write + description: List of actions to grant. + required: + - resource_type + - resource_names + - actions + FileIndexCreateRequest: + type: object + description: Request body for creating a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, the file index applies to the database. + nullable: true + columns: + type: array + items: + type: string + description: The columns to use for the file index. + required: + - db + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + FileIndexDeleteRequest: + type: object + description: Request body for deleting a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, deletes the database-level file index. + nullable: true + required: + - db + example: + db: mydb + table: mytable + StopNodeRequest: + type: object + description: Request body for marking a node as stopped in the catalog. + properties: + node_id: + type: string + description: The ID of the node to mark as stopped. 
+ required: + - node_id + example: + node_id: node-1 + responses: + Unauthorized: + description: Unauthorized access. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + BadRequest: + description: | + Request failed. Possible reasons: + + - Invalid database name + - Malformed request body + - Invalid timestamp precision + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + Forbidden: + description: Access denied. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + NotFound: + description: Resource not found. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + headers: + ClusterUUID: + description: | + The catalog UUID of the InfluxDB instance. + This header is included in all HTTP API responses and enables you to: + - Identify which cluster instance handled the request + - Monitor deployments across multiple InfluxDB instances + - Debug and troubleshoot distributed systems + schema: + type: string + format: uuid + example: 01234567-89ab-cdef-0123-456789abcdef + securitySchemes: + BasicAuthentication: + type: http + scheme: basic + description: >- + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + + + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. + + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + + and ignores the `username` part of the decoded credential. 
+ + + ### Syntax + + + ```http + + Authorization: Basic + + ``` + + + ### Example + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: >- + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + + and ignores the `u` (_username_) query parameter. + + + ### Syntax + + + ```http + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + + ``` + + + ### Examples + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + + ```bash + + ####################################### + + # Use an InfluxDB 1.x compatible username and password + + # to query the InfluxDB v1 HTTP API + + ####################################### + + # Use authentication query parameters: + + # ?p=AUTH_TOKEN + + ####################################### + + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + 
--data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: the database to query + + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + BearerAuthentication: + type: http + scheme: bearer + bearerFormat: JWT + description: | + + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and a database token. + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example + + ```bash + curl http://localhost:8181/api/v3/query_influxql \ + --header "Authorization: Bearer AUTH_TOKEN" + ``` + TokenAuthentication: + description: |- + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. 
+ ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + in: header + name: Authorization + type: apiKey +x-tagGroups: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data From 83fb44587a447f6762b1bd8563cbf2056605389c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Mar 2026 22:13:27 +0000 Subject: [PATCH 02/15] Initial plan From e1e7e222426fa586258821be1510ae4c781a7adf Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 8 Mar 2026 17:15:32 -0500 Subject: [PATCH 03/15] feat(influxdb3): rename API specs and add download links (#6906) Rename Core and Enterprise OpenAPI spec files from ref.yml to descriptive names (influxdb3-core-openapi.yaml, influxdb3-enterprise-openapi.yaml). Copy specs to static/openapi/ and add download links in the API description. 
--- api-docs/generate-api-docs.sh | 4 +- api-docs/influxdb3/core/.config.yml | 2 +- api-docs/influxdb3/core/v3/content/info.yml | 5 +- .../v3/influxdb3-core-openapi.yaml} | 3699 ++++++++-------- api-docs/influxdb3/enterprise/.config.yml | 2 +- .../influxdb3/enterprise/v3/content/info.yml | 5 +- .../v3/influxdb3-enterprise-openapi.yaml | 3799 +++++++++++++++++ .../openapi/influxdb3-core-openapi.yaml | 3697 +++++++++------- .../openapi/influxdb3-enterprise-openapi.yaml | 3799 +++++++++++++++++ 9 files changed, 11673 insertions(+), 3339 deletions(-) rename api-docs/influxdb3/{enterprise/v3/ref.yml => core/v3/influxdb3-core-openapi.yaml} (77%) create mode 100644 api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml rename api-docs/influxdb3/core/v3/ref.yml => static/openapi/influxdb3-core-openapi.yaml (72%) create mode 100644 static/openapi/influxdb3-enterprise-openapi.yaml diff --git a/api-docs/generate-api-docs.sh b/api-docs/generate-api-docs.sh index 1e7820a7e7..f603bb2af1 100755 --- a/api-docs/generate-api-docs.sh +++ b/api-docs/generate-api-docs.sh @@ -70,7 +70,7 @@ function generateHtml { local specbundle=redoc-static_index.html # Define the temporary file for the Hugo template and Redoc HTML. local tmpfile="${productVersion}-${api}_index.tmp" - + echo "Bundling $specPath" # Use npx to install and run the specified version of redoc-cli. 
@@ -83,9 +83,9 @@ function generateHtml { --title="$title" \ --options.sortPropsAlphabetically \ --options.menuToggle \ - --options.hideDownloadButton \ --options.hideHostname \ --options.noAutoAuth \ + --options.hideDownloadButton \ --output=$specbundle \ --templateOptions.description="$shortDescription" \ --templateOptions.product="$productVersion" \ diff --git a/api-docs/influxdb3/core/.config.yml b/api-docs/influxdb3/core/.config.yml index 14792e219a..d492b29edd 100644 --- a/api-docs/influxdb3/core/.config.yml +++ b/api-docs/influxdb3/core/.config.yml @@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Core apis: v3@3: - root: v3/ref.yml + root: v3/influxdb3-core-openapi.yaml x-influxdata-docs-aliases: - /influxdb3/core/api/ - /influxdb3/core/api/v1/ diff --git a/api-docs/influxdb3/core/v3/content/info.yml b/api-docs/influxdb3/core/v3/content/info.yml index 34e55186eb..107c08b130 100644 --- a/api-docs/influxdb3/core/v3/content/info.yml +++ b/api-docs/influxdb3/core/v3/content/info.yml @@ -21,10 +21,7 @@ description: | - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - + [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml similarity index 77% rename from api-docs/influxdb3/enterprise/v3/ref.yml rename to api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml index 8a813ac3e8..f413341474 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml @@ -1,27 +1,24 @@ openapi: 3.0.3 info: - title: InfluxDB 3 Enterprise API Service + title: InfluxDB 3 Core API Service description: | - The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for - interacting with InfluxDB 3 Enterprise databases 
and resources. + The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for + interacting with InfluxDB 3 Core databases and resources. Use this API to: - - Write data to InfluxDB 3 Enterprise databases + - Write data to InfluxDB 3 Core databases - Query data using SQL or InfluxQL - Process data using Processing engine plugins - Manage databases, tables, and Processing engine triggers - Perform administrative tasks and access system information The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/api/v3`: InfluxDB 3 Core native endpoints - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - version: '3.7.0' + [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,15 +26,16 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 servers: - url: https://{baseurl} - description: InfluxDB 3 Enterprise API URL + description: InfluxDB 3 Core API URL variables: baseurl: enum: - localhost:8181 default: localhost:8181 - description: InfluxDB 3 Enterprise URL + description: InfluxDB 3 Core URL security: - BearerAuthentication: [] - TokenAuthentication: [] @@ -56,8 +54,13 @@ tags: | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/core/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/core/admin/tokens/ - name: Cache data - description: | + description: |- Manage the in-memory cache. 
#### Distinct Value Cache @@ -84,76 +87,126 @@ tags: what fields to cache, what tags to use to identify each series, and the number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. - - #### Related guides - - - [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/) + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/core/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/core/admin/last-value-cache/ - name: Compatibility endpoints - description: | + description: > InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + ### Write data using v1- or v2-compatible endpoints + - [`/api/v2/write` endpoint](#operation/PostV2Write) for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + All endpoints accept the same line protocol format. + ### Query data - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + For new workloads, use one of the following: + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. 
- - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + ### Server information - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ - name: Database description: Manage databases - - description: | + - description: > Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + ### Common parameters + The following table shows common parameters used by many InfluxDB API endpoints. + Many endpoints may require other parameters in the query string or in the + request body that perform functions specific to those endpoints. + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `db` | string | The database name | + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + The following table shows common headers used by many InfluxDB API endpoints. 
+ Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The authorization scheme and credential. | + | `Content-Length` | integer | The size of the entity-body, in bytes. | + | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: | + description: > Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and + trigger Python plugins in response to events in your database. + Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/core/processing-engine/) guide. 
+ x-related: + - title: Processing engine and Python plugins + href: /influxdb3/core/plugins/ - name: Query data description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | 1. [Create an admin token](#section/Authentication) to authorize API requests. @@ -195,7 +248,7 @@ tags: {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} ``` - For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) guide. + For more information about using InfluxDB 3 Core, see the [Get started](/influxdb3/core/get-started/) guide. x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information @@ -219,157 +272,79 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | - | **Minutes** | ✅ `m` | ❌ No | ❌ No | - | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /write: - post: - operationId: PostV1Write - summary: Write line protocol (v1-compatible) + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) description: | - Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. + Checks the status of the service. 
- #### Related + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/compatibilityPrecisionParam' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency - in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. 
responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + "200": + description: Service is running. Returns `OK`. content: - application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. tags: + - Server information - Compatibility endpoints - - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. 
+ headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. summary: Write line protocol (v2-compatible) - description: | + description: > Writes line protocol to the specified database. - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + Use query parameters to specify options for writing data. + #### Related - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type in: header description: | The content type of the request payload. schema: - $ref: '#/components/schemas/LineProtocol' + $ref: "#/components/schemas/LineProtocol" required: false - description: | The compression applied to the line protocol in the request payload. @@ -404,848 +379,618 @@ paths: enum: - application/json type: string - - name: db + - name: bucket in: query required: true schema: type: string - description: | + description: |- A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. 
+ + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. - name: accept_partial in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' - - $ref: '#/components/parameters/compatibilityPrecisionParam' + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' - responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - Compatibility endpoints - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ - /api/v3/write_lp: - post: - operationId: PostWriteLP - summary: Write line protocol - description: | - Writes line protocol to the specified database. - - This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. 
- - #### Features - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - #### Auto precision detection - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - the timestamp precision based on the magnitude of the timestamp value: - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - Larger timestamps → Nanosecond precision (no conversion needed) - - #### Related - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/accept_partial' - - $ref: '#/components/parameters/precisionParam' - - name: no_sync + - $ref: "#/components/parameters/db" + - name: data_only in: query + required: false schema: - $ref: '#/components/schemas/NoSync' - - name: Content-Type - in: header + type: boolean + default: false description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. 
+ - name: remove_tables + in: query required: false - - name: Accept - in: header + schema: + type: boolean + default: false description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false schema: type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. - '422': - description: Unprocessable entity. 
- x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. 
tags: - - Write data - /api/v3/query_sql: + - Database get: - operationId: GetExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/db' - - $ref: '#/components/parameters/querySqlParam' - - $ref: '#/components/parameters/format' - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: GetConfigureDatabase responses: - '200': - description: Success. The response body contains query results. + "200": + description: Success. The response body contains the list of databases. content: application/json: schema: - $ref: '#/components/schemas/QueryResponse' - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - '2024-02-02T12:00:00Z' - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. tags: - - Query data + - Database post: - operationId: PostExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. 
- parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + "200": + description: Success. The database has been updated. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" tags: - - Query data - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. 
+ - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. parameters: - - $ref: '#/components/parameters/dbQueryParam' - - name: q + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table in: query required: true schema: type: string - - name: format + description: The name of the table containing the distinct cache. + - name: name in: query - required: false + required: true schema: type: string - - $ref: '#/components/parameters/AcceptQueryHeader' - responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + description: The name of the distinct cache to delete. 
tags: - - Query data + - Cache data + - Table post: - operationId: PostExecuteQueryInfluxQL - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' - requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + operationId: PostConfigureDistinctCache responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. - tags: - - Query data - /query: - get: - operationId: GetV1ExecuteQuery - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. - #### Related - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. parameters: - - name: Accept - in: header + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true schema: type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. - schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. 
- required: true - schema: - type: string - - name: epoch - description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - schema: - $ref: '#/components/schemas/EpochCompatibility' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp + description: The name of the table containing the last cache. + - name: name in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false + required: true schema: type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. 
- '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: post: - operationId: PostExecuteV1Query - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - #### Related + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: | - A unix timestamp precision. - - - `h` for hours - - `m` for minutes - - `s` for seconds - - `ms` for milliseconds - - `u` or `µ` for microseconds - - `ns` for nanoseconds - - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: + packages: + type: array + items: + type: string description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests required: - - q - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. 
- - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': + "200": + description: Success. The packages are installed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ - /health: - get: - operationId: GetHealth - summary: Health check - description: | - Checks the status of the service. + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - responses: - '200': - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - '401': - description: Unauthorized. Authentication is required. - '500': - description: Service is unavailable. - tags: - - Server information - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: Checks the status of the service. 
- responses: - '200': - description: Service is running. - '500': - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /ping: - get: - operationId: GetPing - tags: - - Server information - summary: Ping the server - description: | - Returns version information for the server. + This endpoint is synchronous and blocks until the requirements are installed. - **Important**: Use a GET request. HEAD requests return `404 Not Found`. - The response includes version information in both headers and the JSON body: + ### Related - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - responses: - '200': - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: '3.8.0' - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Enterprise - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: '3.8.0' - revision: - type: string - description: The git revision hash for the build. - example: '83b589b883' - process_id: - type: string - description: A unique identifier for the server process. - example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' - '401': - description: Unauthorized. Authentication is required. - '404': - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - /metrics: - get: - operationId: GetMetrics - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. - responses: - '200': - description: Success. The response body contains Prometheus-compatible server metrics. 
- tags: - - Server information - /api/v3/configure/database: - get: - operationId: GetConfigureDatabase - summary: List databases - description: Retrieves a list of databases. + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - - $ref: '#/components/parameters/formatRequired' - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt responses: - '200': - description: Success. The response body contains the list of databases. - content: - application/json: - schema: - $ref: '#/components/schemas/ShowDatabasesResponse' - '400': + "200": + description: Success. The requirements have been installed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Database + - Processing engine + /api/v3/configure/processing_engine_trigger: post: - operationId: PostConfigureDatabase - summary: Create a database - description: Creates a new database in the system. + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. 
requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/CreateDatabaseRequest' + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. 
+ value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log responses: - '201': - description: Success. Database created. - '400': + "200": + description: Success. Processing engine trigger created. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: Database already exists. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database + - Processing engine delete: - operationId: DeleteConfigureDatabase - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the database schema and resources. + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. parameters: - - $ref: '#/components/parameters/db' - - name: data_only + - $ref: "#/components/parameters/db" + - name: trigger_name in: query - required: false + required: true schema: - type: boolean - default: false - description: | - Delete only data while preserving the database schema and all associated resources - (tokens, triggers, last value caches, distinct value caches, processing engine configurations). - When `false` (default), the entire database is deleted. - - name: remove_tables + type: string + - name: force in: query required: false schema: type: boolean default: false description: | - Used with `data_only=true` to remove table resources (caches) while preserving - database-level resources (tokens, triggers, processing engine configurations). - Has no effect when `data_only=false`. - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: | - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. 
+ Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. responses: - '200': - description: Success. Database deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. parameters: - - $ref: '#/components/parameters/db' + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '204': - description: Success. The database retention period has been removed. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - /api/v3/configure/table: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: post: - operationId: PostConfigureTable - summary: Create a table - description: Creates a new table within a database. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTableRequest' + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '201': - description: Success. The table has been created. - '400': + "200": + description: Success. The processing engine trigger is enabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Table + - Processing engine + /api/v3/configure/table: delete: operationId: DeleteConfigureTable - summary: Delete a table - description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the table schema and resources. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. parameters: - - $ref: '#/components/parameters/db' + - $ref: "#/components/parameters/db" - name: table in: query required: true @@ -1267,804 +1012,1315 @@ paths: schema: type: string format: date-time - description: | + description: |- Schedule the table for hard deletion at the specified time. If not provided, the table will be soft deleted. Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). 
+ + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '200': + "200": description: Success (no content). The table has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. tags: - Table - patch: - operationId: PatchConfigureTable - summary: Update a table - description: | - Updates table configuration, such as retention period. + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/UpdateTableRequest' - responses: - '200': - description: Success. The table has been updated. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Table not found. + $ref: "#/components/schemas/CreateTableRequest" tags: - Table - /api/v3/configure/database/{db}: - patch: - operationId: PatchConfigureDatabase - summary: Update a database - description: | - Updates database configuration, such as retention period. 
+ /api/v3/configure/token: + delete: + operationId: DeleteToken parameters: - - name: db - in: path + - name: token_name + in: query required: true schema: type: string - description: The name of the database to update. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateDatabaseRequest' + description: The name of the token to delete. responses: - '200': - description: Success. The database has been updated. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. tags: - - Database - /api/v3/show/license: - get: - operationId: GetShowLicense - summary: Show license information + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token description: | - Retrieves information about the current InfluxDB 3 Enterprise license. + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] responses: - '200': - description: Success. The response body contains license information. + "201": + description: Success. 
The admin token has been regenerated. content: application/json: schema: - $ref: '#/components/schemas/LicenseResponse' - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Server information - /api/v3/configure/distinct_cache: + - Authentication + - Token + /api/v3/configure/token/named_admin: post: - operationId: PostConfigureDistinctCache - summary: Create distinct cache - description: Creates a distinct cache for a table. + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. tags: - - Cache data - - Table + - Authentication + - Token requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/DistinctCacheCreateRequest' + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest responses: - '201': - description: Success. The distinct cache has been created. - '204': - description: Not created. A distinct cache with this configuration already exists. - '400': - description: | - Bad request. + "200": + description: Success. 
The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The server responds with status `400` if the request would overwrite an existing cache with a different configuration. - delete: - operationId: DeleteConfigureDistinctCache - summary: Delete distinct cache - description: Deletes a distinct cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the distinct cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the distinct cache to delete. - responses: - '200': - description: Success. The distinct cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. tags: - - Cache data - - Table - /api/v3/configure/last_cache: + - Processing engine post: - operationId: PostConfigureLastCache - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/LastCacheCreateRequest' + operationId: PostProcessingEnginePluginRequest responses: - '201': - description: Success. Last cache created. 
- '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - '409': - description: Cache already exists. - tags: - - Cache data - - Table - delete: - operationId: DeleteConfigureLastCache - summary: Delete last cache - description: Deletes a last cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the last cache to delete. - responses: - '200': - description: Success. The last cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - tags: - - Cache data - - Table - /api/v3/configure/processing_engine_trigger: - post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger - description: | - Creates a processing engine trigger with the specified plugin file and trigger specification. + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. 
+ + + An On Request plugin implements the following signature: - ### Related guides - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: - required: true + required: false content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - examples: - schedule_cron: - summary: Schedule trigger using cron - description: | - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). - value: - db: DATABASE_NAME - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. - value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. 
- value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - responses: - '200': - description: Success. Processing engine trigger created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + type: object + additionalProperties: true tags: - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. - parameters: - - $ref: '#/components/parameters/db' - - name: trigger_name - in: query - required: true - schema: - type: string - - name: force - in: query - required: false - schema: - type: boolean - default: false - description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin responses: - '200': - description: Success. The processing engine trigger has been deleted. - '400': + "200": + description: Success. The plugin test has been executed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. 
+ summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" tags: - Processing engine - /api/v3/configure/processing_engine_trigger/disable: + /api/v3/plugin_test/wal: post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - responses: - '200': - description: Success. The processing engine trigger has been disabled. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + $ref: "#/components/schemas/WALPluginTestRequest" tags: - Processing engine - /api/v3/configure/processing_engine_trigger/enable: - post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + $ref: "#/components/schemas/PluginDirectoryRequest" responses: - '200': - description: Success. The processing engine trigger is enabled. - '400': - description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. tags: - Processing engine - /api/v3/configure/plugin_environment/install_packages: + x-security-note: Requires an admin token + /api/v3/plugins/files: post: - operationId: PostInstallPluginPackages - summary: Install plugin packages + operationId: create_plugin_file + summary: Create a plugin file description: | - Installs the specified Python packages into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the packages are installed. - - ### Related guides - - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) - parameters: - - $ref: '#/components/parameters/ContentType' + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. requestBody: required: true content: application/json: schema: - type: object - properties: - packages: - type: array - items: - type: string - description: | - A list of Python package names to install. 
- Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests + $ref: "#/components/schemas/PluginFileRequest" responses: - '200': - description: Success. The packages are installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. tags: - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file description: | - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the requirements are installed. - - ### Related - - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: '#/components/parameters/ContentType' + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. 
requestBody: required: true content: application/json: schema: - type: object - properties: - requirements_location: - type: string - description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt - required: - - requirements_location - example: - requirements_location: requirements.txt + $ref: "#/components/schemas/PluginFileRequest" responses: - '200': - description: Success. The requirements have been installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. tags: - Processing engine - /api/v3/plugin_test/wal: + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+ parameters: + - $ref: "#/components/parameters/dbQueryParam" + - name: q + in: query + required: true + schema: + type: string + - name: format + in: query + required: false + schema: + type: string + - $ref: "#/components/parameters/AcceptQueryHeader" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data post: - operationId: PostTestWALPlugin - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. + operationId: PostExecuteQueryInfluxQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/WALPluginTestRequest' + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL responses: - '200': - description: Success. The plugin test has been executed. - '400': + "200": + description: Success. The response body contains query results. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/db" + - $ref: "#/components/parameters/querySqlParam" + - $ref: "#/components/parameters/format" + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. tags: - - Processing engine - /api/v3/plugin_test/schedule: + - Query data post: - operationId: PostTestSchedulingPlugin - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. + operationId: PostExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. 
+ + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + 
--header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. 
The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. 
+ example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. 
+ + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the + specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. 
InfluxDB 3 doesn't use retention policies.
+      - name: Authorization
+        in: header
+        required: false
+        schema:
+          type: string
+        description: |
+          Authorization header for token-based authentication.
+          Supported schemes:
+          - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+          - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+          - `Basic BASE64_ENCODED_CREDENTIALS` - Basic authentication (username is ignored)
+    tags:
+      - Query data
+      - Compatibility endpoints
+  post:
+    operationId: PostExecuteV1Query
+    responses:
+      "200":
+        description: |
+          Success. The response body contains query results.
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/QueryResponse"
+          application/csv:
+            schema:
+              type: string
+        headers:
+          Content-Type:
+            description: >
+              The content type of the response.
+
+              Default is `application/json`.
+
+
+              If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is
+              `application/csv`
+
+              and the response is formatted as CSV.
+            schema:
+              type: string
+              default: application/json
+              enum:
+                - application/json
+                - application/csv
+      "400":
+        description: Bad request.
+      "401":
+        $ref: "#/components/responses/Unauthorized"
+      "403":
+        description: Access denied.
+      "404":
+        description: Database not found.
+      "405":
+        description: Method not allowed.
+      "422":
+        description: Unprocessable entity.
+    summary: Execute InfluxQL query (v1-compatible)
+    description: >
+      Executes an InfluxQL query to retrieve data from the specified database.
+
+
+      #### Related
+
+
+      - [Use the InfluxDB v1 HTTP query API and InfluxQL to query
+      data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/)
+    parameters:
+      - name: Accept
+        in: header
+        schema:
+          type: string
+          default: application/json
+          enum:
+            - application/json
+            - application/csv
+            - text/csv
+        required: false
+        description: >
+          The content type that the client can understand.
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. requestBody: - required: true content: application/json: schema: - $ref: '#/components/schemas/SchedulePluginTestRequest' - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/engine/{request_path}: - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - For example, if you define a trigger with the following: - ```json - trigger_specification: "request:hello-world" - ``` + - `h` for hours - then, the HTTP API exposes the following plugin endpoint: + - `m` for minutes - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - get: - operationId: GetProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. 
+ - `s` for seconds - An On Request plugin implements the following signature: + - `ms` for milliseconds - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + - `u` or `µ` for microseconds - The response depends on the plugin implementation. - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `ns` for nanoseconds - An On Request plugin implements the following signature: - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision - The response depends on the plugin implementation. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: false - content: - application/json: + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: schema: type: object - additionalProperties: true - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - /api/v3/configure/enterprise/token: - post: - operationId: PostCreateResourceToken - summary: Create a resource token - description: | - Creates a resource (fine-grained permissions) token. - A resource token is a token that has access to specific resources in the system. + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - This endpoint is only available in InfluxDB 3 Enterprise. - responses: - '201': - description: | - Success. The resource token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/ResourceTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean + required: + - q + application/vnd.influxql: + schema: + type: string + description: InfluxQL query string sent as the request body. tags: - - Authentication - - Token - /api/v3/configure/token/admin: + - Query data + - Compatibility endpoints + /write: post: - operationId: PostCreateAdminToken - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. + operationId: PostV1Write responses: - '201': + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] - responses: - '201': - description: Success. The admin token has been regenerated. + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
content: application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token: - delete: - operationId: DeleteToken - summary: Delete token - description: | - Deletes a token. + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. 
+ + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - - name: id + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp in: query - required: true + required: false schema: type: string - description: The ID of the token to delete. - responses: - '204': - description: Success. The token has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Token not found. - tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: - post: - operationId: PostCreateNamedAdminToken - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. - parameters: - - name: name + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency in: query - required: true + required: false schema: type: string - description: The name for the admin token. - responses: - '201': description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: A token with this name already exists. - tags: - - Authentication - - Token - /api/v3/plugins/files: - put: - operationId: PutPluginFile - summary: Update plugin file - description: | - Updates a plugin file in the plugin directory. 
- x-security-note: Requires an admin token - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PluginFileRequest' - responses: - '204': - description: Success. The plugin file has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - summary: Update plugin directory - description: | - Updates the plugin directory configuration. - x-security-note: Requires an admin token + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PluginDirectoryRequest' - responses: - '204': - description: Success. The plugin directory has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. 
+ $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - - Processing engine + - Compatibility endpoints + - Write data components: parameters: AcceptQueryHeader: @@ -2088,7 +2344,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. schema: - $ref: '#/components/schemas/ContentEncoding' + $ref: "#/components/schemas/ContentEncoding" required: false ContentLength: name: Content-Length @@ -2096,7 +2352,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: '#/components/schemas/ContentLength' + $ref: "#/components/schemas/ContentLength" ContentType: name: Content-Type description: | @@ -2140,20 +2396,20 @@ components: in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' + $ref: "#/components/schemas/AcceptPartial" compatibilityPrecisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWriteCompatibility' + $ref: "#/components/schemas/PrecisionWriteCompatibility" description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWrite' + $ref: "#/components/schemas/PrecisionWrite" description: The precision for unix timestamps in the line protocol batch. querySqlParam: name: q @@ -2169,22 +2425,24 @@ components: in: query required: false schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" formatRequired: name: format in: query required: true schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" v1UsernameParam: name: u in: query required: false schema: type: string - description: | + description: > Username for v1 compatibility authentication. 
- When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. + + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. v1PasswordParam: name: p in: query @@ -2217,7 +2475,7 @@ components: content: application/json: schema: - $ref: '#/components/schemas/QueryRequestObject' + $ref: "#/components/schemas/QueryRequestObject" schemas: AdminTokenObject: type: object @@ -2240,61 +2498,31 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: '2025-04-18T14:02:45.331Z' + created_at: "2025-04-18T14:02:45.331Z" expiry: null - ResourceTokenObject: - type: object - properties: - token_name: - type: string - permissions: - type: array - items: - type: object - properties: - resource_type: - type: string - enum: - - system - - db - resource_identifier: - type: array - items: - type: string - actions: - type: array - items: - type: string - enum: - - read - - write - expiry_secs: - type: integer - description: The expiration time in seconds. - example: - token_name: All system information - permissions: - - resource_type: system - resource_identifier: - - '*' - actions: - - read - expiry_secs: 300000 ContentEncoding: type: string enum: - gzip - identity - description: | + description: > Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + #### Multi-member gzip support - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). 
+ This allows you to: + - Concatenate multiple gzip files and send them in a single request + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2308,8 +2536,6 @@ components: ContentLength: type: integer description: The length in decimal number of octets. - Database: - type: string AcceptPartial: type: boolean default: true @@ -2320,9 +2546,12 @@ components: - json - csv - parquet + - json_lines - jsonl - description: | + - pretty + description: |- The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. NoSync: type: boolean default: false @@ -2331,18 +2560,21 @@ components: #### Related - - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) - - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + - [Use the HTTP API and client libraries to write data](/influxdb3/core/write-data/api-client-libraries/) + - [Data durability](/influxdb3/core/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms - s - us + - u - ns + - "n" type: string - description: | + description: |- The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. + Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. PrecisionWrite: enum: - auto @@ -2378,6 +2610,7 @@ components: - json - csv - parquet + - json_lines - jsonl - pretty params: @@ -2458,8 +2691,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. 
@@ -2492,8 +2723,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. @@ -2538,63 +2767,99 @@ components: The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. The plugin file must implement the trigger interface associated with the trigger's specification. - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' trigger_name: type: string trigger_settings: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: '#/components/schemas/TriggerSettings' + - $ref: "#/components/schemas/TriggerSettings" trigger_specification: - description: | + description: > Specifies when and how the processing engine trigger should be invoked. + ## Supported trigger specifications: + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + ### Interval-based scheduling + Format: `every:DURATION` - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + **Maximum 
interval**: 1 year + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + ### On-demand triggers + Format: `request:REQUEST_PATH` + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2640,22 +2905,6 @@ components: required: - run_async - error_behavior - ApiNodeSpec: - type: object - description: | - Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. - Use this to control which node(s) should handle the cache or trigger. - properties: - node_id: - type: string - description: | - The ID of a specific node in the cluster. - If specified, the cache or trigger will only be created on this node. - node_group: - type: string - description: | - The name of a node group in the cluster. - If specified, the cache or trigger will be created on all nodes in this group. WALPluginTestRequest: type: object description: | @@ -2745,7 +2994,7 @@ components: files: type: array items: - $ref: '#/components/schemas/PluginFileEntry' + $ref: "#/components/schemas/PluginFileEntry" description: | List of plugin files to include in the directory. required: @@ -2756,16 +3005,15 @@ components: description: | Represents a single file in a plugin directory. properties: - filename: - type: string - description: | - The name of the file within the plugin directory. content: type: string description: | The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. 
required: - - filename + - relative_path - content ShowDatabasesResponse: type: object @@ -2789,7 +3037,7 @@ components: - time - value values: - - - '2024-02-02T12:00:00Z' + - - "2024-02-02T12:00:00Z" - 42 ErrorMessage: type: object @@ -2799,38 +3047,6 @@ components: data: type: object nullable: true - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - readOnly: true - type: string - err: - description: Stack of errors that occurred during processing of the request. Useful for debugging. - readOnly: true - type: string - line: - description: First line in the request body that contains malformed data. - format: int32 - readOnly: true - type: integer - message: - description: Human-readable message. - readOnly: true - type: string - op: - description: Describes the logical code operation when the error occurred. Useful for debugging. - readOnly: true - type: string - required: - - code EpochCompatibility: description: | A unix timestamp precision. @@ -2859,62 +3075,13 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. - UpdateTableRequest: - type: object - properties: - db: - type: string - description: The name of the database containing the table. - table: - type: string - description: The name of the table to update. - retention_period: - type: string - description: | - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - description: Request schema for updating table configuration. - LicenseResponse: - type: object - properties: - license_type: - type: string - description: The type of license (for example, "enterprise", "trial"). 
- example: enterprise - expires_at: - type: string - format: date-time - description: The expiration date of the license in ISO 8601 format. - example: '2025-12-31T23:59:59Z' - features: - type: array - items: - type: string - description: List of features enabled by the license. - example: - - clustering - - processing_engine - - advanced_auth - status: - type: string - enum: - - active - - expired - - invalid - description: The current status of the license. - example: active - description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" BadRequest: description: | Request failed. Possible reasons: @@ -2925,19 +3092,19 @@ components: content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" Forbidden: description: Access denied. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" NotFound: description: Resource not found. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" headers: ClusterUUID: description: | @@ -2954,94 +3121,126 @@ components: BasicAuthentication: type: http scheme: basic - description: | + description: >- Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. - When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. 
+ + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + and ignores the `username` part of the decoded credential. + ### Syntax + ```http + Authorization: Basic + ``` + ### Example + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ --user "":"AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - #### Related guides + - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: | + description: >- Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ ### Syntax + ```http + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + ``` + ### Examples + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: + # ?p=AUTH_TOKEN + ####################################### + curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` + Replace the following: - - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides + - **`DATABASE_NAME`**: the database to query - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -3069,7 +3268,7 @@ components: --header "Authorization: Bearer AUTH_TOKEN" ``` TokenAuthentication: - description: | + description: |- Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. 
@@ -3096,10 +3295,6 @@ components: --header "Authorization: Token AUTH_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` - - ### Related guides - - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/influxdb3/enterprise/.config.yml b/api-docs/influxdb3/enterprise/.config.yml index 4b8210b97c..d39bc413c8 100644 --- a/api-docs/influxdb3/enterprise/.config.yml +++ b/api-docs/influxdb3/enterprise/.config.yml @@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Enterprise apis: v3@3: - root: v3/ref.yml + root: v3/influxdb3-enterprise-openapi.yaml x-influxdata-docs-aliases: - /influxdb3/enterprise/api/ - /influxdb3/enterprise/api/v1/ diff --git a/api-docs/influxdb3/enterprise/v3/content/info.yml b/api-docs/influxdb3/enterprise/v3/content/info.yml index e4ec8ef609..cd2e5acdf3 100644 --- a/api-docs/influxdb3/enterprise/v3/content/info.yml +++ b/api-docs/influxdb3/enterprise/v3/content/info.yml @@ -21,10 +21,7 @@ description: | - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - + [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml new file mode 100644 index 0000000000..5ff481f9d0 --- /dev/null +++ b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml @@ -0,0 +1,3799 @@ +openapi: 3.0.3 +info: + title: InfluxDB 3 Enterprise API Service + description: | + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. 
+ Use this API to: + + - Write data to InfluxDB 3 Enterprise databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) + version: v3.8.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 +servers: + - url: https://{baseurl} + description: InfluxDB 3 Enterprise API URL + variables: + baseurl: + enum: + - localhost:8181 + default: localhost:8181 + description: InfluxDB 3 Enterprise URL +security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] +tags: + - name: Authentication + description: | + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + + x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/enterprise/guides/api-compatibility/v1/ + - title: Manage tokens + 
href: /influxdb3/enterprise/admin/tokens/ + - name: Cache data + description: |- + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create a DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field. + + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/enterprise/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/enterprise/admin/last-value-cache/ + - name: Compatibility endpoints + description: > + InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + + + ### Write data using v1- or v2-compatible endpoints + + + - [`/api/v2/write` endpoint](#operation/PostV2Write) + for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3.
+ - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + + + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + + + All endpoints accept the same line protocol format. + + + ### Query data + + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + + + For new workloads, use one of the following: + + + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + + ### Server information + + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + - name: Database + description: Manage databases + - description: > + Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API endpoints. + + Many endpoints may require other parameters in the query string or in the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response headers. 
+ + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + + + | Header | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `Accept` | string | The content type that the client can understand. | + + | `Authorization` | string | The authorization scheme and credential. | + + | `Content-Length` | integer | The size of the entity-body, in bytes. | + + | `Content-Type` | string | The format of the data in the request body. | + name: Headers and parameters + x-traitTag: true + - name: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. + + + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load + and trigger Python plugins in response to events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks for different database events. + + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/enterprise/plugins/) guide. + x-related: + - title: Processing engine and Python plugins + href: /influxdb3/enterprise/plugins/ + - name: Query data + description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - name: Quick start + description: > + 1. [Create an admin token](#section/Authentication) to authorize API requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2.
[Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) + guide. + x-traitTag: true + - name: Server information + description: Retrieve server metrics, status, and version information + - name: Table + description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: | + Write data to InfluxDB 3 using line protocol format. + + #### Timestamp precision across write APIs + + InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.
+ The following table compares timestamp precision support across v1, v2, and v3 write APIs: + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | + |-----------|---------------|----------------------|-------------------------| + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + All timestamps are stored internally as nanoseconds. +paths: + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + tags: + - Server information + - Compatibility endpoints + /api/v2/write: + post: + operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v2-compatible) + description: > + Writes line protocol to the specified database. 
+ + + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + in: header + name: Content-Encoding + schema: + default: identity + description: | + Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + enum: + - gzip + - identity + type: string + - description: | + The size of the entity-body, in bytes, sent to InfluxDB. + in: header + name: Content-Length + schema: + description: The length in decimal number of octets. + type: integer + - description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + in: header + name: Accept + schema: + default: application/json + description: Error content type. + enum: + - application/json + type: string + - name: bucket + in: query + required: true + schema: + type: string + description: |- + A database name. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. 
+ - name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Compatibility endpoints + - Write data + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase + parameters: + - $ref: "#/components/parameters/db" + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. + - name: remove_tables + in: query + required: false + schema: + type: boolean + default: false + description: | + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. 
+ summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. + tags: + - Database + get: + operationId: GetConfigureDatabase + responses: + "200": + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. + tags: + - Database + post: + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database + responses: + "200": + description: Success. The database has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Update a database + description: | + Updates database configuration, such as retention period. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. + parameters: + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureDistinctCache + responses: + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. + + + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the last cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache + responses: + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. 
+ parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. + + + This endpoint is synchronous and blocks until the requirements are installed. + + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + "200": + description: Success. The requirements have been installed. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. 
+ value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. 
+ value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false + description: | + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. + responses: + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. 
+ tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger is enabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/table: + delete: + operationId: DeleteConfigureTable + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success (no content). The table has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. 
+ The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. + tags: + - Table + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + put: + operationId: PatchConfigureTable + responses: + "200": + description: Success. The table has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Update a table + description: | + Updates table configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateTableRequest" + tags: + - Table + x-enterprise-only: true + /api/v3/configure/token: + delete: + operationId: DeleteToken + parameters: + - name: token_name + in: query + required: true + schema: + type: string + description: The name of the token to delete. + responses: + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. 
The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. 
+ nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. 
+ parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/enterprise/configure/file_index: + post: + operationId: configure_file_index_create + summary: Create a file index + description: >- + Creates a file index for a database or table. + + + A file index improves query performance by indexing data files based on specified columns, enabling the query + engine to skip irrelevant files during query execution. + + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexCreateRequest" + responses: + "200": + description: Success. The file index has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Database + - Table + delete: + operationId: configure_file_index_delete + summary: Delete a file index + description: |- + Deletes a file index from a database or table. + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexDeleteRequest" + responses: + "200": + description: Success. The file index has been deleted. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database, table, or file index not found. + tags: + - Database + - Table + /api/v3/enterprise/configure/node/stop: + post: + operationId: stop_node + summary: Mark a node as stopped + description: >- + Marks a node as stopped in the catalog, freeing up the licensed cores it was using for other nodes. + + + Use this endpoint after you have already stopped the physical instance (for example, using `kill` or stopping + the container). This endpoint does not shut down the running process — you must stop the instance first. + + + When the node is marked as stopped: + + 1. Licensed cores from the stopped node are freed for reuse + + 2. Other nodes in the cluster see the update after their catalog sync interval + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 stop node](/influxdb3/enterprise/reference/cli/influxdb3/stop/node/) + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/StopNodeRequest" + responses: + "200": + description: Success. The node has been marked as stopped. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Node not found. + tags: + - Server information + /api/v3/enterprise/configure/table/retention_period: + post: + operationId: create_or_update_retention_period_for_table + summary: Set table retention period + description: >- + Sets or updates the retention period for a specific table. + + + Use this endpoint to control how long data in a table is retained independently of the database-level retention + period. + + + This endpoint is only available in InfluxDB 3 Enterprise. 
+ + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + - name: duration + in: query + required: true + schema: + type: string + description: The retention period as a human-readable duration (for example, "30d", "24h", "1y"). + responses: + "204": + description: Success. The table retention period has been set. + "400": + description: Bad request. Invalid duration format. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + delete: + operationId: delete_retention_period_for_table + summary: Clear table retention period + description: >- + Removes the retention period from a specific table, reverting to the database-level retention period (or + infinite retention if no database-level retention is set). + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + responses: + "204": + description: Success. The table retention period has been cleared. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + /api/v3/enterprise/configure/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. 
+ + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + "201": + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/ResourceTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTokenWithPermissionsRequest" + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/plugins/files: + post: + operationId: create_plugin_file + summary: Create a plugin file + description: | + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + tags: + - Processing engine + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file + description: | + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. 
To update a multi-file
+        plugin directory, use `PUT /api/v3/plugins/directory`.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/PluginFileRequest"
+      responses:
+        "200":
+          description: Success. The plugin file has been updated.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Forbidden. Admin token required.
+        "500":
+          description: Plugin not found. The `plugin_name` does not match any registered trigger.
+      tags:
+        - Processing engine
+      x-security-note: Requires an admin token
+  /api/v3/query_influxql:
+    get:
+      operationId: GetExecuteInfluxQLQuery
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute InfluxQL query
+      description: Executes an InfluxQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/dbQueryParam"
+        - name: q
+          in: query
+          required: true
+          schema:
+            type: string
+        - name: format
+          in: query
+          required: false
+          schema:
+            type: string
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+            description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+          description: JSON-encoded query parameters for parameterized queries.
+      tags:
+        - Query data
+    post:
+      operationId: PostExecuteQueryInfluxQL
+      responses:
+        "200":
+          description: Success.
The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. 
+      parameters:
+        - $ref: "#/components/parameters/db"
+        - $ref: "#/components/parameters/querySqlParam"
+        - $ref: "#/components/parameters/format"
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+            description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+          description: JSON-encoded query parameters for parameterized queries.
+      tags:
+        - Query data
+    post:
+      operationId: PostExecuteQuerySQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute SQL query
+      description: Executes an SQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
+      requestBody:
+        $ref: "#/components/requestBodies/queryRequestBody"
+      tags:
+        - Query data
+  /api/v3/write_lp:
+    post:
+      operationId: PostWriteLP
+      parameters:
+        - $ref: "#/components/parameters/dbWriteParam"
+        - $ref: "#/components/parameters/accept_partial"
+        - $ref: "#/components/parameters/precisionParam"
+        - name: no_sync
+          in: query
+          schema:
+            $ref: "#/components/schemas/NoSync"
+        - name: Content-Type
+          in: header
+          description: |
+            The content type of the request payload.
+ schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. 
+ + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header 
"Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). 
+ schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. 
Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. 
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + tags: + - Query data + - Compatibility endpoints + post: + operationId: PostExecuteV1Query + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. 
+ requestBody: + content: + application/json: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. 
+ 
+
+ - `h` for hours
+
+ - `m` for minutes
+
+ - `s` for seconds
+
+ - `ms` for milliseconds
+
+ - `u` or `µ` for microseconds
+
+ - `ns` for nanoseconds
+
+
+ Formats timestamps as [unix (epoch)
+ timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision
+ instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with
+ nanosecond precision.
+ enum:
+ - ns
+ - u
+ - µ
+ - ms
+ - s
+ - m
+ - h
+ type: string
+ pretty:
+ description: |
+ If true, the JSON response is formatted in a human-readable format.
+ type: boolean
+ required:
+ - q
+ application/vnd.influxql:
+ schema:
+ type: string
+ description: InfluxQL query string sent as the request body.
+ tags:
+ - Query data
+ - Compatibility endpoints
+ /write:
+ post:
+ operationId: PostV1Write
+ responses:
+ "204":
+ description: Success ("No Content"). All data in the batch is written and queryable.
+ headers:
+ cluster-uuid:
+ $ref: "#/components/headers/ClusterUUID"
+ "400":
+ description: |
+ Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written.
+ If a partial write occurred, then some points from the batch are written and queryable.
+
+ The response body:
+ - indicates if a partial write occurred or all data was rejected.
+ - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients.
+ - name: Authorization
+ in: header
+ required: false
+ schema:
+ type: string
+ description: |
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic BASE64_CREDENTIALS` - Basic authentication (username is ignored)
+ - name: Content-Type
+ in: header
+ description: |
+ The content type of the request payload.
+ schema:
+ $ref: "#/components/schemas/LineProtocol"
+ required: false
+ - name: Accept
+ in: header
+ description: |
+ The content type that the client can understand.
+ Writes only return a response body if they fail (partially or completely)--for example,
+ due to a syntax problem or type mismatch.
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ required: false
+ - $ref: "#/components/parameters/ContentEncoding"
+ - $ref: "#/components/parameters/ContentLength"
+ requestBody:
+ $ref: "#/components/requestBodies/lineProtocolRequestBody"
+ tags:
+ - Compatibility endpoints
+ - Write data
+components:
+ parameters:
+ AcceptQueryHeader:
+ name: Accept
+ in: header
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/jsonl
+ - application/vnd.apache.parquet
+ - text/csv
+ required: false
+ description: |
+ The content type that the client can understand.
+ ContentEncoding:
+ name: Content-Encoding
+ in: header
+ description: |
+ The compression applied to the line protocol in the request payload.
+ To send a gzip payload, pass `Content-Encoding: gzip` header.
+ schema:
+ $ref: "#/components/schemas/ContentEncoding"
+ required: false
+ ContentLength:
+ name: Content-Length
+ in: header
+ description: |
+ The size of the entity-body, in bytes, sent to InfluxDB.
+ schema: + $ref: "#/components/schemas/ContentLength" + ContentType: + name: Content-Type + description: | + The format of the data in the request body. + in: header + schema: + type: string + enum: + - application/json + required: false + db: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + dbWriteParam: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + dbQueryParam: + name: db + in: query + required: false + schema: + type: string + description: | + The name of the database. + + If you provide a query that specifies the database, you can omit the 'db' parameter from your request. + accept_partial: + name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + compatibilityPrecisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWriteCompatibility" + description: The precision for unix timestamps in the line protocol batch. + precisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWrite" + description: The precision for unix timestamps in the line protocol batch. + querySqlParam: + name: q + in: query + required: true + schema: + type: string + format: SQL + description: | + The query to execute. + format: + name: format + in: query + required: false + schema: + $ref: "#/components/schemas/Format" + formatRequired: + name: format + in: query + required: true + schema: + $ref: "#/components/schemas/Format" + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: > + Username for v1 compatibility authentication. 
+ + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. + v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass a database token with write permissions as this parameter. + InfluxDB 3 checks that the `p` value is an authorized token. + requestBodies: + lineProtocolRequestBody: + required: true + content: + text/plain: + schema: + type: string + examples: + line: + summary: Example line protocol + value: measurement,tag=value field=1 1234567890 + multiline: + summary: Example line protocol with UTF-8 characters + value: | + measurement,tag=value field=1 1234567890 + measurement,tag=value field=2 1234567900 + measurement,tag=value field=3 1234568000 + queryRequestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/QueryRequestObject" + schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: "2025-04-18T14:02:45.331Z" + expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + actions: + type: array + items: + type: string + enum: + - read + - write + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + expiry_secs: + type: integer + description: The expiration time in seconds. 
+ example: + token_name: All system information + permissions: + - resource_type: system + actions: + - read + resource_names: + - "*" + expiry_secs: 300000 + ContentEncoding: + type: string + enum: + - gzip + - identity + description: > + Content coding. + + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + + + #### Multi-member gzip support + + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + This allows you to: + + - Concatenate multiple gzip files and send them in a single request + + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + + - Simplify batch operations using standard compression tools + default: identity + LineProtocol: + type: string + enum: + - text/plain + - text/plain; charset=utf-8 + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. + default: text/plain; charset=utf-8 + ContentLength: + type: integer + description: The length in decimal number of octets. + Database: + type: string + AcceptPartial: + type: boolean + default: true + description: Accept partial writes. + Format: + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + description: |- + The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. + NoSync: + type: boolean + default: false + description: | + Acknowledges a successful write without waiting for WAL persistence. + + #### Related + + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + PrecisionWriteCompatibility: + enum: + - ms + - s + - us + - u + - ns + - "n" + type: string + description: |- + The precision for unix timestamps in the line protocol batch. 
+ Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. + PrecisionWrite: + enum: + - auto + - nanosecond + - microsecond + - millisecond + - second + type: string + description: | + The precision for unix timestamps in the line protocol batch. + + Supported values: + - `auto` (default): Automatically detects precision based on timestamp magnitude + - `nanosecond`: Nanoseconds + - `microsecond`: Microseconds + - `millisecond`: Milliseconds + - `second`: Seconds + QueryRequestObject: + type: object + properties: + db: + description: | + The name of the database to query. + Required if the query (`q`) doesn't specify the database. + type: string + q: + description: The query to execute. + type: string + format: + description: The format of the query results. + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + params: + description: | + Additional parameters for the query. + Use this field to pass query parameters. + type: object + additionalProperties: true + required: + - db + - q + example: + db: mydb + q: SELECT * FROM mytable + format: json + params: {} + CreateDatabaseRequest: + type: object + properties: + db: + type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: 7d + required: + - db + CreateTableRequest: + type: object + properties: + db: + type: string + table: + type: string + tags: + type: array + items: + type: string + fields: + type: array + items: + type: object + properties: + name: + type: string + type: + type: string + enum: + - utf8 + - int64 + - uint64 + - float64 + - bool + required: + - name + - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + - tags + - fields + DistinctCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + columns: + type: array + items: + type: string + max_cardinality: + type: integer + description: Optional maximum cardinality. + max_age: + type: integer + description: Optional maximum age in seconds. + required: + - db + - table + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + max_cardinality: 1000 + max_age: 3600 + LastCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + key_columns: + type: array + items: + type: string + description: Optional list of key columns. + value_columns: + type: array + items: + type: string + description: Optional list of value columns. + count: + type: integer + description: Optional count. + ttl: + type: integer + description: Optional time-to-live in seconds. 
+ required: + - db + - table + example: + db: mydb + table: mytable + key_columns: + - tag1 + value_columns: + - field1 + count: 100 + ttl: 3600 + ProcessingEngineTriggerRequest: + type: object + properties: + db: + type: string + plugin_filename: + type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + trigger_name: + type: string + trigger_settings: + description: | + Configuration for trigger error handling and execution behavior. + allOf: + - $ref: "#/components/schemas/TriggerSettings" + trigger_specification: + description: > + Specifies when and how the processing engine trigger should be invoked. + + + ## Supported trigger specifications: + + + ### Cron-based scheduling + + Format: `cron:CRON_EXPRESSION` + + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + + ``` + + ┌───────────── second (0-59) + + │ ┌───────────── minute (0-59) + + │ │ ┌───────────── hour (0-23) + + │ │ │ ┌───────────── day of month (1-31) + + │ │ │ │ ┌───────────── month (1-12) + + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + + │ │ │ │ │ │ + + * * * * * * + + ``` + + Examples: + + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + + - `cron:0 0 0 1 * *` - First day of every month at midnight + + + ### Interval-based scheduling + + Format: `every:DURATION` + + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + + - `every:30s` - Every 30 seconds + + - `every:5m` - Every 5 minutes + + - `every:1h` - Every hour + + - `every:1d` - Every day + + - `every:1w` - Every week + + - 
`every:1M` - Every month
+
+ - `every:1y` - Every year
+
+
+ **Maximum interval**: 1 year
+
+
+ ### Table-based triggers
+
+ - `all_tables` - Triggers on write events to any table in the database
+
+ - `table:TABLE_NAME` - Triggers on write events to a specific table
+
+
+ ### On-demand triggers
+
+ Format: `request:REQUEST_PATH`
+
+
+ Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation:
+
+ - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world`
+
+ - `request:data-export` - Creates endpoint `/api/v3/engine/data-export`
+ pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhdwMy]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$
+ example: cron:0 0 6 * * 1-5
+ trigger_arguments:
+ type: object
+ additionalProperties: true
+ description: Optional arguments passed to the plugin.
+ disabled:
+ type: boolean
+ default: false
+ description: Whether the trigger is disabled.
+ required:
+ - db
+ - plugin_filename
+ - trigger_name
+ - trigger_settings
+ - trigger_specification
+ - disabled
+ TriggerSettings:
+ type: object
+ description: |
+ Configuration settings for processing engine trigger error handling and execution behavior.
+ properties:
+ run_async:
+ type: boolean
+ default: false
+ description: |
+ Whether to run the trigger asynchronously.
+ When `true`, the trigger executes in the background without blocking.
+ When `false`, the trigger executes synchronously.
+ error_behavior:
+ type: string
+ enum:
+ - Log
+ - Retry
+ - Disable
+ description: |
+ Specifies how to handle errors that occur during trigger execution:
+ - `Log`: Log the error and continue (default)
+ - `Retry`: Retry the trigger execution
+ - `Disable`: Disable the trigger after an error
+ default: Log
+ required:
+ - run_async
+ - error_behavior
+ ApiNodeSpec:
+ x-enterprise-only: true
+ type: object
+ description: |
+ Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster.
+ Use this to control which node(s) should handle the cache or trigger. + properties: + node_id: + type: string + description: | + The ID of a specific node in the cluster. + If specified, the cache or trigger will only be created on this node. + node_group: + type: string + description: | + The name of a node group in the cluster. + If specified, the cache or trigger will be created on all nodes in this group. + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. 
+ properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. + required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content + ShowDatabasesResponse: + type: object + properties: + databases: + type: array + items: + type: string + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + ErrorMessage: + type: object + properties: + error: + type: string + data: + type: object + nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. 
+ readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code + EpochCompatibility: + description: | + A unix timestamp precision. + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. 
+ example: active + description: Response schema for license information. + CreateTokenWithPermissionsRequest: + type: object + properties: + token_name: + type: string + description: The name for the resource token. + permissions: + type: array + items: + $ref: "#/components/schemas/PermissionDetailsApi" + description: List of permissions to grant to the token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. + nullable: true + required: + - token_name + - permissions + PermissionDetailsApi: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + description: The type of resource. + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + actions: + type: array + items: + type: string + enum: + - read + - write + description: List of actions to grant. + required: + - resource_type + - resource_names + - actions + FileIndexCreateRequest: + type: object + description: Request body for creating a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, the file index applies to the database. + nullable: true + columns: + type: array + items: + type: string + description: The columns to use for the file index. + required: + - db + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + FileIndexDeleteRequest: + type: object + description: Request body for deleting a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, deletes the database-level file index. + nullable: true + required: + - db + example: + db: mydb + table: mytable + StopNodeRequest: + type: object + description: Request body for marking a node as stopped in the catalog. + properties: + node_id: + type: string + description: The ID of the node to mark as stopped. 
+ required: + - node_id + example: + node_id: node-1 + responses: + Unauthorized: + description: Unauthorized access. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + BadRequest: + description: | + Request failed. Possible reasons: + + - Invalid database name + - Malformed request body + - Invalid timestamp precision + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + Forbidden: + description: Access denied. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + NotFound: + description: Resource not found. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + headers: + ClusterUUID: + description: | + The catalog UUID of the InfluxDB instance. + This header is included in all HTTP API responses and enables you to: + - Identify which cluster instance handled the request + - Monitor deployments across multiple InfluxDB instances + - Debug and troubleshoot distributed systems + schema: + type: string + format: uuid + example: 01234567-89ab-cdef-0123-456789abcdef + securitySchemes: + BasicAuthentication: + type: http + scheme: basic + description: >- + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + + + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. + + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + + and ignores the `username` part of the decoded credential. 
+ + + ### Syntax + + + ```http + + Authorization: Basic + + ``` + + + ### Example + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: >- + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + + and ignores the `u` (_username_) query parameter. + + + ### Syntax + + + ```http + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + + ``` + + + ### Examples + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + + ```bash + + ####################################### + + # Use an InfluxDB 1.x compatible username and password + + # to query the InfluxDB v1 HTTP API + + ####################################### + + # Use authentication query parameters: + + # ?p=AUTH_TOKEN + + ####################################### + + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + 
--data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: the database to query + + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + BearerAuthentication: + type: http + scheme: bearer + bearerFormat: JWT + description: | + + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and a database token. + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example + + ```bash + curl http://localhost:8181/api/v3/query_influxql \ + --header "Authorization: Bearer AUTH_TOKEN" + ``` + TokenAuthentication: + description: |- + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. 
+ ######################################################## + + curl --request POST "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + in: header + name: Authorization + type: apiKey +x-tagGroups: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/core/v3/ref.yml b/static/openapi/influxdb3-core-openapi.yaml similarity index 72% rename from api-docs/influxdb3/core/v3/ref.yml rename to static/openapi/influxdb3-core-openapi.yaml index 32778f807e..bd0d928430 100644 --- a/api-docs/influxdb3/core/v3/ref.yml +++ b/static/openapi/influxdb3-core-openapi.yaml @@ -17,11 +17,8 @@ info: - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - version: '3.7.0' + To download the OpenAPI specification for this API, use the **Download** button above. + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,6 +26,7 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 servers: - url: https://{baseurl} description: InfluxDB 3 Core API URL @@ -56,8 +54,13 @@ tags: | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/core/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/core/admin/tokens/ - name: Cache data - description: | + description: |- Manage the in-memory cache. 
#### Distinct Value Cache @@ -84,76 +87,126 @@ tags: what fields to cache, what tags to use to identify each series, and the number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. - - #### Related guides - - - [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/core/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/core/admin/last-value-cache/ - name: Compatibility endpoints - description: | + description: > InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + ### Write data using v1- or v2-compatible endpoints + - [`/api/v2/write` endpoint](#operation/PostV2Write) for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + All endpoints accept the same line protocol format. + ### Query data - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + For new workloads, use one of the following: + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. 
- - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. + + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + ### Server information - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ - name: Database description: Manage databases - - description: | + - description: > Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + ### Common parameters + The following table shows common parameters used by many InfluxDB API endpoints. + Many endpoints may require other parameters in the query string or in the + request body that perform functions specific to those endpoints. + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `db` | string | The database name | + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + The following table shows common headers used by many InfluxDB API endpoints. 
+ Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The authorization scheme and credential. | + | `Content-Length` | integer | The size of the entity-body, in bytes. | + | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: | + description: > Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and + trigger Python plugins in response to events in your database. + Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/core/processing-engine/) guide. 
+ x-related: + - title: Processing engine and Python plugins + href: /influxdb3/core/plugins/ - name: Query data description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | 1. [Create an admin token](#section/Authentication) to authorize API requests. @@ -219,149 +272,71 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | - | **Minutes** | ✅ `m` | ❌ No | ❌ No | - | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /write: - post: - operationId: PostV1Write - summary: Write line protocol (v1-compatible) + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) description: | - Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. + Checks the status of the service. - #### Related + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. 
- - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) - parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/compatibilityPrecisionParam' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency - in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. 
- headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + "200": + description: Service is running. Returns `OK`. content: - application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. tags: + - Server information - Compatibility endpoints - - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. summary: Write line protocol (v2-compatible) - description: | + description: > Writes line protocol to the specified database. - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + Use query parameters to specify options for writing data. + #### Related + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type @@ -369,7 +344,7 @@ paths: description: | The content type of the request payload. schema: - $ref: '#/components/schemas/LineProtocol' + $ref: "#/components/schemas/LineProtocol" required: false - description: | The compression applied to the line protocol in the request payload. @@ -404,1500 +379,1948 @@ paths: enum: - application/json type: string - - name: db + - name: bucket in: query required: true schema: type: string - description: | + description: |- A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. 
- name: accept_partial in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' - - $ref: '#/components/parameters/compatibilityPrecisionParam' + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' - responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - Compatibility endpoints - Write data - x-influxdata-guides: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ - /api/v3/write_lp: - post: - operationId: PostWriteLP - summary: Write line protocol - description: | - Writes line protocol to the specified database. - - This is the native InfluxDB 3 Core write endpoint that provides enhanced control - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. 
- - #### Features - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - #### Auto precision detection - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - the timestamp precision based on the magnitude of the timestamp value: - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - Larger timestamps → Nanosecond precision (no conversion needed) - - #### Related - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase parameters: - - $ref: '#/components/parameters/dbWriteParam' - - $ref: '#/components/parameters/accept_partial' - - $ref: '#/components/parameters/precisionParam' - - name: no_sync + - $ref: "#/components/parameters/db" + - name: data_only in: query + required: false schema: - $ref: '#/components/schemas/NoSync' - - name: Content-Type - in: header + type: boolean + default: false description: | - The content type of the request payload. - schema: - $ref: '#/components/schemas/LineProtocol' + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. 
+ - name: remove_tables + in: query required: false - - name: Accept - in: header + schema: + type: boolean + default: false description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false schema: type: string - default: application/json - enum: - - application/json - required: false - - $ref: '#/components/parameters/ContentEncoding' - - $ref: '#/components/parameters/ContentLength' - requestBody: - $ref: '#/components/requestBodies/lineProtocolRequestBody' + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '204': - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: '#/components/headers/ClusterUUID' - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '413': - description: Request entity too large. - '422': - description: Unprocessable entity. 
- x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. 
tags: - - Write data - /api/v3/query_sql: + - Database get: - operationId: GetExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/db' - - $ref: '#/components/parameters/querySqlParam' - - $ref: '#/components/parameters/format' - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: GetConfigureDatabase responses: - '200': - description: Success. The response body contains query results. + "200": + description: Success. The response body contains the list of databases. content: application/json: schema: - $ref: '#/components/schemas/QueryResponse' - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - '2024-02-02T12:00:00Z' - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. tags: - - Query data + - Database post: - operationId: PostExecuteQuerySQL - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. 
- parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': + "200": + description: Success. The database has been updated. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" tags: - - Query data - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. 
+ - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. parameters: - - $ref: '#/components/parameters/dbQueryParam' - - name: q + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table in: query required: true schema: type: string - - name: format + description: The name of the table containing the distinct cache. + - name: name in: query - required: false + required: true schema: type: string - - $ref: '#/components/parameters/AcceptQueryHeader' - responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + description: The name of the distinct cache to delete. 
tags: - - Query data + - Cache data + - Table post: - operationId: PostExecuteQueryInfluxQL - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: '#/components/parameters/AcceptQueryHeader' - - $ref: '#/components/parameters/ContentType' - requestBody: - $ref: '#/components/requestBodies/queryRequestBody' + operationId: PostConfigureDistinctCache responses: - '200': - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. - tags: - - Query data - /query: - get: - operationId: GetV1ExecuteQuery - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. - #### Related - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. parameters: - - name: Accept - in: header + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true schema: type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. - schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. 
- required: true - schema: - type: string - - name: epoch - description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - schema: - $ref: '#/components/schemas/EpochCompatibility' - - $ref: '#/components/parameters/v1UsernameParam' - - $ref: '#/components/parameters/v1PasswordParam' - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false + description: The name of the table containing the last cache. + - name: name + in: query + required: true schema: type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. 
- '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: post: - operationId: PostExecuteV1Query - summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - #### Related + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: + packages: + type: array + items: + type: string description: | - A unix timestamp precision. + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. - - `h` for hours - - `m` for minutes - - `s` for seconds - - `ms` for milliseconds - - `u` or `µ` for microseconds - - `ns` for nanoseconds - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h + This endpoint is synchronous and blocks until the requirements are installed. 
+ + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: type: string - pretty: description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt required: - - q - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: | - The content type that the client can understand. - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. - - Returns an error if the format is invalid or non-UTF8. + - requirements_location + example: + requirements_location: requirements.txt responses: - '200': - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryResponse' - application/csv: - schema: - type: string - headers: - Content-Type: - description: | - The content type of the response. - Default is `application/json`. - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - '400': + "200": + description: Success. The requirements have been installed. + "400": description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - '404': - description: Database not found. - '405': - description: Method not allowed. - '422': - description: Unprocessable entity. + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Query data - - Compatibility endpoints - x-influxdata-guides: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - /health: - get: - operationId: GetHealth - summary: Health check - description: | - Checks the status of the service. + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - responses: - '200': - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - '500': - description: Service is unavailable. - tags: - - Server information - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: Checks the status of the service. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). 
+ value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. 
+ value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log responses: - '200': - description: Service is running. - '500': - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /ping: - get: - operationId: GetPing + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Server information - summary: Ping the server - description: | - Returns version information for the server. - - **Important**: Use a GET request. HEAD requests return `404 Not Found`. 
- - The response includes version information in both headers and the JSON body: - - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - responses: - '200': - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: '3.8.0' - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: '3.8.0' - revision: - type: string - description: The git revision hash for the build. - example: '5276213d5b' - process_id: - type: string - description: A unique identifier for the server process. - example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' - '404': + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - /metrics: - get: - operationId: GetMetrics - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. responses: - '200': - description: Success. The response body contains Prometheus-compatible server metrics. + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Server information - /api/v3/configure/database: - get: - operationId: GetConfigureDatabase - summary: List databases - description: Retrieves a list of databases. + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. parameters: - - $ref: '#/components/parameters/formatRequired' + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '200': - description: Success. The response body contains the list of databases. - content: - application/json: - schema: - $ref: '#/components/schemas/ShowDatabasesResponse' - '400': + "200": + description: Success. The processing engine trigger has been disabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: post: - operationId: PostConfigureDatabase - summary: Create a database - description: Creates a new database in the system. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateDatabaseRequest' + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. 
+ - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. responses: - '201': - description: Success. Database created. - '400': + "200": + description: Success. The processing engine trigger is enabled. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: Database already exists. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. tags: - - Database - delete: - operationId: DeleteConfigureDatabase - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - parameters: - - $ref: '#/components/parameters/db' - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: | - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. - responses: - '200': - description: Success. Database deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. - Data in the database will not expire based on time. - parameters: - - $ref: '#/components/parameters/db' - responses: - '200': - description: Success. Retention period removed from database. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Database + - Processing engine /api/v3/configure/table: - post: - operationId: PostConfigureTable - summary: Create a table - description: Creates a new table within a database. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateTableRequest' - responses: - '201': - description: Success. The table has been created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Table delete: operationId: DeleteConfigureTable - summary: Delete a table - description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. parameters: - - $ref: '#/components/parameters/db' + - $ref: "#/components/parameters/db" - name: table in: query required: true schema: type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. - name: hard_delete_at in: query required: false schema: type: string format: date-time - description: | + description: |- Schedule the table for hard deletion at the specified time. If not provided, the table will be soft deleted. Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). 
+ + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time responses: - '200': + "200": description: Success (no content). The table has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': + "401": + $ref: "#/components/responses/Unauthorized" + "404": description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. tags: - Table - /api/v3/configure/distinct_cache: post: - operationId: PostConfigureDistinctCache - summary: Create distinct cache - description: Creates a distinct cache for a table. - tags: - - Cache data - - Table + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/DistinctCacheCreateRequest' - responses: - '201': - description: Success. The distinct cache has been created. - '204': - description: Not created. A distinct cache with this configuration already exists. - '400': - description: | - Bad request. - - The server responds with status `400` if the request would overwrite an existing cache with a different configuration. 
+ $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + /api/v3/configure/token: delete: - operationId: DeleteConfigureDistinctCache - summary: Delete distinct cache - description: Deletes a distinct cache. + operationId: DeleteToken parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the distinct cache. - - name: name + - name: token_name in: query required: true schema: type: string - description: The name of the distinct cache to delete. + description: The name of the token to delete. responses: - '200': - description: Success. The distinct cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. tags: - - Cache data - - Table - /api/v3/configure/last_cache: + - Authentication + - Token + /api/v3/configure/token/admin: post: - operationId: PostConfigureLastCache - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/LastCacheCreateRequest' + operationId: PostCreateAdminToken responses: - '201': - description: Success. Last cache created. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. - '409': - description: Cache already exists. + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. tags: - - Cache data - - Table - delete: - operationId: DeleteConfigureLastCache - summary: Delete last cache - description: Deletes a last cache. - parameters: - - $ref: '#/components/parameters/db' - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the last cache to delete. + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] responses: - '200': - description: Success. The last cache has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Cache not found. + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" tags: - - Cache data - - Table - /api/v3/configure/processing_engine_trigger: + - Authentication + - Token + /api/v3/configure/token/named_admin: post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token description: | - Creates a processing engine trigger with the specified plugin file and trigger specification. - - ### Related guides - - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - examples: - schedule_cron: - summary: Schedule trigger using cron - description: | - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. 
- value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. - value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest responses: - '200': - description: Success. Processing engine trigger created. - '400': + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. 
The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" tags: - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. 
+ tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/plugins/files: + post: + operationId: create_plugin_file + summary: Create a plugin file + description: | + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + tags: + - Processing engine + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file + description: | + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. 
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute InfluxQL query
+      description: Executes an InfluxQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/dbQueryParam"
+        - name: q
+          in: query
+          required: true
+          schema:
+            type: string
+        - name: format
+          in: query
+          required: false
+          schema:
+            type: string
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+          description: >-
+            JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+      tags:
+        - Query data
+    post:
+      operationId: PostExecuteQueryInfluxQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute InfluxQL query
+      description: Executes an InfluxQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
+      requestBody:
+        $ref: "#/components/requestBodies/queryRequestBody"
+      tags:
+        - Query data
+  /api/v3/query_sql:
+    get:
+      operationId: GetExecuteQuerySQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+              example:
+                results:
+                  - series:
+                      - name: mytable
+                        columns:
+                          - time
+                          - value
+                        values:
+                          - - "2024-02-02T12:00:00Z"
+                            - 42
+            text/csv:
+              schema:
+                type: string
+            application/vnd.apache.parquet:
+              schema:
+                type: string
+            application/jsonl:
+              schema:
+                type: string
+        "400":
+          description: Bad request.
+        "401":
+          $ref: "#/components/responses/Unauthorized"
+        "403":
+          description: Access denied.
+        "404":
+          description: Database not found.
+        "405":
+          description: Method not allowed.
+        "422":
+          description: Unprocessable entity.
+      summary: Execute SQL query
+      description: Executes an SQL query to retrieve data from the specified database.
+      parameters:
+        - $ref: "#/components/parameters/db"
+        - $ref: "#/components/parameters/querySqlParam"
+        - $ref: "#/components/parameters/format"
+        - $ref: "#/components/parameters/AcceptQueryHeader"
+        - $ref: "#/components/parameters/ContentType"
+        - name: params
+          in: query
+          required: false
+          schema:
+            type: string
+          description: >-
+            JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries.
+      tags:
+        - Query data
+    post:
+      operationId: PostExecuteQuerySQL
+      responses:
+        "200":
+          description: Success. The response body contains query results.
+ content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to + InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer 
DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. 
+ tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). 
+ schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. 
+ + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. + + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) parameters: - - $ref: '#/components/parameters/db' - - name: trigger_name - in: query - required: true + - name: Accept + in: header schema: type: string - - name: force - in: query + default: application/json + enum: + - application/json + - application/csv + - text/csv required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. schema: type: boolean default: false - responses: - '200': - description: Success. The processing engine trigger has been deleted. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/disable: - post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' - responses: - '200': - description: Success. The processing engine trigger has been disabled. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. 
+ - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the + specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/enable: + - Query data + - Compatibility endpoints post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. 
- parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + operationId: PostExecuteV1Query responses: - '200': - description: Success. The processing engine trigger is enabled. - '400': + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_packages: - post: - operationId: PostInstallPluginPackages - summary: Install plugin packages - description: | - Installs the specified Python packages into the processing engine plugin environment. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. - This endpoint is synchronous and blocks until the packages are installed. 
- ### Related guides + #### Related + - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) parameters: - - $ref: '#/components/parameters/ContentType' + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. requestBody: - required: true content: application/json: schema: type: object properties: - packages: - type: array - items: - type: string + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: description: | - A list of Python package names to install. - Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - responses: - '200': - description: Success. The packages are installed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements - description: | - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + If true, the response is divided into chunks of size `chunk_size`. 
+ type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - This endpoint is synchronous and blocks until the requirements are installed. - ### Related + - `h` for hours - - [Processing engine and Python plugins](/influxdb3/core/plugins/) - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: true - content: - application/json: + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: schema: type: object properties: - requirements_location: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. type: string + chunked: description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt - required: - - requirements_location - example: - requirements_location: requirements.txt - responses: - '200': - description: Success. The requirements have been installed. - '400': - description: Bad request. 
- '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Processing engine - /api/v3/plugin_test/wal: - post: - operationId: PostTestWALPlugin - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/plugin_test/schedule: - post: - operationId: PostTestSchedulingPlugin - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. - responses: - '200': - description: Success. The plugin test has been executed. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not enabled. - tags: - - Processing engine - /api/v3/engine/{request_path}: - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. - For example, if you define a trigger with the following: - ```json - trigger_specification: "request:hello-world" - ``` + - `h` for hours - then, the HTTP API exposes the following plugin endpoint: + - `m` for minutes - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - get: - operationId: GetProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. 
- The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `s` for seconds - An On Request plugin implements the following signature: + - `ms` for milliseconds - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + - `u` or `µ` for microseconds - The response depends on the plugin implementation. - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest - summary: On Request processing engine plugin request - description: | - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + - `ns` for nanoseconds - An On Request plugin implements the following signature: - ```python - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - ``` + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) + with the specified precision - The response depends on the plugin implementation. - parameters: - - $ref: '#/components/parameters/ContentType' - requestBody: - required: false - content: - application/json: + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean + required: + - q + application/vnd.influxql: schema: - type: object - additionalProperties: true - responses: - '200': - description: Success. The plugin request has been executed. - '400': - description: Malformed request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Plugin not found. - '500': - description: Processing failure. + type: string + description: InfluxQL query string sent as the request body. tags: - - Processing engine - /api/v3/configure/token/admin: + - Query data + - Compatibility endpoints + /write: post: - operationId: PostCreateAdminToken - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. + operationId: PostV1Write responses: - '201': + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] - responses: - '201': - description: Success. The admin token has been regenerated. + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. 
+ - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token - /api/v3/configure/token: - delete: - operationId: DeleteToken - summary: Delete token - description: | - Deletes a token. + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. 
+ + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - - name: id + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp in: query - required: true + required: false schema: type: string description: | - The ID of the token to delete. - responses: - '204': - description: Success. The token has been deleted. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Token not found. - tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: - post: - operationId: PostCreateNamedAdminToken - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is an admin token with a specific name identifier. - parameters: - - name: name + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency in: query - required: true + required: false + schema: + type: string + description: | + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false schema: type: string description: | - The name for the admin token. - responses: - '201': + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. 
- content: - application/json: - schema: - $ref: '#/components/schemas/AdminTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - '409': - description: A token with this name already exists. - tags: - - Authentication - - Token - /api/v3/plugins/files: - put: - operationId: PutPluginFile - summary: Update plugin file - description: | - Updates a plugin file in the plugin directory. - x-security-note: Requires an admin token - responses: - '204': - description: Success. The plugin file has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - summary: Update plugin directory - description: | - Updates the plugin directory configuration. - x-security-note: Requires an admin token - responses: - '204': - description: Success. The plugin directory has been updated. - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Forbidden. Admin token required. + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: - - Processing engine + - Compatibility endpoints + - Write data components: parameters: AcceptQueryHeader: @@ -1921,7 +2344,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. 
schema: - $ref: '#/components/schemas/ContentEncoding' + $ref: "#/components/schemas/ContentEncoding" required: false ContentLength: name: Content-Length @@ -1929,7 +2352,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: '#/components/schemas/ContentLength' + $ref: "#/components/schemas/ContentLength" ContentType: name: Content-Type description: | @@ -1973,20 +2396,20 @@ components: in: query required: false schema: - $ref: '#/components/schemas/AcceptPartial' + $ref: "#/components/schemas/AcceptPartial" compatibilityPrecisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWriteCompatibility' + $ref: "#/components/schemas/PrecisionWriteCompatibility" description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: true + required: false schema: - $ref: '#/components/schemas/PrecisionWrite' + $ref: "#/components/schemas/PrecisionWrite" description: The precision for unix timestamps in the line protocol batch. querySqlParam: name: q @@ -2002,22 +2425,24 @@ components: in: query required: false schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" formatRequired: name: format in: query required: true schema: - $ref: '#/components/schemas/Format' + $ref: "#/components/schemas/Format" v1UsernameParam: name: u in: query required: false schema: type: string - description: | + description: > Username for v1 compatibility authentication. - When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. + + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. 
v1PasswordParam: name: p in: query @@ -2026,7 +2451,7 @@ components: type: string description: | Password for v1 compatibility authentication. - For query string authentication, pass an admin token. + For query string authentication, pass a database token with write permissions as this parameter. InfluxDB 3 checks that the `p` value is an authorized token. requestBodies: lineProtocolRequestBody: @@ -2050,7 +2475,7 @@ components: content: application/json: schema: - $ref: '#/components/schemas/QueryRequestObject' + $ref: "#/components/schemas/QueryRequestObject" schemas: AdminTokenObject: type: object @@ -2073,23 +2498,31 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: '2025-04-18T14:02:45.331Z' + created_at: "2025-04-18T14:02:45.331Z" expiry: null ContentEncoding: type: string enum: - gzip - identity - description: | + description: > Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + #### Multi-member gzip support - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + This allows you to: + - Concatenate multiple gzip files and send them in a single request + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2103,8 +2536,6 @@ components: ContentLength: type: integer description: The length in decimal number of octets. - Database: - type: string AcceptPartial: type: boolean default: true @@ -2115,9 +2546,12 @@ components: - json - csv - parquet + - json_lines - jsonl - description: | + - pretty + description: |- The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. 
NoSync: type: boolean default: false @@ -2133,11 +2567,14 @@ components: - ms - s - us + - u - ns + - "n" type: string - description: | + description: |- The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. + Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. PrecisionWrite: enum: - auto @@ -2173,6 +2610,7 @@ components: - json - csv - parquet + - json_lines - jsonl - pretty params: @@ -2194,9 +2632,13 @@ components: properties: db: type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. retention_period: type: string - description: | + description: |- The retention period for the database. Specifies how long data should be retained. Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d @@ -2231,6 +2673,12 @@ components: required: - name - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d required: - db - table @@ -2325,56 +2773,93 @@ components: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: '#/components/schemas/TriggerSettings' + - $ref: "#/components/schemas/TriggerSettings" trigger_specification: - type: string - description: | + description: > Specifies when and how the processing engine trigger should be invoked. 
+ ## Supported trigger specifications: + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + ### Interval-based scheduling + Format: `every:DURATION` - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + **Maximum interval**: 1 year + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + ### On-demand triggers + Format: `request:REQUEST_PATH` + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2420,6 +2905,116 @@ components: required: - run_async - error_behavior + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. 
+ properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. + properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. 
+ required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content ShowDatabasesResponse: type: object properties: @@ -2442,7 +3037,7 @@ components: - time - value values: - - - '2024-02-02T12:00:00Z' + - - "2024-02-02T12:00:00Z" - 42 ErrorMessage: type: object @@ -2452,38 +3047,6 @@ components: data: type: object nullable: true - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - readOnly: true - type: string - err: - description: Stack of errors that occurred during processing of the request. Useful for debugging. - readOnly: true - type: string - line: - description: First line in the request body that contains malformed data. - format: int32 - readOnly: true - type: integer - message: - description: Human-readable message. - readOnly: true - type: string - op: - description: Describes the logical code operation when the error occurred. Useful for debugging. - readOnly: true - type: string - required: - - code EpochCompatibility: description: | A unix timestamp precision. @@ -2512,62 +3075,13 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. - UpdateTableRequest: - type: object - properties: - db: - type: string - description: The name of the database containing the table. - table: - type: string - description: The name of the table to update. - retention_period: - type: string - description: | - The retention period for the table. Specifies how long data in this table should be retained. 
- Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - description: Request schema for updating table configuration. - LicenseResponse: - type: object - properties: - license_type: - type: string - description: The type of license (for example, "enterprise", "trial"). - example: enterprise - expires_at: - type: string - format: date-time - description: The expiration date of the license in ISO 8601 format. - example: '2025-12-31T23:59:59Z' - features: - type: array - items: - type: string - description: List of features enabled by the license. - example: - - clustering - - processing_engine - - advanced_auth - status: - type: string - enum: - - active - - expired - - invalid - description: The current status of the license. - example: active - description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" BadRequest: description: | Request failed. Possible reasons: @@ -2578,19 +3092,19 @@ components: content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" Forbidden: description: Access denied. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" NotFound: description: Resource not found. content: application/json: schema: - $ref: '#/components/schemas/ErrorMessage' + $ref: "#/components/schemas/ErrorMessage" headers: ClusterUUID: description: | @@ -2607,88 +3121,126 @@ components: BasicAuthentication: type: http scheme: basic - description: | + description: >- Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. 
- When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. + + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + and ignores the `username` part of the decoded credential. + + ### Syntax + + + ```http + + Authorization: Basic + + ``` + + ### Example + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ --user "":"AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - **`AUTH_TOKEN`**: an admin token - #### Related guides + - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/core/admin/tokens/) + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: | + description: >- Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ ### Syntax + ```http - http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN - http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + ``` + ### Examples + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` + Replace the following: + - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - **`AUTH_TOKEN`**: an admin token + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: - # ?p=DATABASE_TOKEN + + # ?p=AUTH_TOKEN + ####################################### + curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` + Replace the following: - - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: an [admin token](/influxdb3/core/admin/tokens/) - #### Related guides + - **`DATABASE_NAME`**: the database to query - - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/core/admin/tokens/) + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -2701,8 +3253,7 @@ components: Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. - For the header value, provide the word `Bearer` followed by a space and an admin token. - + For the header value, provide the word `Bearer` followed by a space and a database token. 
### Syntax @@ -2717,7 +3268,7 @@ components: --header "Authorization: Bearer AUTH_TOKEN" ``` TokenAuthentication: - description: | + description: |- Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. @@ -2744,10 +3295,6 @@ components: --header "Authorization: Token AUTH_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` - - ### Related guides - - - [Manage tokens](/influxdb3/core/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/static/openapi/influxdb3-enterprise-openapi.yaml b/static/openapi/influxdb3-enterprise-openapi.yaml new file mode 100644 index 0000000000..3d6c70af5f --- /dev/null +++ b/static/openapi/influxdb3-enterprise-openapi.yaml @@ -0,0 +1,3799 @@ +openapi: 3.0.3 +info: + title: InfluxDB 3 Enterprise API Service + description: | + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. + Use this API to: + + - Write data to InfluxDB 3 Enterprise databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + To download the OpenAPI specification for this API, use the **Download** button above. 
+ version: v3.8.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 +servers: + - url: https://{baseurl} + description: InfluxDB 3 Enterprise API URL + variables: + baseurl: + enum: + - localhost:8181 + default: localhost:8181 + description: InfluxDB 3 Enterprise URL +security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] +tags: + - name: Authentication + description: | + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + + x-traitTag: true + x-related: + - title: Authenticate v1 API requests + href: /influxdb3/enterprise/guides/api-compatibility/v1/ + - title: Manage tokens + href: /influxdb3/enterprise/admin/tokens/ + - name: Cache data + description: |- + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create an DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. 
A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field. + + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/enterprise/admin/distinct-value-cache/ + - title: Manage the Last Value Cache + href: /influxdb3/enterprise/admin/last-value-cache/ + - name: Compatibility endpoints + description: > + InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. + + + ### Write data using v1- or v2-compatible endpoints + + + - [`/api/v2/write` endpoint](#operation/PostV2Write) + for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 + write workloads to InfluxDB 3. + + + For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). + + + All endpoints accept the same line protocol format. + + + ### Query data + + + Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads + using InfluxQL. + + + For new workloads, use one of the following: + + + - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. + + - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using + InfluxQL. 
+ + - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using + Flight APIs, see [InfluxDB 3 client + libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). + + + ### Server information + + + Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x + clients. + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + - name: Database + description: Manage databases + - description: > + Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API endpoints. + + Many endpoints may require other parameters in the query string or in the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the + request body. + + + | Header | Value type | Description | + + |:------------------------ |:--------------------- |:-------------------------------------------| + + | `Accept` | string | The content type that the client can understand. | + + | `Authorization` | string | The authorization scheme and credential. | + + | `Content-Length` | integer | The size of the entity-body, in bytes. 
| + + | `Content-Type` | string | The format of the data in the request body. | + name: Headers and parameters + x-traitTag: true + - name: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. + + + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load + and trigger Python plugins in response to events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks for different database events. + + + To get started with the processing engine, see the [Processing engine and Python + plugins](/influxdb3/enterprise/processing-engine/) guide. + x-related: + - title: Processing engine and Python plugins + href: /influxdb3/enterprise/plugins/ + - name: Query data + description: Query data using SQL or InfluxQL + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + - name: Quick start + description: > + 1. [Create an admin token](#section/Authentication) to authorize API requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. 
+ + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) + guide. + x-traitTag: true + - name: Server information + description: Retrieve server metrics, status, and version information + - name: Table + description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: | + Write data to InfluxDB 3 using line protocol format. + + #### Timestamp precision across write APIs + + InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions. + The following table compares timestamp precision support across v1, v2, and v3 write APIs: + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | + |-----------|---------------|----------------------|-------------------------| + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + All timestamps are stored internally as nanoseconds. +paths: + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. 
+ + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + tags: + - Server information + - Compatibility endpoints + /api/v2/write: + post: + operationId: PostV2Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v2-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x + client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + in: header + name: Content-Encoding + schema: + default: identity + description: | + Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. 
+ enum: + - gzip + - identity + type: string + - description: | + The size of the entity-body, in bytes, sent to InfluxDB. + in: header + name: Content-Length + schema: + description: The length in decimal number of octets. + type: integer + - description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + in: header + name: Accept + schema: + default: application/json + description: Error content type. + enum: + - application/json + type: string + - name: bucket + in: query + required: true + schema: + type: string + description: |- + A database name. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + + This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. + - name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Compatibility endpoints + - Write data + /api/v3/configure/database: + delete: + operationId: DeleteConfigureDatabase + parameters: + - $ref: "#/components/parameters/db" + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. + - name: remove_tables + in: query + required: false + schema: + type: boolean + default: false + description: | + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). 
+ Has no effect when `data_only=false`. + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success. Database deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. + tags: + - Database + get: + operationId: GetConfigureDatabase + responses: + "200": + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: "#/components/schemas/ShowDatabasesResponse" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: "#/components/parameters/formatRequired" + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. 
+ tags: + - Database + post: + operationId: PostConfigureDatabase + responses: + "200": + description: Success. Database created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: Database already exists. + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateDatabaseRequest" + tags: + - Database + put: + operationId: update_database + responses: + "200": + description: Success. The database has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Update a database + description: | + Updates database configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateDatabaseRequest" + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. + parameters: + - $ref: "#/components/parameters/db" + responses: + "204": + description: Success. The database retention period has been removed. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + tags: + - Database + /api/v3/configure/distinct_cache: + delete: + operationId: DeleteConfigureDistinctCache + responses: + "200": + description: Success. The distinct cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete distinct cache + description: Deletes a distinct cache. 
+ parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureDistinctCache + responses: + "201": + description: Success. The distinct cache has been created. + "400": + description: > + Bad request. + + + The server responds with status `400` if the request would overwrite an existing cache with a different + configuration. + "409": + description: Conflict. A distinct cache with this configuration already exists. + summary: Create distinct cache + description: Creates a distinct cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DistinctCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/last_cache: + delete: + operationId: DeleteConfigureLastCache + responses: + "200": + description: Success. The last cache has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Delete last cache + description: Deletes a last cache. + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the last cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the last cache to delete. + tags: + - Cache data + - Table + post: + operationId: PostConfigureLastCache + responses: + "201": + description: Success. Last cache created. + "400": + description: Bad request. A cache with this name already exists or the request is malformed. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Cache not found. + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/LastCacheCreateRequest" + tags: + - Cache data + - Table + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: |- + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + "200": + description: Success. The packages are installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: > + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing + engine plugin environment. + + + This endpoint is synchronous and blocks until the requirements are installed. 
+ + + ### Related + + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + "200": + description: Success. The requirements have been installed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: Creates a processing engine trigger with the specified plugin file and trigger specification. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessingEngineTriggerRequest" + examples: + schedule_cron: + summary: Schedule trigger using cron + description: > + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to + Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. 
+ The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. 
+ value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + "200": + description: Success. Processing engine trigger created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: "#/components/parameters/db" + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false + description: | + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. + responses: + "200": + description: Success. The processing engine trigger has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. 
+ tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger has been disabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: trigger_name + in: query + required: true + schema: + type: string + description: The name of the trigger. + responses: + "200": + description: Success. The processing engine trigger is enabled. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/table: + delete: + operationId: DeleteConfigureTable + parameters: + - $ref: "#/components/parameters/db" + - name: table + in: query + required: true + schema: + type: string + - name: data_only + in: query + required: false + schema: + type: boolean + default: false + description: | + Delete only data while preserving the table schema and all associated resources + (last value caches, distinct value caches). + When `false` (default), the entire table is deleted. 
+ - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: |- + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + + + Also accepts special string values: + - `now` — hard delete immediately + - `never` — soft delete only (default behavior) + - `default` — use the system default hard deletion time + responses: + "200": + description: Success (no content). The table has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. + tags: + - Table + post: + operationId: PostConfigureTable + responses: + "200": + description: Success. The table has been created. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database not found. + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTableRequest" + tags: + - Table + put: + operationId: PatchConfigureTable + responses: + "200": + description: Success. The table has been updated. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Table not found. + summary: Update a table + description: | + Updates table configuration, such as retention period. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateTableRequest" + tags: + - Table + x-enterprise-only: true + /api/v3/configure/token: + delete: + operationId: DeleteToken + parameters: + - name: token_name + in: query + required: true + schema: + type: string + description: The name of the token to delete. + responses: + "200": + description: Success. The token has been deleted. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Token not found. + summary: Delete token + description: | + Deletes a token. + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + responses: + "201": + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + "201": + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + responses: + "201": + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/AdminTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + "409": + description: A token with this name already exists. + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + tags: + - Authentication + - Token + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + token_name: + type: string + description: The name for the admin token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. If not provided, the token does not expire. + nullable: true + required: + - token_name + /api/v3/engine/{request_path}: + get: + operationId: GetProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. + "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + responses: + "200": + description: Success. The plugin request has been executed. + "400": + description: Malformed request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not found. 
+ "500": + description: Processing failure. + summary: On Request processing engine plugin request + description: > + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to + the plugin. + + + An On Request plugin implements the following signature: + + + ```python + + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + + ``` + + + The response depends on the plugin implementation. + parameters: + - $ref: "#/components/parameters/ContentType" + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + tags: + - Processing engine + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + /api/v3/enterprise/configure/file_index: + post: + operationId: configure_file_index_create + summary: Create a file index + description: >- + Creates a file index for a database or table. + + + A file index improves query performance by indexing data files based on specified columns, enabling the query + engine to skip irrelevant files during query execution. + + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexCreateRequest" + responses: + "200": + description: Success. The file index has been created. + "400": + description: Bad request. 
+ "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Database + - Table + delete: + operationId: configure_file_index_delete + summary: Delete a file index + description: |- + Deletes a file index from a database or table. + + This endpoint is only available in InfluxDB 3 Enterprise. + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/FileIndexDeleteRequest" + responses: + "200": + description: Success. The file index has been deleted. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database, table, or file index not found. + tags: + - Database + - Table + /api/v3/enterprise/configure/node/stop: + post: + operationId: stop_node + summary: Mark a node as stopped + description: >- + Marks a node as stopped in the catalog, freeing up the licensed cores it was using for other nodes. + + + Use this endpoint after you have already stopped the physical instance (for example, using `kill` or stopping + the container). This endpoint does not shut down the running process — you must stop the instance first. + + + When the node is marked as stopped: + + 1. Licensed cores from the stopped node are freed for reuse + + 2. Other nodes in the cluster see the update after their catalog sync interval + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 stop node](/influxdb3/enterprise/reference/cli/influxdb3/stop/node/) + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/StopNodeRequest" + responses: + "200": + description: Success. The node has been marked as stopped. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Node not found. 
+ tags: + - Server information + /api/v3/enterprise/configure/table/retention_period: + post: + operationId: create_or_update_retention_period_for_table + summary: Set table retention period + description: >- + Sets or updates the retention period for a specific table. + + + Use this endpoint to control how long data in a table is retained independently of the database-level retention + period. + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. + - name: duration + in: query + required: true + schema: + type: string + description: The retention period as a human-readable duration (for example, "30d", "24h", "1y"). + responses: + "204": + description: Success. The table retention period has been set. + "400": + description: Bad request. Invalid duration format. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + delete: + operationId: delete_retention_period_for_table + summary: Clear table retention period + description: >- + Removes the retention period from a specific table, reverting to the database-level retention period (or + infinite retention if no database-level retention is set). + + + This endpoint is only available in InfluxDB 3 Enterprise. + + + #### Related + + + - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) + x-enterprise-only: true + parameters: + - name: db + in: query + required: true + schema: + type: string + description: The database name. + - name: table + in: query + required: true + schema: + type: string + description: The table name. 
+ responses: + "204": + description: Success. The table retention period has been cleared. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Database or table not found. + tags: + - Table + /api/v3/enterprise/configure/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. + + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + "201": + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: "#/components/schemas/ResourceTokenObject" + "401": + $ref: "#/components/responses/Unauthorized" + tags: + - Authentication + - Token + x-enterprise-only: true + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTokenWithPermissionsRequest" + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SchedulePluginTestRequest" + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + responses: + "200": + description: Success. The plugin test has been executed. + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: Plugin not enabled. + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/WALPluginTestRequest" + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginDirectoryRequest" + responses: + "200": + description: Success. The plugin directory has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + summary: Update a multi-file plugin directory + description: | + Replaces all files in a multi-file plugin directory. The + `plugin_name` must match a registered trigger name. Each entry in + the `files` array specifies a `relative_path` and `content`—the + server writes them into the trigger's plugin directory. + + Use this endpoint to update multi-file plugins (directories with + `__init__.py` and supporting modules). For single-file plugins, + use `PUT /api/v3/plugins/files` instead. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/plugins/files: + post: + operationId: create_plugin_file + summary: Create a plugin file + description: | + Creates a single plugin file in the plugin directory. Writes the + `content` to a file named after `plugin_name`. Does not require an + existing trigger—use this to upload plugin files before creating + triggers that reference them. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been created. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. 
+ tags: + - Processing engine + x-security-note: Requires an admin token + put: + operationId: PutPluginFile + summary: Update a plugin file + description: | + Updates a single plugin file for an existing trigger. The + `plugin_name` must match a registered trigger name—the server + resolves the trigger's `plugin_filename` and overwrites that file + with the provided `content`. + + To upload a new plugin file before creating a trigger, use + `POST /api/v3/plugins/files` instead. To update a multi-file + plugin directory, use `PUT /api/v3/plugins/directory`. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PluginFileRequest" + responses: + "200": + description: Success. The plugin file has been updated. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Forbidden. Admin token required. + "500": + description: Plugin not found. The `plugin_name` does not match any registered trigger. + tags: + - Processing engine + x-security-note: Requires an admin token + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+ parameters: + - $ref: "#/components/parameters/dbQueryParam" + - name: q + in: query + required: true + schema: + type: string + - name: format + in: query + required: false + schema: + type: string + - $ref: "#/components/parameters/AcceptQueryHeader" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQueryInfluxQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: "#/components/parameters/db" + - $ref: "#/components/parameters/querySqlParam" + - $ref: "#/components/parameters/format" + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + - name: params + in: query + required: false + schema: + type: string + description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. + description: JSON-encoded query parameters for parameterized queries. + tags: + - Query data + post: + operationId: PostExecuteQuerySQL + responses: + "200": + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. 
+ parameters: + - $ref: "#/components/parameters/AcceptQueryHeader" + - $ref: "#/components/parameters/ContentType" + requestBody: + $ref: "#/components/requestBodies/queryRequestBody" + tags: + - Query data + /api/v3/write_lp: + post: + operationId: PostWriteLP + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/accept_partial" + - $ref: "#/components/parameters/precisionParam" + - name: no_sync + in: query + schema: + $ref: "#/components/schemas/NoSync" + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: "#/components/schemas/LineProtocol" + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: "#/components/parameters/ContentEncoding" + - $ref: "#/components/parameters/ContentLength" + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + "422": + description: Unprocessable entity. + summary: Write line protocol + description: > + Writes line protocol to the specified database. + + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format + to InfluxDB. 
+ + Use query parameters to specify options for writing data. + + + #### Features + + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees + + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + + #### Auto precision detection + + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + + the timestamp precision based on the magnitude of the timestamp value: + + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + + - Larger timestamps → Nanosecond precision (no conversion needed) + + + #### Related + + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + requestBody: + $ref: "#/components/requestBodies/lineProtocolRequestBody" + tags: + - Write data + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: > + curl --request POST + 
"http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + /health: + get: + operationId: GetHealth + responses: + "200": + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + "401": + description: Unauthorized. Authentication is required. + "500": + description: Service is unavailable. + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + /metrics: + get: + operationId: GetMetrics + responses: + "200": + description: Success + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + tags: + - Server information + /ping: + get: + operationId: GetPing + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). 
+ schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. + example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + x-client-method: ping + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + tags: + - Server information + post: + operationId: ping + responses: + "200": + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: 3.8.0 + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: 3.8.0 + revision: + type: string + description: The git revision hash for the build. 
+ example: 83b589b883 + process_id: + type: string + description: A unique identifier for the server process. + example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 + "401": + description: Unauthorized. Authentication is required. + "404": + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + summary: Ping the server + description: Returns version information for the server. Accepts POST in addition to GET. + tags: + - Server information + /query: + get: + operationId: GetV1ExecuteQuery + responses: + "200": + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: "#/components/schemas/QueryResponse" + application/csv: + schema: + type: string + headers: + Content-Type: + description: > + The content type of the response. + + Default is `application/json`. + + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is + `application/csv` + + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + "400": + description: Bad request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "404": + description: Database not found. + "405": + description: Method not allowed. + "422": + description: Unprocessable entity. + summary: Execute InfluxQL query (v1-compatible) + description: > + Executes an InfluxQL query to retrieve data from the specified database. + + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + + Use query parameters to specify the database and the InfluxQL query. 
+ + + #### Related + + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query + data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: > + The content type that the client can understand. + + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: > + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) + with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond + precision. + in: query + schema: + $ref: "#/components/schemas/EpochCompatibility" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. 
Honored but discouraged. InfluxDB 3 doesn't use retention policies.
+ - name: Authorization
+ in: header
+ required: false
+ schema:
+ type: string
+ description: |
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic CREDENTIALS` - Basic authentication with base64-encoded credentials (username is ignored)
+ tags:
+ - Query data
+ - Compatibility endpoints
+ post:
+ operationId: PostExecuteV1Query
+ responses:
+ "200":
+ description: |
+ Success. The response body contains query results.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/QueryResponse"
+ application/csv:
+ schema:
+ type: string
+ headers:
+ Content-Type:
+ description: >
+ The content type of the response.
+
+ Default is `application/json`.
+
+
+ If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is
+ `application/csv`
+
+ and the response is formatted as CSV.
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/csv
+ "400":
+ description: Bad request.
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ description: Access denied.
+ "404":
+ description: Database not found.
+ "405":
+ description: Method not allowed.
+ "422":
+ description: Unprocessable entity.
+ summary: Execute InfluxQL query (v1-compatible)
+ description: >
+ Executes an InfluxQL query to retrieve data from the specified database.
+
+
+ #### Related
+
+
+ - [Use the InfluxDB v1 HTTP query API and InfluxQL to query
+ data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/)
+ parameters:
+ - name: Accept
+ in: header
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/csv
+ - text/csv
+ required: false
+ description: >
+ The content type that the client can understand.
+ + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is + formatted as CSV. + + + Returns an error if the format is invalid or non-UTF8. + requestBody: + content: + application/json: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/x-www-form-urlencoded: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. 
+ type: integer + default: 10000 + epoch: + description: > + A unix timestamp precision. + + + - `h` for hours + + - `m` for minutes + + - `s` for seconds + + - `ms` for milliseconds + + - `u` or `µ` for microseconds + + - `ns` for nanoseconds + + + Formats timestamps as [unix (epoch) + timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with + nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. + type: boolean + required: + - q + application/vnd.influxql: + schema: + type: string + description: InfluxQL query string sent as the request body. + tags: + - Query data + - Compatibility endpoints + /write: + post: + operationId: PostV1Write + responses: + "204": + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: "#/components/headers/ClusterUUID" + "400": + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + "401": + $ref: "#/components/responses/Unauthorized" + "403": + description: Access denied. + "413": + description: Request entity too large. + summary: Write line protocol (v1-compatible) + description: > + Writes line protocol to the specified database. + + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x + client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + + Use this endpoint to send data in [line + protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + + Use query parameters to specify options for writing data. + + + #### Related + + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - $ref: "#/components/parameters/dbWriteParam" + - $ref: "#/components/parameters/compatibilityPrecisionParam" + - $ref: "#/components/parameters/v1UsernameParam" + - $ref: "#/components/parameters/v1PasswordParam" + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients.
+ - name: Authorization
+ in: header
+ required: false
+ schema:
+ type: string
+ description: |
+ Authorization header for token-based authentication.
+ Supported schemes:
+ - `Bearer AUTH_TOKEN` - OAuth bearer token scheme
+ - `Token AUTH_TOKEN` - InfluxDB v2 token scheme
+ - `Basic CREDENTIALS` - Basic authentication with base64-encoded credentials (username is ignored)
+ - name: Content-Type
+ in: header
+ description: |
+ The content type of the request payload.
+ schema:
+ $ref: "#/components/schemas/LineProtocol"
+ required: false
+ - name: Accept
+ in: header
+ description: |
+ The content type that the client can understand.
+ Writes only return a response body if they fail (partially or completely)--for example,
+ due to a syntax problem or type mismatch.
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ required: false
+ - $ref: "#/components/parameters/ContentEncoding"
+ - $ref: "#/components/parameters/ContentLength"
+ requestBody:
+ $ref: "#/components/requestBodies/lineProtocolRequestBody"
+ tags:
+ - Compatibility endpoints
+ - Write data
+components:
+ parameters:
+ AcceptQueryHeader:
+ name: Accept
+ in: header
+ schema:
+ type: string
+ default: application/json
+ enum:
+ - application/json
+ - application/jsonl
+ - application/vnd.apache.parquet
+ - text/csv
+ required: false
+ description: |
+ The content type that the client can understand.
+ ContentEncoding:
+ name: Content-Encoding
+ in: header
+ description: |
+ The compression applied to the line protocol in the request payload.
+ To send a gzip payload, pass the `Content-Encoding: gzip` header.
+ schema:
+ $ref: "#/components/schemas/ContentEncoding"
+ required: false
+ ContentLength:
+ name: Content-Length
+ in: header
+ description: |
+ The size of the entity-body, in bytes, sent to InfluxDB.
+ schema: + $ref: "#/components/schemas/ContentLength" + ContentType: + name: Content-Type + description: | + The format of the data in the request body. + in: header + schema: + type: string + enum: + - application/json + required: false + db: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + dbWriteParam: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + dbQueryParam: + name: db + in: query + required: false + schema: + type: string + description: | + The name of the database. + + If you provide a query that specifies the database, you can omit the 'db' parameter from your request. + accept_partial: + name: accept_partial + in: query + required: false + schema: + $ref: "#/components/schemas/AcceptPartial" + compatibilityPrecisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWriteCompatibility" + description: The precision for unix timestamps in the line protocol batch. + precisionParam: + name: precision + in: query + required: false + schema: + $ref: "#/components/schemas/PrecisionWrite" + description: The precision for unix timestamps in the line protocol batch. + querySqlParam: + name: q + in: query + required: true + schema: + type: string + format: SQL + description: | + The query to execute. + format: + name: format + in: query + required: false + schema: + $ref: "#/components/schemas/Format" + formatRequired: + name: format + in: query + required: true + schema: + $ref: "#/components/schemas/Format" + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: > + Username for v1 compatibility authentication. 
+ + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any + arbitrary string for compatibility with InfluxDB 1.x clients. + v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass a database token with write permissions as this parameter. + InfluxDB 3 checks that the `p` value is an authorized token. + requestBodies: + lineProtocolRequestBody: + required: true + content: + text/plain: + schema: + type: string + examples: + line: + summary: Example line protocol + value: measurement,tag=value field=1 1234567890 + multiline: + summary: Example line protocol with UTF-8 characters + value: | + measurement,tag=value field=1 1234567890 + measurement,tag=value field=2 1234567900 + measurement,tag=value field=3 1234568000 + queryRequestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/QueryRequestObject" + schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: "2025-04-18T14:02:45.331Z" + expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + actions: + type: array + items: + type: string + enum: + - read + - write + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + expiry_secs: + type: integer + description: The expiration time in seconds. 
+ example: + token_name: All system information + permissions: + - resource_type: system + actions: + - read + resource_names: + - "*" + expiry_secs: 300000 + ContentEncoding: + type: string + enum: + - gzip + - identity + description: > + Content coding. + + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + + + #### Multi-member gzip support + + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC + 1952](https://www.rfc-editor.org/rfc/rfc1952)). + + This allows you to: + + - Concatenate multiple gzip files and send them in a single request + + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + + - Simplify batch operations using standard compression tools + default: identity + LineProtocol: + type: string + enum: + - text/plain + - text/plain; charset=utf-8 + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. + default: text/plain; charset=utf-8 + ContentLength: + type: integer + description: The length in decimal number of octets. + Database: + type: string + AcceptPartial: + type: boolean + default: true + description: Accept partial writes. + Format: + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + description: |- + The format of data in the response body. + `json_lines` is the canonical name; `jsonl` is accepted as an alias. + NoSync: + type: boolean + default: false + description: | + Acknowledges a successful write without waiting for WAL persistence. + + #### Related + + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + PrecisionWriteCompatibility: + enum: + - ms + - s + - us + - u + - ns + - "n" + type: string + description: |- + The precision for unix timestamps in the line protocol batch. 
+ Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. + Optional — defaults to nanosecond precision if omitted. + PrecisionWrite: + enum: + - auto + - nanosecond + - microsecond + - millisecond + - second + type: string + description: | + The precision for unix timestamps in the line protocol batch. + + Supported values: + - `auto` (default): Automatically detects precision based on timestamp magnitude + - `nanosecond`: Nanoseconds + - `microsecond`: Microseconds + - `millisecond`: Milliseconds + - `second`: Seconds + QueryRequestObject: + type: object + properties: + db: + description: | + The name of the database to query. + Required if the query (`q`) doesn't specify the database. + type: string + q: + description: The query to execute. + type: string + format: + description: The format of the query results. + type: string + enum: + - json + - csv + - parquet + - json_lines + - jsonl + - pretty + params: + description: | + Additional parameters for the query. + Use this field to pass query parameters. + type: object + additionalProperties: true + required: + - db + - q + example: + db: mydb + q: SELECT * FROM mytable + format: json + params: {} + CreateDatabaseRequest: + type: object + properties: + db: + type: string + pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: 7d + required: + - db + CreateTableRequest: + type: object + properties: + db: + type: string + table: + type: string + tags: + type: array + items: + type: string + fields: + type: array + items: + type: object + properties: + name: + type: string + type: + type: string + enum: + - utf8 + - int64 + - uint64 + - float64 + - bool + required: + - name + - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + - tags + - fields + DistinctCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + columns: + type: array + items: + type: string + max_cardinality: + type: integer + description: Optional maximum cardinality. + max_age: + type: integer + description: Optional maximum age in seconds. + required: + - db + - table + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + max_cardinality: 1000 + max_age: 3600 + LastCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + name: + type: string + description: Optional cache name. + key_columns: + type: array + items: + type: string + description: Optional list of key columns. + value_columns: + type: array + items: + type: string + description: Optional list of value columns. + count: + type: integer + description: Optional count. + ttl: + type: integer + description: Optional time-to-live in seconds. 
+ required: + - db + - table + example: + db: mydb + table: mytable + key_columns: + - tag1 + value_columns: + - field1 + count: 100 + ttl: 3600 + ProcessingEngineTriggerRequest: + type: object + properties: + db: + type: string + plugin_filename: + type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. + node_spec: + $ref: "#/components/schemas/ApiNodeSpec" + trigger_name: + type: string + trigger_settings: + description: | + Configuration for trigger error handling and execution behavior. + allOf: + - $ref: "#/components/schemas/TriggerSettings" + trigger_specification: + description: > + Specifies when and how the processing engine trigger should be invoked. + + + ## Supported trigger specifications: + + + ### Cron-based scheduling + + Format: `cron:CRON_EXPRESSION` + + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + + ``` + + ┌───────────── second (0-59) + + │ ┌───────────── minute (0-59) + + │ │ ┌───────────── hour (0-23) + + │ │ │ ┌───────────── day of month (1-31) + + │ │ │ │ ┌───────────── month (1-12) + + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + + │ │ │ │ │ │ + + * * * * * * + + ``` + + Examples: + + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + + - `cron:0 0 0 1 * *` - First day of every month at midnight + + + ### Interval-based scheduling + + Format: `every:DURATION` + + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` + (years): + + - `every:30s` - Every 30 seconds + + - `every:5m` - Every 5 minutes + + - `every:1h` - Every hour + + - `every:1d` - Every day + + - `every:1w` - Every week + + - 
`every:1M` - Every month + + - `every:1y` - Every year + + + **Maximum interval**: 1 year + + + ### Table-based triggers + + - `all_tables` - Triggers on write events to any table in the database + + - `table:TABLE_NAME` - Triggers on write events to a specific table + + + ### On-demand triggers + + Format: `request:REQUEST_PATH` + + + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ + example: cron:0 0 6 * * 1-5 + trigger_arguments: + type: object + additionalProperties: true + description: Optional arguments passed to the plugin. + disabled: + type: boolean + default: false + description: Whether the trigger is disabled. + required: + - db + - plugin_filename + - trigger_name + - trigger_settings + - trigger_specification + - disabled + TriggerSettings: + type: object + description: | + Configuration settings for processing engine trigger error handling and execution behavior. + properties: + run_async: + type: boolean + default: false + description: | + Whether to run the trigger asynchronously. + When `true`, the trigger executes in the background without blocking. + When `false`, the trigger executes synchronously. + error_behavior: + type: string + enum: + - Log + - Retry + - Disable + description: | + Specifies how to handle errors that occur during trigger execution: + - `Log`: Log the error and continue (default) + - `Retry`: Retry the trigger execution + - `Disable`: Disable the trigger after an error + default: Log + required: + - run_async + - error_behavior + ApiNodeSpec: + x-enterprise-only: true + type: object + description: | + Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. 
+ Use this to control which node(s) should handle the cache or trigger. + properties: + node_id: + type: string + description: | + The ID of a specific node in the cluster. + If specified, the cache or trigger will only be created on this node. + node_group: + type: string + description: | + The name of a node group in the cluster. + If specified, the cache or trigger will be created on all nodes in this group. + WALPluginTestRequest: + type: object + description: | + Request body for testing a write-ahead logging (WAL) plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + input_lp: + type: string + description: | + Line protocol data to use as input for the test. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + - input_lp + SchedulePluginTestRequest: + type: object + description: | + Request body for testing a scheduling plugin. + properties: + filename: + type: string + description: | + The path and filename of the plugin to test. + database: + type: string + description: | + The database name to use for the test. + schedule: + type: string + description: | + Optional schedule specification in cron or interval format. + cache_name: + type: string + description: | + Optional name of the cache to use in the test. + input_arguments: + type: object + additionalProperties: + type: string + description: | + Optional key-value pairs of arguments to pass to the plugin. + required: + - filename + - database + PluginFileRequest: + type: object + description: | + Request body for updating a plugin file. 
+ properties: + plugin_name: + type: string + description: | + The name of the plugin file to update. + content: + type: string + description: | + The content of the plugin file. + required: + - plugin_name + - content + PluginDirectoryRequest: + type: object + description: | + Request body for updating plugin directory with multiple files. + properties: + plugin_name: + type: string + description: | + The name of the plugin directory to update. + files: + type: array + items: + $ref: "#/components/schemas/PluginFileEntry" + description: | + List of plugin files to include in the directory. + required: + - plugin_name + - files + PluginFileEntry: + type: object + description: | + Represents a single file in a plugin directory. + properties: + content: + type: string + description: | + The content of the file. + relative_path: + type: string + description: The relative path of the file within the plugin directory. + required: + - relative_path + - content + ShowDatabasesResponse: + type: object + properties: + databases: + type: array + items: + type: string + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - "2024-02-02T12:00:00Z" + - 42 + ErrorMessage: + type: object + properties: + error: + type: string + data: + type: object + nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. 
+ readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code + EpochCompatibility: + description: | + A unix timestamp precision. + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. 
+ example: active + description: Response schema for license information. + CreateTokenWithPermissionsRequest: + type: object + properties: + token_name: + type: string + description: The name for the resource token. + permissions: + type: array + items: + $ref: "#/components/schemas/PermissionDetailsApi" + description: List of permissions to grant to the token. + expiry_secs: + type: integer + description: Optional expiration time in seconds. + nullable: true + required: + - token_name + - permissions + PermissionDetailsApi: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + description: The type of resource. + resource_names: + type: array + items: + type: string + description: List of resource names. Use "*" for all resources. + actions: + type: array + items: + type: string + enum: + - read + - write + description: List of actions to grant. + required: + - resource_type + - resource_names + - actions + FileIndexCreateRequest: + type: object + description: Request body for creating a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, the file index applies to the database. + nullable: true + columns: + type: array + items: + type: string + description: The columns to use for the file index. + required: + - db + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + FileIndexDeleteRequest: + type: object + description: Request body for deleting a file index. + properties: + db: + type: string + description: The database name. + table: + type: string + description: The table name. If omitted, deletes the database-level file index. + nullable: true + required: + - db + example: + db: mydb + table: mytable + StopNodeRequest: + type: object + description: Request body for marking a node as stopped in the catalog. + properties: + node_id: + type: string + description: The ID of the node to mark as stopped. 
+ required: + - node_id + example: + node_id: node-1 + responses: + Unauthorized: + description: Unauthorized access. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + BadRequest: + description: | + Request failed. Possible reasons: + + - Invalid database name + - Malformed request body + - Invalid timestamp precision + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + Forbidden: + description: Access denied. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + NotFound: + description: Resource not found. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorMessage" + headers: + ClusterUUID: + description: | + The catalog UUID of the InfluxDB instance. + This header is included in all HTTP API responses and enables you to: + - Identify which cluster instance handled the request + - Monitor deployments across multiple InfluxDB instances + - Debug and troubleshoot distributed systems + schema: + type: string + format: uuid + example: 01234567-89ab-cdef-0123-456789abcdef + securitySchemes: + BasicAuthentication: + type: http + scheme: basic + description: >- + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + + + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints + in InfluxDB 3. + + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an + authorized token + + and ignores the `username` part of the decoded credential. 
+ + + ### Syntax + + + ```http + + Authorization: Basic + + ``` + + + ### Example + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: >- + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and + [`/query`](#operation/GetV1Query) endpoints. + + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + + and ignores the `u` (_username_) query parameter. + + + ### Syntax + + + ```http + + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + + ``` + + + ### Examples + + + ```bash + + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + + ```bash + + ####################################### + + # Use an InfluxDB 1.x compatible username and password + + # to query the InfluxDB v1 HTTP API + + ####################################### + + # Use authentication query parameters: + + # ?p=AUTH_TOKEN + + ####################################### + + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + 
--data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + + Replace the following: + + + - **`DATABASE_NAME`**: the database to query + + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + BearerAuthentication: + type: http + scheme: bearer + bearerFormat: JWT + description: | + + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and a database token. + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example + + ```bash + curl http://localhost:8181/api/v3/query_influxql \ + --header "Authorization: Bearer AUTH_TOKEN" + ``` + TokenAuthentication: + description: |- + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. 
+ ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + in: header + name: Authorization + type: apiKey +x-tagGroups: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data From de07f79a450dee8012d5a7abfc5e718906479e11 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Mar 2026 22:24:19 +0000 Subject: [PATCH 04/15] fix: run API docs build in PR preview workflow for /api pages - Add detectApiPages() to detect-preview-pages.js to auto-map changed api-docs/ files to their content URL paths via .config.yml - Add has-api-doc-changes output to detect-preview-pages.js - Fix needs-author-input logic to not request input when API pages are already auto-detected - Add Build API docs step to pr-preview.yml that runs yarn run build:api-docs before the Hugo build when api-doc changes detected Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- .github/scripts/detect-preview-pages.js | 70 +++++++++++++++++++++++-- .github/workflows/pr-preview.yml | 4 ++ 2 files changed, 70 insertions(+), 4 deletions(-) diff --git a/.github/scripts/detect-preview-pages.js b/.github/scripts/detect-preview-pages.js index 12e255ac46..c5292e74ca 100644 --- a/.github/scripts/detect-preview-pages.js +++ b/.github/scripts/detect-preview-pages.js @@ -5,12 +5,14 @@ * Outputs (for GitHub Actions): * - pages-to-deploy: JSON array of URL paths to deploy * - has-layout-changes: 'true' if layout/asset/data changes detected + * - has-api-doc-changes: 'true' if api-docs/ or openapi/ changes detected * - 
needs-author-input: 'true' if author must select pages * - change-summary: Human-readable summary of changes */ import { execSync } from 'child_process'; -import { appendFileSync, existsSync } from 'fs'; +import { appendFileSync, existsSync, readFileSync } from 'fs'; +import { load } from 'js-yaml'; import { extractDocsUrls } from './parse-pr-urls.js'; import { getChangedContentFiles, @@ -78,6 +80,53 @@ function isOnlyDeletions() { } } +/** + * Detect API pages affected by changed api-docs files. + * Maps changed api-docs files to their corresponding content URL paths by reading + * each product version's .config.yml and extracting API keys. + * @param {string[]} apiDocFiles - Changed files in api-docs/ + * @returns {string[]} Array of URL paths for affected API pages + */ +function detectApiPages(apiDocFiles) { + const pages = new Set(); + const processedVersions = new Set(); + // Matches the {product}/{version} path segment in api-docs/{product}/{version}/... + // e.g., api-docs/influxdb3/core/.config.yml -> captures 'influxdb3/core' + const PRODUCT_VERSION_PATTERN = /^api-docs\/([^/]+\/[^/]+)\//; + + for (const file of apiDocFiles) { + const match = file.match(PRODUCT_VERSION_PATTERN); + if (!match) continue; + + const productVersionDir = match[1]; // e.g., 'influxdb3/core' or 'influxdb/v2' + + // Only process each product version once + if (processedVersions.has(productVersionDir)) continue; + processedVersions.add(productVersionDir); + + const configPath = `api-docs/${productVersionDir}/.config.yml`; + if (!existsSync(configPath)) continue; + + try { + const configContent = readFileSync(configPath, 'utf-8'); + const config = load(configContent); + + if (!config || !config.apis) continue; + + for (const apiKey of Object.keys(config.apis)) { + // Extract apiName: e.g., 'v3@3' -> 'v3', 'v1-compatibility@2' -> 'v1-compatibility' + const apiName = apiKey.split('@')[0]; + const urlPath = `/${productVersionDir}/api/${apiName}/`; + pages.add(urlPath); + } + } catch 
(err) { + console.log(` ⚠️ Could not read or parse ${configPath}: ${err.message}`); + } + } + + return Array.from(pages); +} + /** * Write output for GitHub Actions */ @@ -96,6 +145,7 @@ function main() { console.log('📭 PR contains only deletions - skipping preview'); setOutput('pages-to-deploy', '[]'); setOutput('has-layout-changes', 'false'); + setOutput('has-api-doc-changes', 'false'); setOutput('needs-author-input', 'false'); setOutput('change-summary', 'No pages to preview (content removed)'); setOutput('skip-reason', 'deletions-only'); @@ -133,7 +183,17 @@ function main() { console.log(` Found ${pagesToDeploy.length} affected pages\n`); } - // Strategy 2: Layout/asset changes - parse URLs from PR body + // Strategy 2: API doc changes - auto-detect affected API pages + if (changes.apiDocs.length > 0) { + console.log('📋 API doc changes detected, auto-detecting affected pages...'); + const apiPages = detectApiPages(changes.apiDocs); + if (apiPages.length > 0) { + console.log(` Found ${apiPages.length} affected API page(s): ${apiPages.join(', ')}`); + pagesToDeploy = [...new Set([...pagesToDeploy, ...apiPages])]; + } + } + + // Strategy 3: Layout/asset changes - parse URLs from PR body if (hasLayoutChanges) { console.log('🎨 Layout/asset changes detected, checking PR description for URLs...'); const prUrls = extractDocsUrls(PR_BODY); @@ -142,11 +202,12 @@ function main() { console.log(` Found ${prUrls.length} URLs in PR description`); // Merge with content pages (deduplicate) pagesToDeploy = [...new Set([...pagesToDeploy, ...prUrls])]; - } else if (changes.content.length === 0) { - // No content changes AND no URLs specified - need author input + } else if (changes.content.length === 0 && pagesToDeploy.length === 0) { + // No content changes, no auto-detected pages, and no URLs specified - need author input console.log(' ⚠️ No URLs found in PR description - author input needed'); setOutput('pages-to-deploy', '[]'); setOutput('has-layout-changes', 'true'); + 
setOutput('has-api-doc-changes', String(changes.apiDocs.length > 0)); setOutput('needs-author-input', 'true'); setOutput('change-summary', 'Layout/asset changes detected - please specify pages to preview'); return; @@ -168,6 +229,7 @@ function main() { setOutput('pages-to-deploy', JSON.stringify(pagesToDeploy)); setOutput('has-layout-changes', String(hasLayoutChanges)); + setOutput('has-api-doc-changes', String(changes.apiDocs.length > 0)); setOutput('needs-author-input', 'false'); setOutput('change-summary', summary); } diff --git a/.github/workflows/pr-preview.yml b/.github/workflows/pr-preview.yml index 18c8ca3788..7a0dc8469f 100644 --- a/.github/workflows/pr-preview.yml +++ b/.github/workflows/pr-preview.yml @@ -115,6 +115,10 @@ jobs: echo "No pages to deploy - skipping preview" echo "Reason: ${{ steps.detect.outputs.skip-reason || steps.detect.outputs.change-summary }}" + - name: Build API docs + if: steps.detect.outputs.has-api-doc-changes == 'true' && steps.detect.outputs.pages-to-deploy != '[]' + run: yarn run build:api-docs + - name: Build Hugo site if: steps.detect.outputs.pages-to-deploy != '[]' id: build From b951d3c27afecdb549b261826fb4c65162b9e58d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 9 Mar 2026 20:22:31 -0500 Subject: [PATCH 05/15] feat(ci): add doc review pipeline with auto-labeling and Copilot review (#6890) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add doc review pipeline implementation plan Detailed plan for two interconnected systems: 1. Label system overhaul (22 automation-driven labels replacing 30+ inconsistent ones) 2. Doc review workflow (Claude visual review + Copilot structural review with screenshots) This is a plan document only — no implementation changes. 
https://claude.ai/code/session_01D5rLaHdQv9iBL55UEsdQFt * fix: split product:v3 into v3-monolith and v3-distributed labels - product:v3-monolith: Core, Enterprise (single-node / clusterable) - product:v3-distributed: Cloud Serverless, Cloud Dedicated, Clustered - Updated auto-label path mappings to match content directory structure - Updated migration mapping and label count (22 → 23) https://claude.ai/code/session_01D5rLaHdQv9iBL55UEsdQFt * feat: define agent instruction file architecture in Phase 3 - One CLAUDE.md (pointer) → role-specific files in .claude/agents/ - doc-triage-agent.md: label taxonomy, path mapping, priority rules - doc-review-agent.md: review scope, severity classification, output format - Prompt file (.github/prompts/) references agent file, stays workflow-specific - Updated file summary and implementation order https://claude.ai/code/session_01D5rLaHdQv9iBL55UEsdQFt * feat: move visual/screenshot review from Claude to Copilot Claude now handles diff-only Markdown review (frontmatter, shortcodes, style, terminology). Copilot handles visual review by analyzing screenshots posted as images in PR comments. 
Key changes: - Job 3 (Claude) runs in parallel with Jobs 1→2→4 (diff-only, no screenshots) - Job 4 (Copilot) analyzes screenshots via @copilot PR comment mentions - Two prompt files: doc-review.md (Claude), copilot-visual-review.md (Copilot) - doc-review-agent.md scoped to diff-only (no screenshot analysis) - Q1 resolved: screenshots delivered to Copilot via PR comment images - Reduced Claude API cost (no image processing) - Added Copilot failure handling (fallback to human review of artifacts) https://claude.ai/code/session_01D5rLaHdQv9iBL55UEsdQFt * chore: resolve all open questions in doc review pipeline plan Convert Q2–Q5 from open recommendations to resolved decisions: - Q2: Advisory only (no required status checks) until false-positive rate confirmed - Q3: Playwright for CI screenshots, Puppeteer for local debugging - Q4: Poll preview URL with 15s interval and 10-min timeout - Q5: Cost acceptable with existing mitigations (path filters, skip-review, concurrency) Rename section from "Open Questions" to "Decisions (Resolved)". 
https://claude.ai/code/session_01D5rLaHdQv9iBL55UEsdQFt * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Fix label naming inconsistency and document workflow migration requirements (#6893) * Initial plan * fix: resolve label naming inconsistency and document workflow updates - Rename review:approved to approval:codeowner to avoid confusion with review/* labels - Add note explaining the distinct prefix to prevent implementor confusion - Document required workflow updates for sync-plugin-docs label migration - Specify exact files and line numbers that need updating Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> * feat(ci): add doc review pipeline and deduplicate instruction files Add Phase 2-3 pipeline components: doc-review workflow (3-job architecture), Claude/Copilot review prompts, URL resolver script, triage and review agents, and label guide. Deduplicate AGENTS.md (254→96 lines) by removing content available in referenced docs. Remove duplicate sections from copilot-instructions.md (264→221 lines). AGENTS.md now contains only high-signal guidelines loaded every session: commands, constraints, style rules, product paths, and reference pointers. * task: updated PR pipeline plan * task: remove old cli plan * task: products file now contains content path mappings and (GitHub) label groups for each product. Product group labels will be used to assign reviewers and help with content checks. * feat(ci): add auto-label workflow for PR product detection Add auto-label workflow that applies product and source labels to PRs based on changed file paths, using data/products.yml as the source of truth. Add workflow-utils.js shared helper for product path matching. 
* refactor(ci): extract shared label and review format definitions Consolidate duplicated label definitions and review comment format into shared source-of-truth files to prevent context drift. New files: - data/labels.yml: source, waiting, workflow, review, and product:shared label definitions (names, colors, descriptions) - .github/templates/review-comment.md: severity levels, comment structure, result rules, and result-to-label mapping Updated consumers to reference shared files instead of inline copies: - .claude/agents/doc-review-agent.md - .claude/agents/doc-triage-agent.md - .github/prompts/copilot-visual-review.md - .github/LABEL_GUIDE.md Workflow fixes: - Pin all GitHub Actions to SHA hashes - Fix fromJson() for url-count comparison in doc-review.yml - Fix fallback handler to remove all review:* labels before re-adding * refactor(ci): replace Claude review with Copilot native code review - Remove claude-code-action job, use `copilot-reviews` reviewer instead - Extract review criteria to .github/instructions/content-review.instructions.md - Simplify copilot-instructions.md by removing duplicated content - Harden auto-label workflow (scoped permissions, pagination, concurrency) - Upsert visual review comments instead of creating duplicates - Delete unused .github/prompts/doc-review.md * Update .github/workflows/doc-review.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update .github/DOC-REVIEW-PIPELINE-PLAN.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * task: update review pipeline plan * task: add label-migration scripts. 
These are one-use scripts that we'll remove after the label migration * Update .github/workflows/doc-review.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update .github/workflows/doc-review.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update .github/workflows/auto-label.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * style: remove unnecessary escape in regex character class * feat(ci): add workflow_dispatch to auto-label and doc-review workflows - Add workflow_dispatch with pr_number input to both workflows for manual testing and on-demand re-runs - Migrate sync-plugin-docs label references to source:sync - Add area:agents, area:ci, area:links, release:*, good-first-issue, source:feedback, waiting:pr to labels.yml - Update products.yml: influxdb_cloud label_group v2 -> v2-cloud - Track label renames and deletions in LABEL_GUIDE.md * fix(ci): replace npm ci with targeted js-yaml install in auto-label npm ci fails in sparse checkout because package-lock.json is not included. The workflow only needs js-yaml for YAML parsing. * fix(ci): add --legacy-peer-deps to auto-label npm install * task: updated labels for migration * docs: update pipeline plan with test results and completion status * test: reapply reverted serve.md changes for e2e pipeline test Reverse the revert from 2f8efd6 to provide content changes that exercise the auto-label and doc-review workflows end-to-end. * fix(ci): fix preview URL polling in doc-review visual review curl --head outputs response headers before the status code from -w, so STATUS contained "HTTP/2 200 ...200" instead of just "200". Drop --head and add -o /dev/null to capture only the status code. 
* fix: correct broken fragment links in InfluxDB 3 serve.md files (#6910) * Initial plan * fix: correct broken links in serve.md files for enterprise config-options Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> * Update content/influxdb3/enterprise/reference/cli/influxdb3/serve.md --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> Co-authored-by: Jason Stirnaman --------- Co-authored-by: Claude Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com> --- .claude/agents/doc-review-agent.md | 82 ++ .claude/agents/doc-triage-agent.md | 72 ++ .claude/settings.json | 93 +-- .github/DOC-REVIEW-PIPELINE-PLAN.md | 704 ++++++++++++++++++ .github/ISSUE_TEMPLATE/sync-plugin-docs.yml | 2 +- .github/LABEL_GUIDE.md | 100 +++ .github/copilot-instructions.md | 258 +------ .../content-review.instructions.md | 76 ++ .github/prompts/copilot-visual-review.md | 34 + .github/scripts/resolve-review-urls.js | 38 + .github/scripts/workflow-utils.js | 104 +++ .github/templates/review-comment.md | 98 +++ .github/workflows/auto-label.yml | 122 +++ .github/workflows/doc-review.yml | 264 +++++++ .github/workflows/sync-plugins.yml | 6 +- AGENTS.md | 279 ++----- CLAUDE.md | 3 +- .../core/reference/cli/influxdb3/serve.md | 20 +- .../reference/cli/influxdb3/serve.md | 23 +- data/labels.yml | 86 +++ data/products.yml | 30 + .../2025-01-10-docs-cli-workflow-design.md | 134 ---- helper-scripts/label-migration/README.md | 95 +++ .../label-migration/create-labels.sh | 97 +++ .../label-migration/delete-labels.sh | 137 ++++ .../label-migration/migrate-labels.sh | 104 +++ 26 files changed, 2416 insertions(+), 645 deletions(-) create mode 100644 .claude/agents/doc-review-agent.md create mode 100644 .claude/agents/doc-triage-agent.md create mode 100644 .github/DOC-REVIEW-PIPELINE-PLAN.md 
create mode 100644 .github/LABEL_GUIDE.md create mode 100644 .github/instructions/content-review.instructions.md create mode 100644 .github/prompts/copilot-visual-review.md create mode 100644 .github/scripts/resolve-review-urls.js create mode 100644 .github/scripts/workflow-utils.js create mode 100644 .github/templates/review-comment.md create mode 100644 .github/workflows/auto-label.yml create mode 100644 .github/workflows/doc-review.yml create mode 100644 data/labels.yml delete mode 100644 docs/plans/2025-01-10-docs-cli-workflow-design.md create mode 100644 helper-scripts/label-migration/README.md create mode 100755 helper-scripts/label-migration/create-labels.sh create mode 100755 helper-scripts/label-migration/delete-labels.sh create mode 100755 helper-scripts/label-migration/migrate-labels.sh diff --git a/.claude/agents/doc-review-agent.md b/.claude/agents/doc-review-agent.md new file mode 100644 index 0000000000..a16adae0da --- /dev/null +++ b/.claude/agents/doc-review-agent.md @@ -0,0 +1,82 @@ +--- +name: doc-review-agent +description: | + Diff-only PR review agent for documentation changes. Reviews Markdown + changes against style guide, frontmatter rules, shortcode syntax, and + documentation standards. Available for local Claude Code review sessions. +model: sonnet +--- + +You are a documentation review agent for the InfluxData docs-v2 repository. +Your job is to review PR diffs for documentation quality issues. You review +Markdown source only — visual/rendered review is handled separately by Copilot. + +## Review Scope + +Check the PR diff for these categories. Reference the linked docs for +detailed rules — do not invent rules that aren't documented. + +### 1. Frontmatter + +Rules: [DOCS-FRONTMATTER.md](../../DOCS-FRONTMATTER.md) + +- `title` and `description` are required on every page +- `menu` structure matches the product's menu key +- `weight` is present and uses the correct range (1-99, 101-199, etc.) 
+- `source` paths for shared content point to valid `/shared/` paths +- No duplicate or conflicting frontmatter keys + +### 2. Shortcode Syntax + +Rules: [DOCS-SHORTCODES.md](../../DOCS-SHORTCODES.md) + +- Shortcodes use correct opening/closing syntax (`{{< >}}` vs `{{% %}}` + depending on whether inner content is Markdown) +- Required parameters are present +- Closing tags match opening tags +- Callouts use GitHub-style syntax: `> [!Note]`, `> [!Warning]`, etc. + +### 3. Semantic Line Feeds + +Rules: [DOCS-CONTRIBUTING.md](../../DOCS-CONTRIBUTING.md) + +- One sentence per line +- Long sentences should be on their own line, not concatenated + +### 4. Heading Hierarchy + +- No h1 headings in content (h1 comes from `title` frontmatter) +- Headings don't skip levels (h2 → h4 without h3) + +### 5. Terminology and Product Names + +- Use official product names: "InfluxDB 3 Core", "InfluxDB 3 Enterprise", + "InfluxDB Cloud Serverless", "InfluxDB Cloud Dedicated", etc. +- Don't mix v2/v3 terminology in v3 docs (e.g., "bucket" in Core docs) +- Version references match the content path + +### 6. Links + +- Internal links use relative paths or Hugo `relref` shortcodes +- No hardcoded `docs.influxdata.com` links in content files +- Anchor links match actual heading IDs + +### 7. Shared Content + +- `source:` frontmatter points to an existing shared file path +- Shared files don't contain frontmatter (only content) +- Changes to shared content are intentional (affects multiple products) + +## Output Format + +Follow the shared review comment format, severity definitions, and label +mapping in +[.github/templates/review-comment.md](../../.github/templates/review-comment.md). 
+ +## What NOT to Review + +- Rendered HTML appearance (Copilot handles this) +- Code correctness inside code blocks (pytest handles this) +- Link validity (link-checker workflow handles this) +- Vale style linting (Vale handles this) +- Files outside the diff diff --git a/.claude/agents/doc-triage-agent.md b/.claude/agents/doc-triage-agent.md new file mode 100644 index 0000000000..59a4b4effa --- /dev/null +++ b/.claude/agents/doc-triage-agent.md @@ -0,0 +1,72 @@ +--- +name: doc-triage-agent +description: | + Triage agent for documentation issues and PRs. Applies product labels, + assesses priority, and determines readiness for automated workflows. + Uses data/products.yml as the single source of truth for path-to-product + mapping. +model: sonnet +--- + +You are a documentation triage agent for the InfluxData docs-v2 repository. +Your job is to label, prioritize, and route issues and PRs for the +documentation team. + +## Label Taxonomy + +Apply labels using the definitions in these source files: + +- **Product labels** (`product:*`): Read + [data/products.yml](../../data/products.yml) — match changed file paths + against each product's `content_path`, apply `product:{label_group}`. + Apply all matching labels. For shared content, apply `product:shared` plus + labels for all products that reference the shared file. +- **Non-product labels**: Read + [data/labels.yml](../../data/labels.yml) for all source, waiting, workflow, + and review label names and descriptions. +- **Review labels** (`review:*`): Defined in `data/labels.yml` but applied + only by the doc-review workflow, not during triage. + +## Priority Assessment + +Assess priority based on: + +1. **Product tier:** InfluxDB 3 Core/Enterprise > Cloud Dedicated/Serverless > v2 > v1 +2. **Issue type:** Incorrect information > missing content > style issues +3. **Scope:** Security/data-loss implications > functional docs > reference docs +4. 
**Staleness:** Issues with `waiting:*` labels older than 14 days should be + escalated or re-triaged + +## Decision Logic + +### When to apply `agent-ready` + +Apply when ALL of these are true: +- The issue has clear, actionable requirements +- No external dependencies (no `waiting:*` labels) +- The fix is within the documentation scope (not a product bug) +- Product labels are applied (agent needs to know which content to modify) + +### When to apply `waiting:*` + +Apply when the issue: +- References undocumented API behavior → `waiting:engineering` +- Requires a product decision about feature naming or scope → `waiting:product` +- Needs clarification from the reporter about expected behavior → add a comment asking, don't apply waiting + +### When to apply `review:needs-human` + +Apply during triage only if: +- The issue involves complex cross-product implications +- The content change could affect shared content used by many products +- The issue requires domain expertise the agent doesn't have + +## Triage Workflow + +1. Read the issue/PR title and body +2. Identify affected products from content paths or mentions +3. Apply product labels +4. Apply source label if applicable +5. Assess whether the issue is ready for agent work +6. Apply `agent-ready` or `waiting:*` as appropriate +7. 
Post a brief triage comment summarizing the labeling decision diff --git a/.claude/settings.json b/.claude/settings.json index f5838526be..c21b0380a6 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -1,51 +1,51 @@ { "permissions": { "allow": [ - "Bash(.ci/vale/vale.sh:*)", - "Bash(npm:*)", - "Bash(yarn:*)", - "Bash(pnpm:*)", - "Bash(npx:*)", - "Bash(node:*)", - "Bash(python:*)", - "Bash(python3:*)", - "Bash(pip:*)", - "Bash(poetry:*)", - "Bash(make:*)", - "Bash(cargo:*)", - "Bash(go:*)", - "Bash(curl:*)", - "Bash(gh:*)", - "Bash(hugo:*)", - "Bash(htmlq:*)", - "Bash(jq:*)", - "Bash(yq:*)", - "Bash(mkdir:*)", - "Bash(cat:*)", - "Bash(ls:*)", - "Bash(echo:*)", - "Bash(rg:*)", - "Bash(grep:*)", - "Bash(find:*)", - "Bash(bash:*)", - "Bash(wc:*)", - "Bash(sort:*)", - "Bash(uniq:*)", - "Bash(head:*)", - "Bash(tail:*)", - "Bash(awk:*)", - "Bash(touch:*)", - "Bash(docker:*)", - "Edit", - "Read", - "Write", - "Grep", - "Glob", - "LS", - "Skill(superpowers:brainstorming)", - "Skill(superpowers:brainstorming:*)", - "mcp__acp__Bash" - ], + "Bash(.ci/vale/vale.sh:*)", + "Bash(npm:*)", + "Bash(yarn:*)", + "Bash(pnpm:*)", + "Bash(npx:*)", + "Bash(node:*)", + "Bash(python:*)", + "Bash(python3:*)", + "Bash(pip:*)", + "Bash(poetry:*)", + "Bash(make:*)", + "Bash(cargo:*)", + "Bash(go:*)", + "Bash(curl:*)", + "Bash(gh:*)", + "Bash(hugo:*)", + "Bash(htmlq:*)", + "Bash(jq:*)", + "Bash(yq:*)", + "Bash(mkdir:*)", + "Bash(cat:*)", + "Bash(ls:*)", + "Bash(echo:*)", + "Bash(rg:*)", + "Bash(grep:*)", + "Bash(find:*)", + "Bash(bash:*)", + "Bash(wc:*)", + "Bash(sort:*)", + "Bash(uniq:*)", + "Bash(head:*)", + "Bash(tail:*)", + "Bash(awk:*)", + "Bash(touch:*)", + "Bash(docker:*)", + "Edit", + "Read", + "Write", + "Grep", + "Glob", + "LS", + "Skill(superpowers:brainstorming)", + "Skill(superpowers:brainstorming:*)", + "mcp__acp__Bash" + ], "deny": [ "Read(./.env)", "Read(./.env.*)", @@ -58,5 +58,8 @@ "Bash(rm:*)", "Read(/tmp)" ] + }, + "enabledPlugins": { + 
"github@claude-plugins-official": true } } diff --git a/.github/DOC-REVIEW-PIPELINE-PLAN.md b/.github/DOC-REVIEW-PIPELINE-PLAN.md new file mode 100644 index 0000000000..60815f485f --- /dev/null +++ b/.github/DOC-REVIEW-PIPELINE-PLAN.md @@ -0,0 +1,704 @@ +# Doc Review Pipeline — Implementation Plan + +**Status:** Complete — all phases implemented and tested +**Repository:** influxdata/docs-v2 +**Author:** Triage agent (Claude Code) +**Date:** 2026-02-28 + +--- + +## Table of Contents + +1. [Goal](#goal) +2. [What Already Exists](#what-already-exists) +3. [Architecture Overview](#architecture-overview) +4. [Phase 1: Label System Overhaul](#phase-1-label-system-overhaul) +5. [Phase 2: Doc Review Workflow](#phase-2-doc-review-workflow) +6. [Phase 3: Documentation and Agent Instructions](#phase-3-documentation-and-agent-instructions) +7. [Future Phases (Not In Scope)](#future-phases-not-in-scope) +8. [Decisions (Resolved)](#decisions-resolved) +9. [Risk Assessment](#risk-assessment) + +--- + +## Goal + +Build two interconnected systems: + +1. **Label system** — An automation-driven label taxonomy that supports + cross-repo automation, agentic workflows, and human-in-the-loop review. +2. **Doc review pipeline** — A GitHub Actions workflow that automates + documentation PR review using Copilot for both code review (diff-based, + using auto-loaded instruction files) and visual review (rendered HTML + at preview URLs), with rendered-page verification that catches issues + invisible in the Markdown source. + +The pipeline catches issues only visible in rendered output — expanded +shortcodes, broken layouts, incorrect product names — by having Copilot +analyze the rendered HTML of deployed preview pages. 
+ +--- + +## What Already Exists + +### Infrastructure + +| Component | Location | Notes | +|-----------|----------|-------| +| PR preview deployment | `.github/workflows/pr-preview.yml` | Builds Hugo site, deploys to `gh-pages` branch at `influxdata.github.io/docs-v2/pr-preview/pr-{N}/` | +| Changed file detection | `.github/scripts/detect-preview-pages.js` | Detects changed files, maps content to public URLs, handles shared content | +| Content-to-URL mapping | `scripts/lib/content-utils.js` | `getChangedContentFiles()`, `mapContentToPublic()`, `expandSharedContentChanges()` | +| Screenshot tooling | `scripts/puppeteer/screenshot.js` | Puppeteer-based screenshot utility (already a dependency) | +| Playwright | `package.json` | Already a dependency (`^1.58.1`) | +| Claude agent instructions | `CLAUDE.md`, `AGENTS.md`, `.claude/` | Review criteria, style guide, skills, commands | +| Copilot instructions | `.github/copilot-instructions.md` | Style guide, repo structure, patterns | +| Copilot pattern instructions | `.github/instructions/` | Auto-loaded by Copilot based on changed file patterns | +| Auto-labeling (path-based) | Not yet implemented | Needed for Phase 1 | +| Link checker workflow | `.github/workflows/pr-link-check.yml` | Validates links on PR changes | +| Sync plugins workflow | `.github/workflows/sync-plugins.yml` | Issue-triggered workflow pattern to follow | +| Audit documentation workflow | `.github/workflows/audit-documentation.yml` | Creates issues from audit results | + +### Labels (Current State) + +The repo has 30+ labels with inconsistent naming patterns and significant +overlap. Product labels use long names (`InfluxDB 3 Core and Enterprise`), +workflow states are minimal (`release:pending` is the only actively used one), +and there is no agent-readiness or blocking-state taxonomy. 
+ +--- + +## Architecture Overview + +``` +PR opened/updated (content paths) + │ + ├──────────────────────────┐ + ▼ ▼ +┌─ Job 1: Resolve URLs ────┐ ┌─ Job 2: Copilot Code Review ───┐ +│ resolve-review-urls.js │ │ gh pr edit --add-reviewer │ +│ changed files → URLs │ │ copilot-reviews │ +│ Output: url list │ │ Uses .github/instructions/ │ +└──────────┬───────────────┘ │ for auto-loaded review rules │ + │ └──────────────┬─────────────────┘ + ▼ │ +┌─ Job 3: Copilot Visual Review ────────┐ │ +│ Wait for preview deployment │ │ +│ Post preview URLs + review prompt │ │ +│ @copilot analyzes rendered HTML │ │ +│ Checks: layout, shortcodes, 404s │ │ +└──────────────┬───────────────────────┘ │ + │ │ + ▼ ▼ + Human reviews what remains +``` + +**Job 2 (Copilot code review) runs in parallel with Jobs 1→3** — it uses +GitHub's native Copilot reviewer, which analyzes the PR diff using +auto-loaded instruction files from `.github/instructions/`. + +--- + +## Phase 1: Label System Overhaul + +### Rationale + +The label system is a prerequisite for agentic workflows. Agents need clear +signals about issue readiness (`agent-ready`), blocking states +(`waiting:engineering`, `waiting:product`), and product scope +(`product:v3-monolith`, `product:v3-distributed`). +Consistent label patterns also enable GitHub API queries for dashboards and +automation. + +### 1.1 — Label taxonomy + +> **Note:** The tables below are a planning snapshot. The authoritative +> definitions live in `data/labels.yml` (non-product labels) and +> `data/products.yml` (product labels). See `.github/LABEL_GUIDE.md` for +> the current index. 
+ +**24 labels organized into 6 categories:** + +#### Product labels (11) — Color: `#FFA500` (orange) + +| Label | Description | +|-------|-------------| +| `product:v3-monolith` | InfluxDB 3 Core and Enterprise (single-node / clusterable) | +| `product:v3-distributed` | InfluxDB 3 Cloud Serverless, Cloud Dedicated, Clustered | +| `product:v2` | InfluxDB v2 (Cloud, OSS) | +| `product:v1` | InfluxDB v1 OSS | +| `product:v1-enterprise` | InfluxDB Enterprise v1 | +| `product:telegraf` | Telegraf documentation | +| `product:chronograf` | Chronograf documentation | +| `product:kapacitor` | Kapacitor documentation | +| `product:flux` | Flux language documentation | +| `product:explorer` | InfluxDB 3 Explorer | +| `product:shared` | Shared content across products | + +#### Source tracking labels (4) — Color: `#9370DB` (purple) + +| Label | Description | +|-------|-------------| +| `source:auto-detected` | Created by change detection within this repo | +| `source:dar` | Generated by DAR pipeline (issue analysis → draft) | +| `source:sync` | Synced from an external repository | +| `source:manual` | Human-created issue | + +#### Waiting states (2) — Color: `#FF8C00` (dark orange) + +| Label | Description | +|-------|-------------| +| `waiting:engineering` | Waiting for engineer confirmation | +| `waiting:product` | Waiting for product/PM decision | + +#### Workflow states (2) — Color: `#00FF00` / `#1E90FF` + +| Label | Description | +|-------|-------------| +| `agent-ready` | Agent can work on this autonomously | +| `skip-review` | Skip automated doc review pipeline | + +> [!Note] +> Human codeowner approval uses GitHub's native PR review mechanism (CODEOWNERS file), not a label. The `review:*` labels below are applied **manually** after reviewing Copilot feedback.
+ +#### Review outcome labels (3) — Color: `#28A745` / `#DC3545` / `#FFC107` + +| Label | Description | +|-------|-------------| +| `review:approved` | Review passed — no blocking issues found | +| `review:changes-requested` | Review found blocking issues | +| `review:needs-human` | Review inconclusive, needs human | + +> [!Note] +> All labels use colons (`:`) as separators for consistency. The `review:*` labels +> are mutually exclusive. They are applied manually after review — the CI workflow +> does not manage labels. Copilot code review uses GitHub's native "Comment" +> review type. + +#### Existing labels to keep (renamed) (2) + +| Old Name | New Name | Description | +|----------|----------|-------------| +| `AI assistant tooling` | `ai:tooling` | Related to AI assistant infrastructure | +| `ci:testing-and-validation` | `ci:testing` | CI/testing infrastructure | + +### 1.2 — Migration scripts + +Create migration scripts in `helper-scripts/label-migration/`: + +- **`create-labels.sh`** — Creates all new labels using `gh label create --force` (idempotent) +- **`migrate-labels.sh`** — Migrates existing issues from old labels to new labels using `gh issue edit` +- **`delete-labels.sh`** — Deletes old labels (requires interactive confirmation) +- **`README.md`** — Execution order, prerequisites, rollback instructions + +**Migration mapping:** + +| Old Label | New Label | +|-----------|-----------| +| `InfluxDB 3 Core and Enterprise` | `product:v3-monolith` | +| `InfluxDB v3` | `product:v3-monolith` (review individually — some may be distributed) | +| `Processing engine` | `product:v3-monolith` | +| `InfluxDB v2` | `product:v2` | +| `InfluxDB v1` | `product:v1` | +| `Enterprise 1.x` | `product:v1-enterprise` | +| `Chronograf 1.x` | `product:chronograf` | +| `Kapacitor` | `product:kapacitor` | +| `Flux` | `product:flux` | +| `InfluxDB 3 Explorer` | `product:explorer` | +| `Pending Release` | `release:pending` | +| `release/influxdb3` | `release:pending` | +| 
`sync-plugin-docs` | `source:sync` | + +> [!Important] +> **Workflow Updates Required:** +> The `sync-plugin-docs` label is used in GitHub Actions workflows. After migrating this label to `source:sync`, the following files must be updated: +> - `.github/workflows/sync-plugins.yml` (lines 28, 173, 421) +> - `.github/ISSUE_TEMPLATE/sync-plugin-docs.yml` (line 4) +> +> Update all references from `sync-plugin-docs` to `source:sync` to ensure the plugin sync automation continues to work after the label migration. + +> [!Note] +> `release:pending` is an existing workflow state label that we are keeping as-is. +> The migration scripts **must ensure** this label exists (create it if missing) and **must not** delete it in the cleanup step. + +**Labels to delete after migration:** +`bug`, `priority`, `documentation`, `Proposal`, `Research Phase`, +`ready-for-collaboration`, `ui`, `javascript`, `dependencies`, +`integration-demo-blog`, `API`, `Docker`, `Grafana`, `Ask AI`, +plus all old product labels listed above. + +**Execution:** +1. Run `create-labels.sh` (safe, idempotent) +2. Run `migrate-labels.sh` +3. Human verifies a sample of issues +4. 
Run `delete-labels.sh` (destructive, requires confirmation) + +### 1.3 — Auto-labeling workflow + +**File:** `.github/workflows/auto-label.yml` + +**Trigger:** `pull_request: [opened, synchronize]` + +**Logic:** +- List changed files via `github.rest.pulls.listFiles()` +- Read `data/products.yml` for path-to-label mappings (single source of truth): + - Each product entry has `content_path` and `label_group` fields + - Match file paths against `content/{content_path}/` → `product:{label_group}` + - Example: `content/influxdb3/core/` matches `content_path: influxdb3/core`, + `label_group: v3-monolith` → applies `product:v3-monolith` +- Shared content handling: + - `content/shared/` changes apply `product:shared` label + - Additionally expand shared content to affected products using + `expandSharedContentChanges()` from `scripts/lib/content-utils.js` + - Apply all affected product labels (additive) +- Multi-product PRs: apply all matching `product:*` labels (additive) +- Only add labels that are not already present (idempotent) +- Runs as `actions/github-script@v7` + +--- + +## Phase 2: Doc Review Workflow + +### 2.1 — Workflow file + +**File:** `.github/workflows/doc-review.yml` + +**Trigger:** + +```yaml +on: + pull_request: + types: [opened, synchronize, ready_for_review] + paths: + - 'content/**' + - 'layouts/**' + - 'assets/**' + - 'data/**' +``` + +**Permissions:** `contents: read`, `pull-requests: write` + +**Concurrency:** `group: doc-review-${{ github.event.number }}`, `cancel-in-progress: true` + +**Skip conditions:** Draft PRs, fork PRs, PRs with a `skip-review` label (new label to be added in Phase 1 via the label migration scripts). + +### 2.2 — Job 1: Resolve URLs + +**Purpose:** Map changed files to preview URLs. 
+ +**Implementation:** +- Reuse the existing `detect-preview-pages.js` script and `content-utils.js` library +- Same logic as `pr-preview.yml` Job 1, but output a JSON artifact instead of deploying +- Output format: `[{"file": "content/influxdb3/core/write-data/_index.md", "url": "/influxdb3/core/write-data/"}]` +- Upload as `urls.json` workflow artifact + +**Key detail:** This job runs `getChangedContentFiles()` and `mapContentToPublic()` +from `scripts/lib/content-utils.js`, which already handles shared content +expansion (if `content/shared/foo.md` changes, all pages with +`source: /shared/foo.md` are included). + +### 2.3 — Job 2: Copilot Code Review + +**Purpose:** Review Markdown changes against the style guide and documentation +standards using GitHub's native Copilot code review. Visual review of rendered +pages is handled separately in Job 3. + +**Dependencies:** None beyond the PR itself. This job runs in parallel with +Jobs 1→3. + +**Implementation:** +- Adds `copilot-reviews` as a PR reviewer via `gh pr edit --add-reviewer` +- Copilot automatically reviews the PR diff using instruction files from + `.github/instructions/` that are auto-loaded based on changed file patterns +- No custom prompt or API key required + +**Review criteria file:** `.github/instructions/content-review.instructions.md` + +This file is auto-loaded by Copilot for PRs that change `content/**/*.md` +files. It checks for: + +1. **Frontmatter correctness** — Required fields, menu structure, weights +2. **Shortcode syntax** — Correct usage, closing tags, parameters +3. **Semantic line feeds** — One sentence per line +4. **Heading hierarchy** — No h1 in content (title comes from frontmatter) +5. **Product-specific terminology** — Correct product names, versions +6. **Link format** — Relative links, proper shortcode links +7. **Shared content** — `source:` frontmatter correctness +8. 
**Code blocks** — Language identifiers, line length, long CLI options + +**Severity classification:** +- `BLOCKING` — Wrong product names, invalid frontmatter, broken shortcode syntax +- `WARNING` — Style inconsistencies, missing semantic line feeds +- `INFO` — Suggestions, not problems + +**Output:** +- Copilot posts inline review comments using GitHub's native "Comment" + review type +- `review:*` labels are applied manually by humans after reviewing the + Copilot feedback — the workflow does not manage labels + +### 2.4 — Job 3: Copilot Visual Review (rendered HTML) + +**Purpose:** Have Copilot analyze the rendered preview pages to catch visual +and structural issues invisible in the Markdown source. + +**Dependencies:** Depends on Job 1 (needs URL list). Must wait for the +`pr-preview.yml` deployment to be live. + +**Why Copilot for visual review:** +- Copilot can analyze rendered HTML content at public preview URLs — no + screenshot capture or image upload required. +- Visual review is a good fit for Copilot because the rendered pages are + self-contained artifacts (no need to cross-reference repo files). +- Copilot code review (Job 2) handles the diff; visual review catches what + the diff review cannot. + +**Implementation:** + +1. **Wait for preview deployment:** + - Poll `https://influxdata.github.io/docs-v2/pr-preview/pr-{N}/` with + `curl -o /dev/null -w '%{http_code}'` until it returns 200 + (do not use `--head` — it emits response headers before the `-w` + status code, corrupting the captured value) + - Timeout: 10 minutes (preview build takes ~75s + deploy time) + - Poll interval: 15 seconds + - If timeout, skip visual review; Copilot code review (Job 2) still runs + +2. **Post preview URLs and trigger Copilot review:** + - Use `actions/github-script@v7` to post a PR comment listing the preview + URLs from Job 1, formatted as clickable links + - Post a follow-up comment tagging `@copilot` with instructions to review + the rendered pages at the preview URLs.
The comment should instruct + Copilot to check each page for: + - Raw shortcode syntax visible on the page (`{{<` or `{{%`) + - Placeholder text that should have been replaced + - Broken layouts: overlapping text, missing images, collapsed sections + - Code blocks rendered incorrectly (raw HTML/Markdown fences visible) + - Navigation/sidebar entries correct + - Visible 404 or error state + - Product name inconsistencies in the rendered page header/breadcrumbs + - The review instruction template is stored in + `.github/prompts/copilot-visual-review.md` for maintainability + - Preview URL count capped at 50 pages (matching `MAX_PAGES` in + `detect-preview-pages.js`) + +3. **Comment upsert pattern:** + - Visual review comments use a marker-based upsert pattern — the workflow + updates an existing comment if one with the marker exists, otherwise + creates a new one. This prevents duplicate comments on `synchronize` + events. + +### 2.5 — Workflow failure handling + +- If preview deployment times out: skip Copilot visual review (Job 3), + Copilot code review (Job 2) still runs independently. Post a comment + explaining visual review was skipped. +- If Copilot does not respond to the `@copilot` mention: the preview URLs + remain in the comment for human review. +- Never block PR merge on workflow failures — the workflow adds comments + but does not set required status checks or manage labels. + +--- + +## Phase 3: Documentation and Agent Instructions + +### 3.1 — Instruction file architecture + +**Principle:** One `CLAUDE.md` that references role-specific files. No per-role +CLAUDE files — Claude Code only reads one `CLAUDE.md` per directory level. The +role context comes from the task prompt (GitHub Actions workflow), not the config +file.
+ +``` +CLAUDE.md ← lightweight pointer (already exists) + ├── references .github/LABEL_GUIDE.md ← label taxonomy + usage + ├── references .claude/agents/ ← role-specific agent instructions + │ ├── doc-triage-agent.md ← triage + auto-label logic + │ └── doc-review-agent.md ← local review sessions (Claude Code) + └── references .github/instructions/ ← Copilot auto-loaded instructions + └── content-review.instructions.md ← review criteria for content/**/*.md +``` + +**How review roles are assigned at runtime:** +- **Copilot code review (CI):** GitHub's native reviewer. Auto-loads + instruction files from `.github/instructions/` based on changed file + patterns. No custom prompt or API key needed. +- **Copilot visual review (CI):** Triggered by `@copilot` mention in a PR + comment with preview URLs and a review template. +- **Claude local review:** Uses `.claude/agents/doc-review-agent.md` for + local Claude Code sessions. Not used in CI. +- Shared rules (style guide, frontmatter, shortcodes) stay in the existing + referenced files (`DOCS-CONTRIBUTING.md`, `DOCS-SHORTCODES.md`, etc.) +- No duplication — each instruction file says what's unique to that context + +### 3.2 — Agent instruction files + +#### `.claude/agents/doc-triage-agent.md` + +Role-specific instructions for issue/PR triage. Contents: + +- **Label taxonomy** — Full label list with categories, colors, descriptions +- **Path-to-product mapping** — Which content paths map to which `product:*` labels +- **Priority rules** — How to assess priority based on product, scope, and issue type +- **Decision logic** — When to apply `agent-ready`, `waiting:*`, `review:needs-human` +- **Migration context** — Old label → new label mapping (useful during transition) + +This file does NOT duplicate style guide rules. It references +`DOCS-CONTRIBUTING.md` for those. + +#### `.claude/agents/doc-review-agent.md` + +Role-specific instructions for **local** Claude Code review sessions. 
This +file is NOT used in CI — the CI review is handled by Copilot using +`.github/instructions/content-review.instructions.md`. + +Contents: + +- **Review scope** — Markdown diff review only (frontmatter, shortcodes, + semantic line feeds, heading hierarchy, terminology, links, shared content). +- **Severity classification** — BLOCKING / WARNING / INFO definitions with examples +- **Output format** — Structured review comment template + +This file references `DOCS-CONTRIBUTING.md` for style rules and +`DOCS-SHORTCODES.md` for shortcode syntax — it does NOT restate them. + +### 3.3 — Label usage guide + +**File:** `.github/LABEL_GUIDE.md` + +Contents: +- Label categories with descriptions and colors +- Common workflows (issue triage, DAR pipeline, manual work) +- GitHub filter queries for agents and humans +- Auto-labeling behavior reference + +### 3.4 — Update existing pointer files + +**`CLAUDE.md`** — Add one line to the "Full instruction resources" list: +```markdown +- [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) - Label taxonomy and pipeline usage +``` + +**`AGENTS.md`** — Add a section referencing the label guide and agent roles: +```markdown +## Doc Review Pipeline +- Label guide: `.github/LABEL_GUIDE.md` +- Triage agent: `.claude/agents/doc-triage-agent.md` +- Review agent: `.claude/agents/doc-review-agent.md` +``` + +**`.github/copilot-instructions.md`** — Add the label guide to the +"Specialized Resources" table. + +These are small additions — no restructuring of existing files. + +### 3.5 — Review instruction files + +#### `.github/instructions/content-review.instructions.md` (Copilot code review) + +Auto-loaded by Copilot for PRs that change `content/**/*.md` files. Contains +the review criteria (frontmatter, shortcodes, heading hierarchy, terminology, +links, code blocks) with severity classification. + +This file replaces the original `.github/prompts/doc-review.md` Claude prompt. 
+The review criteria are the same but delivered through Copilot's native +instruction file mechanism instead of a custom action. + +#### `.github/templates/review-comment.md` (shared format) + +Shared definitions for severity levels, comment structure, and result → label +mapping. Used by `doc-review-agent.md` (local review sessions) and the +Copilot visual review template. + +#### Copilot visual review template + +The `@copilot` visual review comment is constructed inline in the +`doc-review.yml` workflow using the review template from +`.github/templates/review-comment.md`. Contains: + +- The visual review checklist (raw shortcodes, broken layouts, 404s, etc.) +- Instructions for analyzing the rendered pages at the preview URLs +- Output format guidance (what to flag, severity levels) + +--- + +## Future Phases (Not In Scope) + +These are explicitly **not** part of this plan. Documented here for context. + +### v2 — Screenshot-based visual review +- Add Playwright screenshot capture script (`.github/scripts/capture-screenshots.js`) + for design/layout PRs where HTML analysis isn't sufficient. +- Capture full-page PNGs of preview pages, upload as workflow artifacts. +- Useful for PRs touching `layouts/`, `assets/css/`, or template changes + where visual regression matters. +- The existing `scripts/puppeteer/screenshot.js` remains for local debugging; + the CI script should use Playwright for reliability. + +### v3 — Stale PR management +- Cron job that scans for stale PRs (draft >3 days with no review activity) + and pings the author. +- Metrics tracking: % of PRs that pass Copilot review on first attempt. + +### v4 — Agent-driven issue resolution +- Auto-assign doc issues to agents based on `agent-ready` label. +- Claude or Copilot drafts the fix, then the other agent reviews. +- Closes the loop: issue → draft → review → human approval. + +--- + +## Decisions (Resolved) + +### Q1: How should Copilot review rendered pages? 
— RESOLVED + +**Decision:** Copilot reviews rendered HTML at public preview URLs — no +screenshots needed. Job 3 posts preview URLs in a PR comment, then tags +`@copilot` with a review prompt. See section 2.5 for implementation details. + +This approach works because: +- Preview pages are publicly accessible at `influxdata.github.io/docs-v2/pr-preview/pr-{N}/` +- Copilot can analyze HTML content at public URLs +- No screenshot capture, image upload, or artifact management required + +Screenshot capture is deferred to Future Phases (v2) for design/layout PRs +where visual regression testing matters. + +### Q2: Should the review workflow be a required status check? — RESOLVED + +**Decision:** No. Start as advisory (comments only). The workflow posts review +comments but does not set required status checks or manage labels. `review:*` +labels are applied manually after review. Make it required only after the team +confirms the false-positive rate is acceptable (see Future Phases). + +### Q3: Should screenshots use Playwright or Puppeteer? — DEFERRED + +**Decision:** Deferred to Future Phases (v2). The current implementation +reviews rendered HTML at preview URLs, not screenshots. When screenshot +capture is added later, use Playwright for CI and keep Puppeteer for local +debugging. + +### Q4: How to handle the `pr-preview.yml` dependency? — RESOLVED + +**Decision:** Option A — poll the preview URL with timeout. Job 3 polls +`https://influxdata.github.io/docs-v2/pr-preview/pr-{N}/` with `curl --head` +every 15 seconds until it returns 200, with a 10-minute timeout. If timeout is +reached, skip Copilot visual review; Copilot code review (Job 2) still runs +independently. + +Rationale: Polling is simple, self-contained, and resilient. The URL pattern is +deterministic. Option B (`workflow_run`) adds complexity and doesn't handle +cases where preview doesn't deploy. Option C (combined workflow) makes the +workflow too large and eliminates the parallelism benefit. 
+ +### Q5: Cost and rate limiting — RESOLVED + +**Decision:** Acceptable. Both code review and visual review use the repo's +Copilot allocation. No external API keys or per-call costs. + +Mitigations already designed into the workflow: +- `paths` filter ensures only doc-content PRs trigger the workflow. +- `skip-review` label allows trivial PRs to opt out. +- Concurrency group cancels in-progress reviews when the PR is updated. +- Preview URL count is capped at 50 pages (matching `MAX_PAGES` in + `resolve-review-urls.js`). +- Draft and fork PRs are skipped entirely. + +### Q6: Label separator convention — RESOLVED + +**Decision:** Use colons (`:`) everywhere. No slashes. One separator for +consistency — expecting humans or agents to infer different semantics from +separator choice is unrealistic. Mutually exclusive behavior (e.g., `review:*` +labels) is enforced in workflow code, not punctuation. + +### Q7: Human approval mechanism — RESOLVED + +**Decision:** Use GitHub's native PR review system (CODEOWNERS file) for human +approval. No `approval:codeowner` label. The `review:*` labels are exclusively +for automated pipeline outcomes. + +### Q8: Product path mapping — RESOLVED + +**Decision:** Extend `data/products.yml` with `content_path` and `label_group` +fields. This file becomes the single source of truth for path-to-product +resolution, used by the auto-label workflow, matrix-generator, and documentation +(AGENTS.md). Eliminates duplicated mappings across multiple files. + +### Q9: `sync-plugin-docs` label migration — RESOLVED + +**Decision:** Migrate to `source:sync` (not `source:auto-detected`). Plugin +sync is a distinct operation from change detection. `source:sync` is general +enough to cover future external repo syncs without being hyper-specific. + +### Q10: Multi-product and shared content labeling — RESOLVED + +**Decision:** Auto-labeling is additive — apply all matching `product:*` labels. 
+Changes to `content/shared/` get the `product:shared` label plus all expanded +product labels (resolved via `expandSharedContentChanges()`). + +--- + +## Risk Assessment + +| Risk | Impact | Mitigation | +|------|--------|------------| +| Preview not deployed in time | Low | 10-minute polling timeout, fall back to code-only review | +| False positives in review | Medium | Start as advisory (not required check), iterate instruction files | +| Label migration data loss | Low | Migrate before deleting; human verification gate | +| Copilot visual review misses issues | Medium | Preview URLs remain in comment for human review; start advisory | +| Copilot code review quality | Medium | Review criteria in `.github/instructions/` can be iterated; local Claude review available as backup | +| Product mapping drift | Low | Single source of truth in `data/products.yml`; auto-label and matrix-generator both derive from it | + +--- + +## File Summary + +Files to create or modify: + +| Action | File | Phase | Status | +|--------|------|-------|--------| +| Modify | `data/products.yml` | 1.0 | Done | +| Modify | `data/labels.yml` | 1.1 | Done | +| Create | `helper-scripts/label-migration/create-labels.sh` | 1.2 | Done | +| Create | `helper-scripts/label-migration/migrate-labels.sh` | 1.2 | Done | +| Create | `helper-scripts/label-migration/delete-labels.sh` | 1.2 | Done | +| Create | `helper-scripts/label-migration/README.md` | 1.2 | Done | +| Create | `.github/workflows/auto-label.yml` | 1.3 | Done | +| Create | `.github/workflows/doc-review.yml` | 2.1 | Done | +| Create | `.claude/agents/doc-triage-agent.md` | 3.2 | Done | +| Create | `.claude/agents/doc-review-agent.md` | 3.2 | Done | +| Create | `.github/LABEL_GUIDE.md` | 3.3 | Done | +| Create | `.github/instructions/content-review.instructions.md` | 3.5 | Done | +| Create | `.github/templates/review-comment.md` | 2.5/3.5 | Done | +| Modify | `CLAUDE.md` | 3.4 | Done | +| Modify | `AGENTS.md` | 3.4 | Done | +| Modify | 
`.github/copilot-instructions.md` | 3.4 | Done | + +--- + +## Implementation Order + +1. ~~**Phase 1.0** — Extend `data/products.yml` with `content_path` and `label_group`~~ ✅ +2. ~~**Phase 1.1–1.2** — Create label migration scripts~~ ✅ +3. ~~**Phase 1.3** — Create auto-label workflow~~ ✅ +4. ~~**Execute label migration** — Run scripts, then manual cleanup~~ ✅ +5. ~~**Phase 3.2** — Create agent instruction files~~ ✅ +6. ~~**Phase 2.1–2.3** — Workflow skeleton + URL resolution + Copilot code review~~ ✅ +7. ~~**Phase 2.5** — Copilot visual review job~~ ✅ +8. ~~**Phase 3.3–3.5** — Label guide, instruction files, pointer updates~~ ✅ +9. ~~**Test end-to-end** — Triggered workflows via `workflow_dispatch` against PR #6890~~ ✅ + +### End-to-end test results (2026-03-09) + +Triggered via `workflow_dispatch` with `pr_number=6890` on branch +`claude/triage-agent-plan-EOY0u`. + +| Workflow | Job | Result | Notes | +|----------|-----|--------|-------| +| Auto-label PRs | auto-label | Pass | Loaded 14 path mappings, 0 product labels (correct — no content changes) | +| Doc Review | resolve-urls | Pass | 0 preview URLs (correct — no content changes) | +| Doc Review | copilot-review | Pass | `copilot-reviews` added as reviewer | +| Doc Review | copilot-visual-review | Skipped | Correct — 0 URLs to review | + +**Fixes applied during testing:** +- `npm ci` replaced with targeted `js-yaml` install (sparse checkout lacks lock file) +- Added `workflow_dispatch` with `pr_number` input for on-demand re-runs + +**Remaining:** Visual review (Job 3) needs a content-changing PR to fully exercise +the preview URL polling and Copilot `@copilot` mention flow. 
diff --git a/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml b/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml index 382af5ce3f..0331a501c5 100644 --- a/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml +++ b/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml @@ -1,7 +1,7 @@ name: Sync Plugin Documentation description: Request synchronization of plugin documentation from influxdb3_plugins repository title: "Sync plugin docs: [PLUGIN_NAMES]" -labels: ["sync-plugin-docs", "documentation", "automation"] +labels: ["source:sync", "documentation", "automation"] assignees: [] body: - type: markdown diff --git a/.github/LABEL_GUIDE.md b/.github/LABEL_GUIDE.md new file mode 100644 index 0000000000..14de676703 --- /dev/null +++ b/.github/LABEL_GUIDE.md @@ -0,0 +1,100 @@ +# Label Guide + +Label taxonomy for the docs-v2 repository. Used by automation workflows, +triage agents, and human contributors. + +## Label Definitions + +- **Product labels** (`product:*`): Derived from + [data/products.yml](../data/products.yml) — each product's `label_group` + field determines the label name, `content_path` determines which files + trigger it. Applied by the [auto-label workflow](workflows/auto-label.yml). + Multi-product PRs get all matching labels. Shared content changes get + `product:shared` plus labels for all products that reference the shared file. + +- **Source, waiting, workflow, and review labels**: Defined in + [data/labels.yml](../data/labels.yml) — names, colors, and descriptions. + +- **Review label behavior** (severity levels, result rules, result → label + mapping): Defined in + [templates/review-comment.md](templates/review-comment.md). + +Human approval uses GitHub's native PR review system (CODEOWNERS), not labels. 
+ +## Renamed Labels + +| Old Name | New Name | +|----------|----------| +| `AI assistant tooling` | `ai:tooling` | +| `ci:testing-and-validation` | `ci:testing` | +| `design` | `area:site-ui` | +| `InfluxDB Cloud` | `product:v2-cloud` | +| `user feedback` | `source:feedback` | +| `ai:tooling` | `area:agents` | + +## Deleted Labels + +| Label | Replacement | Reason | +|-------|-------------|--------| +| `Pending PR` | `waiting:pr` | Consolidated into `waiting:` namespace | +| `broke-link` | `area:links` | Consolidated into `area:` namespace | + +## Common Workflows + +### Issue triage + +1. Read issue → identify product(s) → apply `product:*` labels +2. Apply `source:*` label if applicable +3. Determine readiness → apply `agent-ready` or `waiting:*` + +### PR review pipeline + +1. PR opened → auto-label applies `product:*` labels +2. Doc review workflow triggers (unless `skip-review` is present) +3. Copilot code review runs on the diff (uses + [`.github/instructions/`](instructions/) files from the base branch) +4. Copilot visual review checks rendered preview pages +5. Human reviewer uses GitHub's PR review for final approval + +Review labels (`review:*`) are applied manually after review, not by CI. 
+ +### GitHub Filter Queries + +``` +# PRs needing human review +label:review:needs-human is:pr is:open + +# Agent-ready issues +label:agent-ready is:issue is:open -label:waiting:engineering -label:waiting:product + +# All InfluxDB 3 issues +label:product:v3-monolith,product:v3-distributed is:issue is:open + +# Blocked issues +label:waiting:engineering,waiting:product is:issue is:open + +# PRs that skipped review +label:skip-review is:pr +``` + +## Auto-labeling Behavior + +The [auto-label workflow](workflows/auto-label.yml) runs on +`pull_request: [opened, synchronize]` and: + +- Reads path-to-product mappings from `data/products.yml` +- Matches changed files to product labels +- Expands shared content changes to affected product labels +- Adds labels idempotently (skips labels already present) +- Skips draft and fork PRs + +## References + +- Label definitions: `data/labels.yml` +- Product definitions: `data/products.yml` +- Review comment format: `.github/templates/review-comment.md` +- Auto-label workflow: `.github/workflows/auto-label.yml` +- Doc review workflow: `.github/workflows/doc-review.yml` +- Triage agent: `.claude/agents/doc-triage-agent.md` +- Review agent: `.claude/agents/doc-review-agent.md` +- Migration scripts: `helper-scripts/label-migration/` diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index b31410cbf3..975b1a0f02 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -2,261 +2,61 @@ > **For GitHub Copilot and other AI coding agents** > -> This is the primary instruction file for GitHub Copilot working with the InfluxData documentation site. 
-> > **Instruction resources**: > -> - [.github/agents/copilot-instructions-agent.md](agents/copilot-instructions-agent.md) - **Creating/improving Copilot instructions** -> - [.claude/skills/](../.claude/skills/) - **Detailed workflows** (content editing, testing, InfluxDB setup, templates) > - [.github/instructions/](instructions/) - **Pattern-specific** (auto-loaded by file type) -> - [.github/agents/](agents/) - **Specialist agents** (TypeScript/Hugo, Copilot management) -> - [AGENTS.md](../AGENTS.md), [CLAUDE.md](../CLAUDE.md) - General AI assistant guides +> - [AGENTS.md](../AGENTS.md) - Shared project guidelines (style, constraints, content structure) +> - [.github/LABEL_GUIDE.md](LABEL_GUIDE.md) - Label taxonomy and review pipeline ## Quick Reference -| Task | Command | Time | Details | -| ---------------- | ----------------------------------------------------- | ------- | ------------------------------------- | -| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | \~4s | Skip Cypress for CI | -| Build | `npx hugo --quiet` | \~75s | NEVER CANCEL | -| Dev Server | `npx hugo server` | \~92s | Port 1313 | -| Create Docs | `docs create --products ` | varies | AI-assisted scaffolding | -| Create & Open | `docs create --products --open` | instant | Non-blocking (background) | -| Create & Wait | `docs create --products --open --wait` | varies | Blocking (interactive) | -| Edit Docs | `docs edit ` | instant | Non-blocking (background) | -| Edit Docs (wait) | `docs edit --wait` | varies | Blocking (interactive) | -| List Files | `docs edit --list` | instant | Show files without opening | -| Add Placeholders | `docs placeholders ` | instant | Add placeholder syntax to code blocks | -| Audit Docs | `docs audit --products ` | varies | Audit documentation coverage | -| Release Notes | `docs release-notes --products ` | varies | Generate release notes from commits | -| Test All | `yarn test:codeblocks:all` | 15-45m | NEVER CANCEL | -| Lint | `yarn lint` | \~1m | 
Pre-commit checks | +| Task | Command | Time | +| ---------------- | ----------------------------------------------------- | ------- | +| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | \~4s | +| Build | `npx hugo --quiet` | \~75s | +| Dev Server | `npx hugo server` | \~92s | +| Create Docs | `docs create --products ` | varies | +| Edit Docs | `docs edit ` | instant | +| Add Placeholders | `docs placeholders ` | instant | +| Audit Docs | `docs audit --products ` | varies | +| Test All | `yarn test:codeblocks:all` | 15-45m | +| Lint | `yarn lint` | \~1m | -## CLI Tools +**NEVER CANCEL** Hugo builds (\~75s) or test runs (15-45m). -**For when to use CLI vs direct editing**, see [docs-cli-workflow skill](../.claude/skills/docs-cli-workflow/SKILL.md). +## CLI Tools ```bash -# Create new documentation (AI-assisted scaffolding) -docs create --products -docs create --products influxdb3_core --open # Non-blocking -docs create --products influxdb3_core --open --wait # Blocking - -# Find and edit documentation by URL -docs edit # Non-blocking (agent-friendly) -docs edit --list # List files only -docs edit --wait # Wait for editor - -# Other tools -docs placeholders # Add placeholder syntax to code blocks -docs audit --products # Audit documentation coverage -docs release-notes --products - -# Get help -docs --help -docs create --help +docs --help # Full reference ``` -**Key points**: - -- Accepts both product keys (`influxdb3_core`) and paths (`/influxdb3/core`) -- Non-blocking by default (agent-friendly) -- Use `--wait` for interactive editing -- `--products` and `--repos` are mutually exclusive for audit/release-notes +Non-blocking by default. Use `--wait` for interactive editing. 
## Workflows -### Content Editing - -See [content-editing skill](../.claude/skills/content-editing/SKILL.md) for complete workflow: - -- Creating/editing content with CLI -- Shared content management -- Testing and validation - -### Testing - -See [DOCS-TESTING.md](../DOCS-TESTING.md) and [cypress-e2e-testing skill](../.claude/skills/cypress-e2e-testing/SKILL.md). - -Quick tests (NEVER CANCEL long-running): - -```bash -yarn test:codeblocks:all # 15-45m -yarn test:links # 1-5m -yarn lint # 1m -``` - -### InfluxDB 3 Setup - -See [influxdb3-test-setup skill](../.claude/skills/influxdb3-test-setup/SKILL.md). - -Quick setup: - -```bash -./test/scripts/init-influxdb3.sh core # Per-worktree, port 8282 -./test/scripts/init-influxdb3.sh enterprise # Shared, port 8181 -./test/scripts/init-influxdb3.sh all # Both -``` - -### Hugo Template Development - -See [hugo-template-dev skill](../.claude/skills/hugo-template-dev/SKILL.md) for template syntax, data access, and testing strategies. - -## Repository Structure - -### Content Organization - -- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer) -- **InfluxDB v2**: `/content/influxdb/` (v2, cloud) -- **InfluxDB v1**: `/content/influxdb/v1` -- **InfluxDB Enterprise (v1)**: `/content/enterprise_influxdb/v1/` -- **Telegraf**: `/content/telegraf/v1/` -- **Kapacitor**: `/content/kapacitor/` -- **Chronograf**: `/content/chronograf/` -- **Flux**: `/content/flux/` -- **Examples**: `/content/example.md` (comprehensive shortcode reference) -- **Shared content**: `/content/shared/` - -### Key Files - -- **Config**: `/config/_default/`, `package.json`, `compose.yaml`, `lefthook.yml` -- **Testing**: `cypress.config.js`, `pytest.ini`, `.vale.ini` -- **Assets**: `/assets/` (JS, CSS), `/layouts/` (templates), `/data/` (YAML/JSON) -- **Build output**: `/public/` (\~529MB, gitignored) - -## Technology Stack - -- **Hugo** - Static site generator -- **Node.js/Yarn** - Package management 
-- **Testing**: Pytest, Cypress, link-checker, Vale -- **Tools**: Docker, ESLint, Prettier, Lefthook - -## Common Issues - -### Network Restrictions +- **Content editing**: See [content-editing skill](../.claude/skills/content-editing/SKILL.md) +- **Testing**: See [DOCS-TESTING.md](../DOCS-TESTING.md) +- **Hugo templates**: See [hugo-template-dev skill](../.claude/skills/hugo-template-dev/SKILL.md) -Commands that may fail in restricted environments: +## Product and Content Paths -- Docker builds (external repos) -- `docker compose up local-dev` (Alpine packages) -- Cypress installation (use `CYPRESS_INSTALL_BINARY=0`) - -### Pre-commit Validation - -```bash -# Quick validation before commits -yarn prettier --write "**/*.{css,js,ts,jsx,tsx}" -yarn eslint assets/js/**/*.js -npx hugo --quiet -``` - -## Documentation Coverage - -- **InfluxDB 3**: Core, Enterprise, Cloud (Dedicated/Serverless), Clustered, Explorer, plugins -- **InfluxDB v2/v1**: OSS, Cloud, Enterprise -- **Tools**: Telegraf, Kapacitor, Chronograf, Flux -- **API Reference**: All InfluxDB editions +Defined in [data/products.yml](../data/products.yml). ## Content Guidelines -**Style guide**: Google Developer Documentation Style Guide\ -**Voice**: Active, present tense, second person\ -**Line breaks**: Semantic line feeds (one sentence per line)\ -**Files**: lowercase-with-hyphens.md - -### Quick Shortcodes - -````markdown -# Callouts (GitHub-style alerts) -> [!Note] / [!Warning] / [!Tip] / [!Important] / [!Caution] - -# Required elements -{{< req >}} -{{< req type="key" >}} - -# Code placeholders -```sh { placeholders="DATABASE_NAME|API_TOKEN" } -curl https://example.com/api?db=DATABASE_NAME -```` - -```` - -**Complete reference**: [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - -### Required Frontmatter - -```yaml -title: # Required -description: # Required -menu: - product_menu_key: - name: # Optional - parent: # Optional -weight: # Required: 1-99, 101-199, 201-299... 
-```` - -**Shared content**: Add `source: /shared/path/to/file.md` - -**Complete reference**: [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - -### Resources - -- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Workflow & guidelines -- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Complete shortcodes -- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Complete metadata -- [DOCS-TESTING.md](../DOCS-TESTING.md) - Testing procedures -- [content/example.md](../content/example.md) - Working examples - -## Troubleshooting - -| Issue | Solution | -| ------------------------ | ---------------------------------------------------------------- | -| Pytest collected 0 items | Use `python` not `py` for language identifier | -| Hugo build errors | Check `/config/_default/` | -| Docker build fails | Expected in restricted networks - use local Hugo | -| Cypress install fails | Use `CYPRESS_INSTALL_BINARY=0 yarn install` | -| Link validation slow | Test specific files: `yarn test:links content/file.md` | -| Vale "0 errors in stdin" | File is outside repo - Vale Docker can only access repo files | -| Vale false positives | Add terms to `.ci/vale/styles/InfluxDataDocs/Terms/ignore.txt` | -| Vale duration warnings | Duration literals (`30d`) are valid - check InfluxDataDocs.Units | - -## Specialized Instructions +- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Style, workflow, commit format +- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Shortcode reference +- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Frontmatter reference +- [content/example.md](../content/example.md) - Working shortcode examples -### File Pattern-Specific Instructions +## File Pattern-Specific Instructions -These instructions are automatically loaded by GitHub Copilot based on the files you're working with: +Auto-loaded by GitHub Copilot based on changed files: | Pattern | File | Description | | ------------------------ | ----------------------------------------------------------------- | 
------------------------------------------------ | | `content/**/*.md` | [content.instructions.md](instructions/content.instructions.md) | Content file guidelines, frontmatter, shortcodes | +| `content/**/*.md` | [content-review.instructions.md](instructions/content-review.instructions.md) | Review criteria for content changes | | `layouts/**/*.html` | [layouts.instructions.md](instructions/layouts.instructions.md) | Shortcode implementation patterns and testing | | `api-docs/**/*.yml` | [api-docs.instructions.md](instructions/api-docs.instructions.md) | OpenAPI spec workflow | | `assets/js/**/*.{js,ts}` | [assets.instructions.md](instructions/assets.instructions.md) | TypeScript/JavaScript and CSS development | - -### Specialized Resources - -**Custom Agents** (`.github/agents/`): - -- [typescript-hugo-agent.md](agents/typescript-hugo-agent.md) - TypeScript/Hugo development -- [copilot-instructions-agent.md](agents/copilot-instructions-agent.md) - Managing Copilot instructions - -**Claude Skills** (`.claude/skills/` - detailed workflows): - -- [content-editing](../.claude/skills/content-editing/SKILL.md) - Complete content workflow -- [docs-cli-workflow](../.claude/skills/docs-cli-workflow/SKILL.md) - CLI decision guidance -- [cypress-e2e-testing](../.claude/skills/cypress-e2e-testing/SKILL.md) - E2E testing -- [hugo-template-dev](../.claude/skills/hugo-template-dev/SKILL.md) - Hugo templates -- [influxdb3-test-setup](../.claude/skills/influxdb3-test-setup/SKILL.md) - InfluxDB 3 setup -- [vale-linting](../.claude/skills/vale-linting/SKILL.md) - Vale configuration and debugging - -**Documentation**: - -- [DOCS-TESTING.md](../DOCS-TESTING.md) - Testing procedures -- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Contribution guidelines -- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Frontmatter reference -- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Shortcodes reference - -## Important Notes - -- This is a large site (5,359+ pages) with complex build 
processes -- **NEVER CANCEL** long-running operations (Hugo builds, tests) -- Set appropriate timeouts: Hugo build (180s+), tests (30+ minutes) diff --git a/.github/instructions/content-review.instructions.md b/.github/instructions/content-review.instructions.md new file mode 100644 index 0000000000..185786556d --- /dev/null +++ b/.github/instructions/content-review.instructions.md @@ -0,0 +1,76 @@ +--- +applyTo: "content/**/*.md" +--- + +# Content Review Criteria + +Review documentation changes against these rules. Only flag issues you are +confident about. Reference the linked docs for detailed rules. + +## Frontmatter + +Rules: [DOCS-FRONTMATTER.md](../../DOCS-FRONTMATTER.md) + +- `title` and `description` are required on every page +- `menu` structure matches the product's menu key +- `weight` is present for pages in navigation +- `source` paths point to valid `/shared/` paths +- No duplicate or conflicting frontmatter keys + +## Shortcode Syntax + +Rules: [DOCS-SHORTCODES.md](../../DOCS-SHORTCODES.md) + +- `{{< >}}` for HTML output, `{{% %}}` for Markdown-processed content +- Closing tags match opening tags +- Required parameters are present +- Callouts use GitHub-style syntax: `> [!Note]`, `> [!Warning]`, etc. 
+ +## Heading Hierarchy + +- No h1 headings in content (h1 comes from `title` frontmatter) +- Headings don't skip levels (h2 -> h4 without h3) + +## Semantic Line Feeds + +Rules: [DOCS-CONTRIBUTING.md](../../DOCS-CONTRIBUTING.md) + +- One sentence per line (better diffs) +- Long sentences on their own line, not concatenated + +## Terminology and Product Names + +Products defined in [data/products.yml](../../data/products.yml): + +- Use official names: "InfluxDB 3 Core", "InfluxDB 3 Enterprise", + "InfluxDB Cloud Serverless", "InfluxDB Cloud Dedicated" +- Don't mix v2/v3 terminology (e.g., "bucket" in v3 Core docs) +- Version references match the content path + +## Links + +- Internal links use relative paths or Hugo `relref` shortcodes +- No hardcoded `docs.influxdata.com` links in content files +- Anchor links match actual heading IDs + +## Code Blocks + +- Use `python` not `py` for language identifiers (pytest requirement) +- Long options in CLI examples (`--output` not `-o`) +- Keep lines within 80 characters +- Include language identifier on fenced code blocks + +## Shared Content + +- `source:` frontmatter points to an existing shared file +- Shared files don't contain frontmatter (only content) +- Changes to shared content affect multiple products — flag if unintentional + +## Severity + +- **BLOCKING**: Broken rendering, wrong product names, missing required + frontmatter, malformed shortcodes, h1 in content body +- **WARNING**: Missing semantic line feeds, skipped heading levels, missing + `weight`, long CLI options not used +- **INFO**: Suggestions, code block missing language identifier, opportunities + to use shared content diff --git a/.github/prompts/copilot-visual-review.md b/.github/prompts/copilot-visual-review.md new file mode 100644 index 0000000000..2ce9de1545 --- /dev/null +++ b/.github/prompts/copilot-visual-review.md @@ -0,0 +1,34 @@ +# Visual Review Prompt + +Review the rendered documentation pages at the preview URLs listed below. 
+Check each page for visual and structural issues that are invisible in the +Markdown source. + +## Checklist + +For each preview URL, verify: + +- [ ] **No raw shortcodes** — No `{{<` or `{{%` syntax visible on the page +- [ ] **No placeholder text** — No `PLACEHOLDER`, `TODO`, `FIXME`, or + template variables visible in rendered content +- [ ] **Layout intact** — No overlapping text, missing images, or collapsed + sections +- [ ] **Code blocks render correctly** — No raw HTML fences or Markdown + syntax visible inside code blocks +- [ ] **Product names correct** — Page header, breadcrumbs, and sidebar show + the correct product name +- [ ] **No 404s or errors** — Page loads without error states +- [ ] **Navigation correct** — Sidebar entries link to the right pages and + the page appears in the expected location + +## Output + +Follow the shared review comment format, severity definitions, and label +mapping in +[templates/review-comment.md](../templates/review-comment.md). + +Adapt the "Files Reviewed" section to list preview URLs instead of file +paths. + +## Preview URLs + diff --git a/.github/scripts/resolve-review-urls.js b/.github/scripts/resolve-review-urls.js new file mode 100644 index 0000000000..8869555d75 --- /dev/null +++ b/.github/scripts/resolve-review-urls.js @@ -0,0 +1,38 @@ +/** + * Resolve Review URLs + * + * Maps changed content files to URL paths for the doc-review workflow. + * Reuses the same content-utils functions as detect-preview-pages.js. 
+ * + * Outputs (for GitHub Actions): + * - urls: JSON array of URL paths + * - url-count: Number of URLs + */ + +import { appendFileSync } from 'fs'; +import { + getChangedContentFiles, + mapContentToPublic, +} from '../../scripts/lib/content-utils.js'; + +const GITHUB_OUTPUT = process.env.GITHUB_OUTPUT || '/dev/stdout'; +const BASE_REF = process.env.BASE_REF || 'origin/master'; +const MAX_PAGES = 50; + +if (!/^origin\/[a-zA-Z0-9._/-]+$/.test(BASE_REF)) { + console.error(`Invalid BASE_REF: ${BASE_REF}`); + process.exit(1); +} + +const changed = getChangedContentFiles(BASE_REF); +const htmlPaths = mapContentToPublic(changed, 'public'); + +const urls = Array.from(htmlPaths) + .sort() + .map((p) => '/' + p.replace(/^public\//, '').replace(/\/index\.html$/, '/')) + .slice(0, MAX_PAGES); + +appendFileSync(GITHUB_OUTPUT, `urls=${JSON.stringify(urls)}\n`); +appendFileSync(GITHUB_OUTPUT, `url-count=${urls.length}\n`); + +console.log(`Detected ${urls.length} preview URLs`); diff --git a/.github/scripts/workflow-utils.js b/.github/scripts/workflow-utils.js new file mode 100644 index 0000000000..38f49695a3 --- /dev/null +++ b/.github/scripts/workflow-utils.js @@ -0,0 +1,104 @@ +/** + * Workflow Utilities + * + * Canonical import for GitHub Actions workflow scripts. Re-exports shared + * utilities from scripts/lib/ and adds workflow-specific helpers. 
+ * + * Usage from github-script inline steps: + * + * const utils = await import(`${process.cwd()}/.github/scripts/workflow-utils.js`); + * const pathToLabel = await utils.getProductLabelMap(); + * const labels = utils.matchFilesToLabels(changedFiles, pathToLabel); + * + * Usage from .github/scripts/ ESM modules: + * + * import { getProductLabelMap, findPagesReferencingSharedContent } from './workflow-utils.js'; + */ + +import { readFileSync } from 'fs'; +import { findPagesReferencingSharedContent } from '../../scripts/lib/content-utils.js'; + +// --- Re-export content utilities --- +export { + findPagesReferencingSharedContent, + expandSharedContentChanges, + getChangedContentFiles, + mapContentToPublic, + categorizeContentFiles, + getSourceFromFrontmatter, +} from '../../scripts/lib/content-utils.js'; + +/** + * Build a Map of content path prefixes to product label names + * by reading data/products.yml. + * + * Requires `js-yaml` to be installed (e.g., `npm install js-yaml`). + * + * @param {string} [productsPath='data/products.yml'] - Path to products.yml + * @returns {Promise>} Map of "content/{path}/" → "product:{label_group}" + */ +export async function getProductLabelMap(productsPath = 'data/products.yml') { + const { load } = await import('js-yaml'); + const products = load(readFileSync(productsPath, 'utf8')); + const pathToLabel = new Map(); + + for (const product of Object.values(products)) { + const cp = product.content_path; + const lg = product.label_group; + if (!cp || !lg) continue; + + if (typeof cp === 'string' && typeof lg === 'string') { + pathToLabel.set(`content/${cp}/`, `product:${lg}`); + } else if (typeof cp === 'object' && typeof lg === 'object') { + for (const version of Object.keys(cp)) { + if (lg[version]) { + pathToLabel.set(`content/${cp[version]}/`, `product:${lg[version]}`); + } + } + } + } + + return pathToLabel; +} + +/** + * Match a list of file paths against the product label map. 
+ * For shared content files, expands to find affected products. + * + * @param {string[]} files - Changed file paths + * @param {Map} pathToLabel - From getProductLabelMap() + * @returns {Set} Set of label names to apply + */ +export function matchFilesToLabels(files, pathToLabel) { + const labels = new Set(); + + for (const file of files) { + if (file.startsWith('content/shared/')) { + labels.add('product:shared'); + + try { + const referencingPages = findPagesReferencingSharedContent(file); + for (const page of referencingPages) { + for (const [prefix, label] of pathToLabel) { + if (page.startsWith(prefix)) { + labels.add(label); + break; + } + } + } + } catch { + // Shared content expansion failed — product:shared still applied + } + continue; + } + + for (const [prefix, label] of pathToLabel) { + if (file.startsWith(prefix)) { + labels.add(label); + break; + } + } + } + + return labels; +} diff --git a/.github/templates/review-comment.md b/.github/templates/review-comment.md new file mode 100644 index 0000000000..790ff29128 --- /dev/null +++ b/.github/templates/review-comment.md @@ -0,0 +1,98 @@ +# Review Comment Format + +Shared definitions for severity levels, comment structure, and result → label +mapping. Used by doc-review-agent.md (local review sessions) and +copilot-visual-review.md (rendered page review). + +## Severity Levels + +### BLOCKING + +Issues that will cause incorrect rendering, broken pages, or misleading +content. These must be fixed before merge. 
+ +Examples: +- Missing required frontmatter (`title`, `description`) +- Unclosed or malformed shortcode tags +- Wrong product name in content (e.g., "InfluxDB 3" in v2 docs) +- Broken `source:` path for shared content +- h1 heading in content body +- Raw shortcode syntax visible on rendered page (`{{<` or `{{%`) +- 404 errors on preview pages +- Wrong product name in header or breadcrumbs + +### WARNING + +Style issues or minor visual problems that should be fixed but don't break +functionality or correctness. + +Examples: +- Missing semantic line feeds (multiple sentences on one line) +- Heading level skipped (h2 → h4) +- Long option not used in CLI examples (`-o` instead of `--output`) +- Missing `weight` in frontmatter +- Minor layout issues (overlapping text, collapsed sections) +- Missing images +- Placeholder text visible (`TODO`, `FIXME`) + +### INFO + +Suggestions and observations. Not problems. + +Examples: +- Opportunity to use a shared content file +- Unusually long page that could be split +- Code block missing language identifier +- Cosmetic improvements + +## Comment Structure + +Post a single review comment on the PR with this structure: + +```markdown +## Doc Review Summary + +**Result:** APPROVED | CHANGES REQUESTED | NEEDS HUMAN REVIEW + +### Issues Found + +#### BLOCKING + +- **file:line** — Description of the issue + - Suggested fix: ... + +#### WARNING + +- **file:line** — Description of the issue + +#### INFO + +- **file:line** — Observation + +### Files Reviewed + +- `path/to/file.md` — Brief summary of changes +``` + +Adapt the "Files Reviewed" section to the review context: +- **Source review:** list file paths from the diff +- **Visual review (Copilot):** list preview URLs instead of file paths + +## Result Rules + +- Zero BLOCKING issues → **APPROVED** +- Any BLOCKING issues → **CHANGES REQUESTED** +- Cannot determine severity or diff is ambiguous → **NEEDS HUMAN REVIEW** +- Only report issues you are confident about. Do not guess. 
+- Group issues by file when multiple issues exist in the same file. + +## Result → Label Mapping + +| Result | Label | +|--------|-------| +| APPROVED | `review:approved` | +| CHANGES REQUESTED | `review:changes-requested` | +| NEEDS HUMAN REVIEW | `review:needs-human` | + +Labels are mutually exclusive. Apply manually after review — Copilot code +review uses GitHub's native "Comment" review type and does not manage labels. diff --git a/.github/workflows/auto-label.yml b/.github/workflows/auto-label.yml new file mode 100644 index 0000000000..7dfccb43c2 --- /dev/null +++ b/.github/workflows/auto-label.yml @@ -0,0 +1,122 @@ +name: Auto-label PRs + +on: + pull_request: + types: [opened, synchronize] + workflow_dispatch: + inputs: + pr_number: + description: 'PR number to label' + required: true + type: number + +permissions: {} + +concurrency: + group: auto-label-${{ github.event.number || inputs.pr_number }} + cancel-in-progress: true + +jobs: + auto-label: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + # Skip draft PRs and fork PRs (workflow_dispatch always runs) + if: | + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository) + steps: + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + sparse-checkout: | + content + data/products.yml + scripts/lib/content-utils.js + .github/scripts/workflow-utils.js + package.json + sparse-checkout-cone-mode: false + + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 22 + + - name: Install js-yaml + run: npm install --no-save --ignore-scripts --no-package-lock --legacy-peer-deps js-yaml + + - name: Apply product labels + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 + with: + script: | + const { + getProductLabelMap, + matchFilesToLabels, + } = await 
import( + `${process.cwd()}/.github/scripts/workflow-utils.js` + ); + + const prNumber = + context.issue.number || + Number('${{ inputs.pr_number }}'); + + if (!prNumber) { + core.setFailed('No PR number available'); + return; + } + + // --- Build path-to-label mapping from products.yml --- + const pathToLabel = await getProductLabelMap(); + core.info( + `Loaded ${pathToLabel.size} path-to-label mappings from products.yml` + ); + + // --- Get changed files from the PR (paginated) --- + const files = await github.paginate( + github.rest.pulls.listFiles, + { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + per_page: 100, + } + ); + + const changedFiles = files.map(f => f.filename); + core.info(`PR has ${changedFiles.length} changed files`); + + // --- Match files to product labels --- + const labelsToAdd = matchFilesToLabels(changedFiles, pathToLabel); + + if (labelsToAdd.size === 0) { + core.info('No product labels to add'); + return; + } + + // --- Get existing PR labels to avoid duplicates --- + const { data: prData } = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + const existingLabels = new Set(prData.labels.map(l => l.name)); + const newLabels = [...labelsToAdd].filter( + l => !existingLabels.has(l) + ); + + if (newLabels.length === 0) { + core.info('All matching labels already present'); + return; + } + + // --- Apply labels --- + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: newLabels, + }); + + core.info(`Added labels: ${newLabels.join(', ')}`); diff --git a/.github/workflows/doc-review.yml b/.github/workflows/doc-review.yml new file mode 100644 index 0000000000..9ccf81b2ae --- /dev/null +++ b/.github/workflows/doc-review.yml @@ -0,0 +1,264 @@ +name: Doc Review + +on: + pull_request: + types: [opened, synchronize, ready_for_review] + paths: + - 'content/**' + - 
'layouts/**' + - 'assets/**' + - 'data/**' + workflow_dispatch: + inputs: + pr_number: + description: 'PR number to review' + required: true + type: number + +permissions: {} + +concurrency: + group: doc-review-${{ github.event.number || inputs.pr_number }} + cancel-in-progress: true + +jobs: + # ----------------------------------------------------------------- + # Job 1: Resolve preview URLs from changed content files + # ----------------------------------------------------------------- + resolve-urls: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: read + if: | + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository && + !contains(github.event.pull_request.labels.*.name, 'skip-review')) + outputs: + urls: ${{ steps.detect.outputs.urls }} + url-count: ${{ steps.detect.outputs.url-count }} + steps: + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + fetch-depth: 0 + sparse-checkout: | + content + data/products.yml + scripts/lib/content-utils.js + .github/scripts/resolve-review-urls.js + package.json + sparse-checkout-cone-mode: false + + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 22 + + - name: Resolve base ref + id: base + env: + GH_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} + run: | + if [ -n "${{ github.base_ref }}" ]; then + echo "ref=origin/${{ github.base_ref }}" >> "$GITHUB_OUTPUT" + else + BASE=$(gh pr view "$PR_NUMBER" --repo "${{ github.repository }}" --json baseRefName -q .baseRefName) + git fetch origin "$BASE" + echo "ref=origin/$BASE" >> "$GITHUB_OUTPUT" + fi + + - name: Detect changed pages + id: detect + env: + BASE_REF: ${{ steps.base.outputs.ref }} + run: node .github/scripts/resolve-review-urls.js + + # 
----------------------------------------------------------------- + # Job 2: Copilot code review (runs in parallel with Job 1) + # ----------------------------------------------------------------- + copilot-review: + runs-on: ubuntu-latest + permissions: + pull-requests: write + if: | + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository && + !contains(github.event.pull_request.labels.*.name, 'skip-review')) + steps: + - name: Request Copilot review + env: + GH_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} + REPO: ${{ github.repository }} + run: gh pr edit "$PR_NUMBER" --repo "$REPO" --add-reviewer "copilot-reviews" + + # ----------------------------------------------------------------- + # Job 3: Copilot visual review (depends on Job 1 for URLs) + # ----------------------------------------------------------------- + copilot-visual-review: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + needs: resolve-urls + if: needs.resolve-urls.result == 'success' && fromJson(needs.resolve-urls.outputs.url-count) > 0 + steps: + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + sparse-checkout: .github/prompts/copilot-visual-review.md + sparse-checkout-cone-mode: false + + - name: Wait for preview deployment + id: wait + env: + PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} + run: | + PREVIEW_URL="https://influxdata.github.io/docs-v2/pr-preview/pr-${PR_NUMBER}/" + TIMEOUT=600 # 10 minutes + INTERVAL=15 + ELAPSED=0 + + echo "Waiting for preview at ${PREVIEW_URL}" + + while [ "$ELAPSED" -lt "$TIMEOUT" ]; do + STATUS=$(curl -s -o /dev/null -L -w "%{http_code}" "$PREVIEW_URL" || echo "000") + if [ "$STATUS" = "200" ]; then + echo "Preview is live" + echo "available=true" >> "$GITHUB_OUTPUT" + exit 0 + fi + echo 
"Status: ${STATUS} (${ELAPSED}s / ${TIMEOUT}s)" + sleep "$INTERVAL" + ELAPSED=$((ELAPSED + INTERVAL)) + done + + echo "Preview deployment timed out after ${TIMEOUT}s" + echo "available=false" >> "$GITHUB_OUTPUT" + + - name: Post visual review request + if: steps.wait.outputs.available == 'true' + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 + env: + PREVIEW_URLS: ${{ needs.resolve-urls.outputs.urls }} + PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} + with: + script: | + const fs = require('fs'); + + let urls; + try { + urls = JSON.parse(process.env.PREVIEW_URLS); + } catch (e) { + core.warning(`Failed to parse PREVIEW_URLS: ${e.message}`); + return; + } + + const prNumber = context.issue.number || Number(process.env.PR_NUMBER); + const previewBase = `https://influxdata.github.io/docs-v2/pr-preview/pr-${prNumber}`; + + // Build preview URL list + const urlList = urls + .map(u => `- [${u}](${previewBase}${u})`) + .join('\n'); + + // Read the Copilot visual review template + const template = fs.readFileSync( + '.github/prompts/copilot-visual-review.md', + 'utf8' + ); + + const marker = ''; + const body = [ + marker, + '## Preview Pages for Review', + '', + `${urls.length} page(s) changed in this PR:`, + '', + '
', + 'Preview URLs', + '', + urlList, + '', + '
', + '', + '---', + '', + `@copilot please review the preview pages listed above using the template below:`, + '', + template.trim(), + '', + urlList, + ].join('\n'); + + // Update existing comment or create new one + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + const existing = comments.find(c => c.body.includes(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + body, + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body, + }); + } + + core.info(`Posted visual review request with ${urls.length} URLs`); + + - name: Post timeout notice + if: steps.wait.outputs.available == 'false' + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 + env: + PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} + with: + script: | + const prNumber = context.issue.number || Number(process.env.PR_NUMBER); + const marker = ''; + const body = [ + marker, + '## Visual Review Skipped', + '', + 'The PR preview deployment did not become available within 10 minutes.', + 'Visual review was skipped. 
The Copilot code review (Job 2) still ran.', + '', + 'To trigger visual review manually, re-run this workflow after the', + 'preview is deployed.', + ].join('\n'); + + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + const existing = comments.find(c => c.body.includes(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + body, + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body, + }); + } diff --git a/.github/workflows/sync-plugins.yml b/.github/workflows/sync-plugins.yml index 50c9ecb4ec..d840f42475 100644 --- a/.github/workflows/sync-plugins.yml +++ b/.github/workflows/sync-plugins.yml @@ -25,7 +25,7 @@ jobs: # Only run on issues with sync-plugin-docs label or manual dispatch if: | github.event_name == 'workflow_dispatch' || - (github.event_name == 'issues' && contains(github.event.issue.labels.*.name, 'sync-plugin-docs')) + (github.event_name == 'issues' && contains(github.event.issue.labels.*.name, 'source:sync')) steps: - name: Parse issue inputs @@ -170,7 +170,7 @@ jobs: repo: context.repo.repo, issue_number: parseInt(issueNumber), state: 'closed', - labels: ['sync-plugin-docs', 'validation-failed'] + labels: ['source:sync', 'validation-failed'] }); } @@ -418,7 +418,7 @@ jobs: repo: context.repo.repo, issue_number: ${{ steps.inputs.outputs.issue_number }}, state: 'closed', - labels: ['sync-plugin-docs', 'completed'] + labels: ['source:sync', 'completed'] }); - name: Report failure diff --git a/AGENTS.md b/AGENTS.md index ecc293d223..c05d86e2b3 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,240 +1,99 @@ # InfluxData Documentation (docs-v2) -> **For general AI assistants (Claude, ChatGPT, Gemini, etc.)** -> -> This guide provides comprehensive instructions for 
AI assistants helping with the InfluxData documentation repository. It focuses on content creation, writing workflows, and style guidelines. -> +> **Shared project guidelines for all AI assistants** +> > **Other instruction resources**: -> - [.github/copilot-instructions.md](.github/copilot-instructions.md) - For GitHub Copilot (focused on coding and automation) -> - [CLAUDE.md](CLAUDE.md) - For Claude with MCP (minimal pointer) +> - [.github/copilot-instructions.md](.github/copilot-instructions.md) - GitHub Copilot (CLI tools, workflows, repo structure) +> - [CLAUDE.md](CLAUDE.md) - Claude with MCP (pointer file) > - [.claude/](.claude/) - Claude MCP configuration (commands, agents, skills) > - [.github/instructions/](.github/instructions/) - File pattern-specific instructions -## Project Overview +## Commands -This repository powers [docs.influxdata.com](https://docs.influxdata.com), a Hugo-based static documentation site covering InfluxDB 3, InfluxDB v2/v1, Telegraf, and related products. 
- -**Key Characteristics:** -- **Scale**: 5,359+ pages -- **Build time**: ~75 seconds (NEVER cancel Hugo builds) -- **Tech stack**: Hugo, Node.js, Docker, Vale, Pytest, Cypress -- **Test time**: 15-45 minutes for full code block tests - -## Quick Commands - -| Task | Command | Time | -|------|---------|------| -| Install dependencies | `CYPRESS_INSTALL_BINARY=0 yarn install` | ~4s | -| Build site | `npx hugo --quiet` | ~75s | -| Dev server | `npx hugo server` | ~92s | -| Test code blocks | `yarn test:codeblocks:all` | 15-45m | +| Task | Command | Notes | +|------|---------|-------| +| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | ~4s | +| Build | `npx hugo --quiet` | ~75s — **NEVER CANCEL** | +| Dev server | `npx hugo server` | ~92s, port 1313 | +| Test code blocks | `yarn test:codeblocks:all` | 15-45m — **NEVER CANCEL** | | Lint | `yarn lint` | ~1m | -## Repository Structure - -``` -docs-v2/ -├── content/ # Documentation content -│ ├── influxdb3/ # InfluxDB 3 (core, enterprise, cloud-*) -│ ├── influxdb/ # InfluxDB v2 and v1 -│ ├── enterprise_influxdb/ # InfluxDB Enterprise v1 -│ ├── telegraf/ # Telegraf docs -│ ├── shared/ # Shared content across products -│ └── example.md # Shortcode testing playground -├── layouts/ # Hugo templates and shortcodes -├── assets/ # JS, CSS, TypeScript -├── api-docs/ # OpenAPI specifications -├── data/ # YAML/JSON data files -├── public/ # Build output (gitignored, ~529MB) -└── .github/ - └── copilot-instructions.md # Primary AI instructions -``` - -**Content Paths**: See [copilot-instructions.md](.github/copilot-instructions.md#content-organization) - -## Common Workflows - -### Editing a page in your browser - -1. Navigate to the desired page on [docs.influxdata.com](https://docs.influxdata.com) -2. Click the "Edit this page" link at the bottom -3. Make changes in the GitHub web editor -4. 
Commit changes via a pull request - -### Creating/Editing Content Manually - -**Frontmatter** (page metadata): -```yaml -title: Page Title # Required - becomes h1 -description: Brief desc # Required - for SEO -menu: - influxdb_2_0: - name: Nav Label # Optional - nav display name - parent: Parent Node # Optional - for nesting -weight: 1 # Required - sort order -``` - -**Shared Content** (avoid duplication): -```yaml -source: /shared/path/to/content.md -``` - -Shared content files (`/shared/path/to/content.md`): -- Don't store frontmatter -- Can use `{{% show-in %}}`, `{{% hide-in %}}`, and the `version` keyword (`/influxdb3/version/content.md`) - -**Common Shortcodes**: -- Callouts: `> [!Note]`, `> [!Warning]`, `> [!Important]`, `> [!Tip]` -- Tabs: `{{< tabs-wrapper >}}` + `{{% tabs %}}` + `{{% tab-content %}}` -- Required: `{{< req >}}` or `{{< req type="key" >}}` -- Code placeholders: `{ placeholders="" }` - -**📖 Complete Reference**: [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) | [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) - -### Testing Changes - -**Always test before committing**: -```bash -# Verify server renders (check 200 status) -curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ - -# Test specific content -yarn test:links content/influxdb3/core/**/*.md - -# Run style linting -.ci/vale/vale.sh content/**/*.md -``` - -**📖 Complete Reference**: [DOCS-TESTING.md](DOCS-TESTING.md) +## Constraints -### Committing Changes +- **NEVER cancel** Hugo builds (~75s) or test runs (15-45m) — the site has 5,359+ pages +- Set timeouts: Hugo 180s+, tests 30m+ +- Use `python` not `py` for code block language identifiers (pytest won't collect `py` blocks) +- Shared content files (`content/shared/`) have no frontmatter — the consuming page provides it +- Product names and versions come from `data/products.yml` (single source of truth) +- Commit format: `type(scope): description` — see [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#commit-guidelines) +- 
Network-restricted environments: Cypress (`CYPRESS_INSTALL_BINARY=0`), Docker builds, and Alpine packages may fail -**Commit Message Format**: -``` -type(scope): description +## Style Rules -Examples: -- fix(enterprise): correct Docker environment variable -- feat(influxdb3): add new plugin documentation -- docs(core): update configuration examples -``` +Follows [Google Developer Documentation Style Guide](https://developers.google.com/style) with these project-specific additions: -**Types**: `fix`, `feat`, `style`, `refactor`, `test`, `chore` +- **Semantic line feeds** — one sentence per line (better diffs) +- **No h1 in content** — `title` frontmatter auto-generates h1 +- Active voice, present tense, second person +- Long options in CLI examples (`--output` not `-o`) +- Code blocks within 80 characters -**Scopes**: `enterprise`, `influxdb3`, `core`, `cloud`, `telegraf`, etc. +## Content Structure -**Pre-commit hooks** run automatically (Vale, Prettier, tests). Skip with: -```bash -git commit -m "message" --no-verify -``` +**Required frontmatter**: `title`, `description`, `menu`, `weight` +— see [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) -**📖 Complete Reference**: [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#commit-guidelines) +**Shared content**: `source: /shared/path/to/content.md` +— shared files use `{{% show-in %}}` / `{{% hide-in %}}` for product-specific content -## Key Patterns +**Shortcodes**: Callouts use `> [!Note]` / `> [!Warning]` syntax +— see [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) and [content/example.md](content/example.md) -### Content Organization +## Product Content Paths -- **Product versions**: Managed in `/data/products.yml` -- **Semantic line feeds**: One sentence per line for better diffs -- **Heading hierarchy**: Use h2-h6 only (h1 auto-generated from frontmatter) -- **Image naming**: `project/version-context-description.png` +Canonical paths from `data/products.yml`: -### Code Examples +| Product | Content Path | 
+|---------|-------------| +| InfluxDB 3 Core | `content/influxdb3/core/` | +| InfluxDB 3 Enterprise | `content/influxdb3/enterprise/` | +| InfluxDB 3 Explorer | `content/influxdb3/explorer/` | +| InfluxDB Cloud Serverless | `content/influxdb3/cloud-serverless/` | +| InfluxDB Cloud Dedicated | `content/influxdb3/cloud-dedicated/` | +| InfluxDB Clustered | `content/influxdb3/clustered/` | +| InfluxDB OSS v2 | `content/influxdb/v2/` | +| InfluxDB OSS v1 | `content/influxdb/v1/` | +| InfluxDB Cloud (TSM) | `content/influxdb/cloud/` | +| InfluxDB Enterprise v1 | `content/enterprise_influxdb/` | +| Telegraf | `content/telegraf/` | +| Chronograf | `content/chronograf/` | +| Kapacitor | `content/kapacitor/` | +| Flux | `content/flux/` | +| Shared content | `content/shared/` | -**Testable code blocks** (pytest): -```python -print("Hello, world!") -``` +## Doc Review Pipeline - +Automated PR review for documentation changes. +See [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) for the label taxonomy. -``` -Hello, world! 
-``` +| Resource | Path | +|----------|------| +| Label guide | [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) | +| Triage agent | [.claude/agents/doc-triage-agent.md](.claude/agents/doc-triage-agent.md) | +| Content review instructions | [.github/instructions/content-review.instructions.md](.github/instructions/content-review.instructions.md) | +| Review agent (local) | [.claude/agents/doc-review-agent.md](.claude/agents/doc-review-agent.md) | +| Auto-label workflow | [.github/workflows/auto-label.yml](.github/workflows/auto-label.yml) | +| Doc review workflow | [.github/workflows/doc-review.yml](.github/workflows/doc-review.yml) | -**Language identifiers**: Use `python` not `py`, `bash` not `sh` (for pytest collection) - -### API Documentation - -- **Location**: `/api-docs/` directory -- **Format**: OpenAPI 3.0 YAML -- **Generation**: Uses Redoc + custom processing -- **📖 Workflow**: [api-docs/README.md](api-docs/README.md) - -### JavaScript/TypeScript - -- **Entry point**: `assets/js/main.js` -- **Pattern**: Component-based with `data-component` attributes -- **Debugging**: Source maps or debug helpers available -- **📖 Details**: [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#javascript-in-the-documentation-ui) - -## Important Constraints - -### Performance -- **NEVER cancel Hugo builds** - they take ~75s normally -- **NEVER cancel test runs** - code block tests take 15-45 minutes -- **Set timeouts**: Hugo (180s+), tests (30+ minutes) - -### Style Guidelines -- Use Google Developer Documentation style -- Active voice, present tense, second person for instructions -- No emojis unless explicitly requested -- Use long options in CLI examples (`--option` vs `-o`) -- Format code blocks within 80 characters - -### Network Restrictions -Some operations may fail in restricted environments: -- Docker builds requiring external repos -- `docker compose up local-dev` (Alpine packages) -- Cypress installation (use `CYPRESS_INSTALL_BINARY=0`) - -## Documentation References +## 
Reference | Document | Purpose | |----------|---------| -| [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md) | Contribution workflow, style guidelines | -| [DOCS-TESTING.md](DOCS-TESTING.md) | Testing procedures (code blocks, links, linting) | +| [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md) | Style guidelines, commit format, contribution workflow | +| [DOCS-TESTING.md](DOCS-TESTING.md) | Code block testing, link validation, Vale linting | | [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) | Complete shortcode reference | | [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) | Complete frontmatter field reference | -| [.github/copilot-instructions.md](.github/copilot-instructions.md) | Primary AI assistant instructions | | [api-docs/README.md](api-docs/README.md) | API documentation workflow | -| [content/example.md](content/example.md) | Live shortcode examples for testing | - -## Specialized Topics - -### Working with Specific Products - -| Product | Content Path | Special Notes | -|---------|-------------|---------------| -| InfluxDB 3 Core | `/content/influxdb3/core/` | Latest architecture | -| InfluxDB 3 Enterprise | `/content/influxdb3/enterprise/` | Core + licensed features, clustered | -| InfluxDB Cloud Dedicated | `/content/influxdb3/cloud-dedicated/`, `/content/influxdb3/cloud-serverless/` | Managed and distributed | -| InfluxDB Clustered | `/content/influxdb3/clustered/` | Self-managed and distributed | -| InfluxDB Cloud | `/content/influxdb/cloud/` | Legacy but active | -| InfluxDB v2 | `/content/influxdb/v2/` | Legacy but active | -| InfluxDB Enterprise v1 | `/content/enterprise_influxdb/v1/` | Legacy but active enterprise, clustered | - -### Advanced Tasks - -- **Vale configuration**: `.ci/vale/styles/` for custom rules -- **Link checking**: Uses custom `link-checker` binary -- **Docker testing**: `compose.yaml` defines test services -- **Lefthook**: Git hooks configuration in `lefthook.yml` - -## Troubleshooting - -| Issue | Solution | -|-------|----------| -| Pytest 
collected 0 items | Use `python` not `py` for code block language | -| Hugo build errors | Check `/config/_default/` configuration | -| Link validation slow | Test specific files: `yarn test:links content/file.md` | -| Vale errors | Check `.ci/vale/styles/config/vocabularies` | - -## Critical Reminders - -1. **Be a critical thinking partner** - Challenge assumptions, identify issues -2. **Test before committing** - Run relevant tests locally -3. **Reference, don't duplicate** - Link to detailed docs instead of copying -4. **Respect build times** - Don't cancel long-running operations -5. **Follow conventions** - Use established patterns for consistency - +| [content/example.md](content/example.md) | Live shortcode examples | +| [.github/copilot-instructions.md](.github/copilot-instructions.md) | CLI tools, repo structure, workflows | +| [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) | Label taxonomy and review pipeline | diff --git a/CLAUDE.md b/CLAUDE.md index fe99fa453a..a549ac0759 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,7 +6,8 @@ > > **Full instruction resources**: > - [.github/copilot-instructions.md](.github/copilot-instructions.md) - For GitHub Copilot (technical setup, automation) -> - [AGENTS.md](AGENTS.md) - For general AI assistants (content creation, workflows, style guidelines) +> - [AGENTS.md](AGENTS.md) - Shared project guidelines (style, constraints, content structure) +> - [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) - Label taxonomy and pipeline usage > - [.claude/](.claude/) - Claude MCP configuration directory with: > - Custom commands in `.claude/commands/` > - Specialized agents in `.claude/agents/` diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index 35fdd5835b..b087a11e79 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -63,7 +63,7 @@ influxdb3 serve [OPTIONS] | | 
`--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ | | | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ | -| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/##azure-endpoint)_ | +| | `--azure-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-endpoint)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | @@ -71,14 +71,14 @@ influxdb3 serve [OPTIONS] | | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | | | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | -| | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ | -| | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ | -| | `--datafusion-runtime-max-blocking-threads` | _See [configuration 
options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-blocking-threads)_ | -| | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | -| | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | -| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ | -| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | +| | `--datafusion-runtime-disable-lifo-slot` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-event-interval` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-global-queue-interval` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-max-blocking-threads` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-max-io-events-per-tick` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-thread-keep-alive` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-thread-priority` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-type` | Development-only Tokio runtime configuration | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ | | | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ | | | `--disable-authz` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-authz)_ | @@ -118,7 +118,7 @@ influxdb3 serve [OPTIONS] | | `--table-index-cache-concurrency-limit` | _See [configuration 
options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ | | | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ | | | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ | -| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-telemetry-upload)_ | | | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ | | | `--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ | | | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ | diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 804a826ec9..4ade8944d8 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -62,7 +62,7 @@ influxdb3 serve [OPTIONS] | | `--aws-session-token` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-session-token)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-skip-signature)_ | | | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ | -| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/##azure-endpoint)_ | +| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-endpoint)_ | | | `--azure-storage-access-key` | _See 
[configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | @@ -73,19 +73,18 @@ influxdb3 serve [OPTIONS] | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | | | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ | -| | `--compaction-row-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-row-limit)_ | | | `--data-dir` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#data-dir)_ | | | `--datafusion-config` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-config)_ | | | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | -| | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-event-interval)_ | -| | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-global-queue-interval)_ | -| | 
`--datafusion-runtime-max-blocking-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-max-blocking-threads)_ | -| | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | -| | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | -| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ | -| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ | +| | `--datafusion-runtime-disable-lifo-slot` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-event-interval` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-global-queue-interval` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-max-blocking-threads` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-max-io-events-per-tick` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-thread-keep-alive` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-thread-priority` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-type` | Development-only Tokio runtime configuration | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ | | | `--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ | | | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ | @@ -113,7 +112,7 @@ 
influxdb3 serve [OPTIONS] | | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | | | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | | | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | -| | `--num-datafusion-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-datafusion-threads)_ | +| | `--num-datafusion-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-num-threads)_ | | | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ | | | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ | | | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ | @@ -140,7 +139,7 @@ influxdb3 serve [OPTIONS] | | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ | | | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ | | | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ | -| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-telemetry-upload)_ | | | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ | | | `--tls-cert` | _See 
[configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ | | | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ | diff --git a/data/labels.yml b/data/labels.yml new file mode 100644 index 0000000000..a4965bc49b --- /dev/null +++ b/data/labels.yml @@ -0,0 +1,86 @@ +# Label definitions for the docs-v2 repository. +# +# Source of truth for non-product label names, colors, and descriptions. +# Product labels are derived from data/products.yml (label_group + content_path), +# except product:shared which is defined here (applies across products). +# Review label behavior (severity, result rules) is in .github/templates/review-comment.md. +# +# When a label value is a string, it's the description and the group color applies. +# When a label value is an object, it has its own color. + +product: + description: Cross-product labels not derived from a single products.yml entry. + labels: + product:shared: + color: "#FFA500" + description: Shared content that applies to multiple products + +source: + color: "#9370DB" + description: Track how an issue or PR was created. + labels: + source:auto-detected: Created by change detection within this repo + source:dar: Generated by the DAR pipeline + source:sync: Synced from an external repository + source:feedback: From user feedback + source:manual: Human-created issue + +waiting: + color: "#FF8C00" + description: Indicate external blockers. + labels: + waiting:engineering: Waiting for engineer confirmation + waiting:pr: Blocked on another PR merging first + waiting:product: Waiting for product or PM decision + +workflow: + description: Control automation behavior. + labels: + agent-ready: + color: "#00FF00" + description: Issue can be worked autonomously by an agent + skip-review: + color: "#1E90FF" + description: PR skips the automated doc review pipeline + +area: + color: "#a89129" + description: What part of the repo a change affects. 
+ labels: + area:agents: "AI agents, skills, hooks, and MCP config" + area:ci: "Continuous integration pipeline (verify, test, validate, publish)" + area:links: "Link management (validation, checking, fixing)" + area:site-ui: "Documentation site UI: templates, styles, JS/TS" + +release: + description: Release-gated merge workflow. + labels: + release:pending: + color: "#FEF2C0" + description: Waiting for product release before merging + release:ready: + color: "#0E8A16" + description: Product released, docs ready for review/merge + +onboarding: + description: Contributor onboarding. + labels: + good-first-issue: + color: "#f9f348" + description: Easy update. Good for first timers! + +review: + description: >- + Automated review outcomes. Mutually exclusive. + See .github/templates/review-comment.md for severity definitions + and result-to-label mapping. + labels: + review:approved: + color: "#28A745" + description: Automated review passed + review:changes-requested: + color: "#DC3545" + description: Automated review found blocking issues + review:needs-human: + color: "#FFC107" + description: Automated review inconclusive diff --git a/data/products.yml b/data/products.yml index 1a0cbdca54..90d37e0541 100644 --- a/data/products.yml +++ b/data/products.yml @@ -2,6 +2,8 @@ influxdb3_core: name: InfluxDB 3 Core altname: InfluxDB 3 Core namespace: influxdb3 + content_path: influxdb3/core + label_group: v3-monolith menu_category: self-managed versions: [core] list_order: 2 @@ -38,6 +40,8 @@ influxdb3_enterprise: name: InfluxDB 3 Enterprise altname: InfluxDB 3 Enterprise namespace: influxdb3 + content_path: influxdb3/enterprise + label_group: v3-monolith menu_category: self-managed versions: [enterprise] list_order: 2 @@ -74,6 +78,8 @@ influxdb3_explorer: name: InfluxDB 3 Explorer altname: Explorer namespace: influxdb3_explorer + content_path: influxdb3/explorer + label_group: explorer menu_category: tools list_order: 1 latest: explorer @@ -90,6 +96,8 @@ 
influxdb3_cloud_serverless: name: InfluxDB Cloud Serverless altname: InfluxDB Cloud namespace: influxdb + content_path: influxdb3/cloud-serverless + label_group: v3-distributed menu_category: managed versions: [cloud-serverless] list_order: 2 @@ -123,6 +131,8 @@ influxdb3_cloud_dedicated: name: InfluxDB Cloud Dedicated altname: InfluxDB Cloud namespace: influxdb + content_path: influxdb3/cloud-dedicated + label_group: v3-distributed menu_category: managed versions: [cloud-dedicated] list_order: 3 @@ -154,6 +164,8 @@ influxdb3_clustered: name: InfluxDB Clustered altname: InfluxDB Clustered namespace: influxdb + content_path: influxdb3/clustered + label_group: v3-distributed menu_category: self-managed versions: [clustered] list_order: 3 @@ -188,6 +200,12 @@ influxdb: name__v1: InfluxDB OSS v1 altname: InfluxDB OSS namespace: influxdb + content_path: + v2: influxdb/v2 + v1: influxdb/v1 + label_group: + v2: v2 + v1: v1 succeeded_by: influxdb3_core menu_category: self-managed list_order: 1 @@ -233,6 +251,8 @@ influxdb_cloud: name__vcloud: InfluxDB Cloud (TSM) altname: InfluxDB Cloud namespace: influxdb + content_path: influxdb/cloud + label_group: v2-cloud menu_category: managed versions: [cloud] list_order: 1 @@ -262,6 +282,8 @@ influxdb_cloud: telegraf: name: Telegraf namespace: telegraf + content_path: telegraf + label_group: telegraf menu_category: other list_order: 6 versions: [v1] @@ -286,6 +308,8 @@ telegraf_controller: chronograf: name: Chronograf namespace: chronograf + content_path: chronograf + label_group: chronograf menu_category: other list_order: 7 versions: [v1] @@ -301,6 +325,8 @@ chronograf: kapacitor: name: Kapacitor namespace: kapacitor + content_path: kapacitor + label_group: kapacitor menu_category: other list_order: 7 versions: [v1] @@ -316,6 +342,8 @@ kapacitor: enterprise_influxdb: name: 'InfluxDB Enterprise v1' namespace: enterprise_influxdb + content_path: enterprise_influxdb + label_group: v1-enterprise menu_category: self-managed 
list_order: 5 versions: [v1] @@ -370,6 +398,8 @@ influxdb_cloud1: flux: name: Flux namespace: flux + content_path: flux + label_group: flux menu_category: languages list_order: 8 versions: [v0] diff --git a/docs/plans/2025-01-10-docs-cli-workflow-design.md b/docs/plans/2025-01-10-docs-cli-workflow-design.md deleted file mode 100644 index 1aba665c99..0000000000 --- a/docs/plans/2025-01-10-docs-cli-workflow-design.md +++ /dev/null @@ -1,134 +0,0 @@ -# docs-cli-workflow Skill Design - -## Overview - -A Claude Code skill that guides when to use `docs create` and `docs edit` CLI tools versus direct file editing for InfluxData documentation. - -## Problem - -Claude under-utilizes the `docs create` and `docs edit` CLI tools even when they would provide significant value: - -- Better scaffolding for multi-product content -- Context gathering (link extraction, structure analysis) -- Education about style guidelines and shortcodes -- Automatic file location from URLs - -## Skill Identity - -- **Name**: `docs-cli-workflow` -- **Location**: `.claude/skills/docs-cli-workflow/SKILL.md` -- **Scope**: Decision guidance only (not full workflow management) -- **Behavior**: Suggest and wait for user confirmation - -## Activation - -### Trigger Keywords - -The skill activates when user messages contain: - -- "new page", "new doc", "create documentation", "add a page" -- "edit this URL", "edit ", "update this page" (with URL) -- "document this feature", "write docs for" -- "I have a draft", "from this draft" -- References to docs.influxdata.com URLs - -### Non-Triggers (Direct Editing is Fine) - -- "fix this typo in content/..." -- "update the frontmatter in..." 
-- Explicit file paths the user already knows -- Small edits to existing files user has open - -## Decision Logic - -### When to Suggest `docs create` - -| Trigger | Why CLI is Better | -| ---------------------------------- | --------------------------------------------------------- | -| Content targets multiple products | CLI scaffolds shared content pattern automatically | -| User unsure where page should live | CLI analyzes structure, suggests location | -| Draft references existing docs | CLI extracts links, provides context to avoid duplication | -| User unfamiliar with conventions | CLI prompt includes style guide, shortcode examples | -| Complex new feature documentation | CLI gathers product metadata, version info | - -### When to Suggest `docs edit` - -| Trigger | Why CLI is Better | -| -------------------------------------- | ------------------------------------------------------ | -| User provides docs.influxdata.com URL | CLI finds source file(s) including shared content | -| User doesn't know source file location | CLI maps URL → file path(s) | -| Page uses shared content | CLI identifies both frontmatter file AND shared source | - -### When to Skip CLI (Edit Directly) - -| Scenario | Why Direct is Fine | -| -------------------------------- | ------------------------------- | -| User provides explicit file path | They already know where to edit | -| Small typo/link fixes | Overhead not worth it | -| User says "just edit it" | Explicit preference | -| Frontmatter-only changes | No content generation needed | - -## Suggestion Format - -### For `docs create` - -``` -I'd recommend using the docs CLI for this: - -npx docs create --products - -**Why**: [1-2 sentences explaining the specific benefit for this request] - -Options: -1. **Use CLI** - I'll run the command and guide you through product selection -2. **Edit directly** - Skip the CLI, I'll create/edit files manually - -Which do you prefer? 
-``` - -### For `docs edit` - -``` -I can use the docs CLI to find the source files for this page: - -npx docs edit - -**Why**: [1-2 sentences - e.g., "This will locate the source file and any shared content it uses"] - -Options: -1. **Use CLI** - I'll find and open the relevant files -2. **I know the file** - Tell me the path and I'll edit directly - -Which do you prefer? -``` - -### Principles - -- Show the actual command (educational) -- Explain *why* for this specific case -- Always offer the direct alternative -- Keep it brief - 4-6 lines max - -## Edge Cases - -| Edge Case | Behavior | -| ---------------------------------------- | ------------------------------------------------------ | -| User already in a `docs create` workflow | Don't re-suggest | -| URL points to non-existent page | Suggest `docs create --url` instead of `docs edit` | -| User provides both URL and draft | Suggest `docs create --url --from-draft ` | -| User declines CLI twice in session | Stop suggesting, note preference | - -## Post-Confirmation Behavior - -After user confirms they want to use the CLI: - -1. Run the appropriate command -2. Let the CLI handle the rest (product selection, file generation, etc.) -3. No additional skill guidance needed - -## Related Files - -- `scripts/docs-cli.js` - Main CLI entry point -- `scripts/docs-create.js` - Content scaffolding implementation -- `scripts/docs-edit.js` - File finder implementation -- `scripts/lib/content-scaffolding.js` - Context preparation logic diff --git a/helper-scripts/label-migration/README.md b/helper-scripts/label-migration/README.md new file mode 100644 index 0000000000..99e979d2fe --- /dev/null +++ b/helper-scripts/label-migration/README.md @@ -0,0 +1,95 @@ +# Label Migration Scripts + +Migrate the docs-v2 repository from 80+ ad-hoc labels to the 24-label taxonomy +defined in [DOC-REVIEW-PIPELINE-PLAN.md](../../.github/DOC-REVIEW-PIPELINE-PLAN.md). 
+ +## Prerequisites + +- `gh` CLI authenticated with access to `influxdata/docs-v2` +- Run from any directory (scripts use `REPO` env var, defaults to `influxdata/docs-v2`) + +## Execution Order + +### Step 1: Create new labels (safe, idempotent) + +```bash +./create-labels.sh # Creates 24 new labels +./create-labels.sh --dry-run # Preview without creating +``` + +Uses `gh label create --force`, which creates new labels or updates existing +ones. Safe to run multiple times. + +### Step 2: Migrate issues to new labels + +```bash +./migrate-labels.sh # Adds new labels to issues with old labels +./migrate-labels.sh --dry-run # Preview without modifying issues +``` + +Adds new labels to issues/PRs that have old labels. Does NOT remove old labels. +Flags `InfluxDB v3` issues for manual review (may be monolith or distributed). + +### Step 3: Verify migration + +Before deleting old labels, verify a sample of migrated issues: + +```bash +# Check issues with new product labels +gh issue list -R influxdata/docs-v2 -l "product:v3-monolith" --state all +gh issue list -R influxdata/docs-v2 -l "product:v3-distributed" --state all + +# Check the flagged InfluxDB v3 issues +gh issue list -R influxdata/docs-v2 -l "InfluxDB v3" --state all +``` + +### Step 4: Delete old labels (destructive, interactive) + +```bash +./delete-labels.sh # Deletes old labels with confirmation prompts +./delete-labels.sh --dry-run # Preview without deleting +``` + +Prompts for confirmation before each batch of deletions. Batches: +1. Old product labels (15 labels) +2. Old release labels (2 labels) +3. Old source tracking labels (1 label) +4. Renamed labels (2 labels) +5. 
Unused/generic labels (14 labels) + +### Step 5: Update workflow references + +After deleting `sync-plugin-docs`, update these files to use `source:sync`: +- `.github/workflows/sync-plugins.yml` (lines 28, 173, 421) +- `.github/ISSUE_TEMPLATE/sync-plugin-docs.yml` (line 4) + +## Targeting a different repo + +```bash +REPO=myorg/myrepo ./create-labels.sh +REPO=myorg/myrepo ./migrate-labels.sh --dry-run +``` + +## Rollback + +If something goes wrong after Step 2 (migration): +- Old labels still exist (not deleted until Step 4) +- New labels can be removed: `gh label delete "product:v3-monolith" -R influxdata/docs-v2 --yes` +- Issues retain both old and new labels until old labels are deleted + +If something goes wrong after Step 4 (deletion): +- Old labels are gone but issues retain the new labels +- Re-create old labels manually if needed: `gh label create "InfluxDB v3" -R influxdata/docs-v2 --color EC8909` + +## Label Taxonomy + +See the full taxonomy in [DOC-REVIEW-PIPELINE-PLAN.md](../../.github/DOC-REVIEW-PIPELINE-PLAN.md#11--label-taxonomy). + +| Category | Count | Prefix | Example | +|----------|-------|--------|---------| +| Product | 11 | `product:` | `product:v3-monolith` | +| Source tracking | 4 | `source:` | `source:sync` | +| Waiting states | 2 | `waiting:` | `waiting:engineering` | +| Workflow states | 2 | (none) | `agent-ready`, `skip-review` | +| Review outcomes | 3 | `review:` | `review:approved` | +| Renamed | 2 | various | `ai:tooling`, `ci:testing` | diff --git a/helper-scripts/label-migration/create-labels.sh b/helper-scripts/label-migration/create-labels.sh new file mode 100755 index 0000000000..0aea302c99 --- /dev/null +++ b/helper-scripts/label-migration/create-labels.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Create all labels defined in the doc review pipeline plan. +# Safe and idempotent — uses --force to update existing labels. 
+# +# Usage: +# ./create-labels.sh # Create labels in influxdata/docs-v2 +# ./create-labels.sh --dry-run # Print commands without executing +# REPO=owner/repo ./create-labels.sh # Target a different repo + +REPO="${REPO:-influxdata/docs-v2}" +DRY_RUN=false + +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true + echo "=== DRY RUN — no labels will be created ===" + echo +fi + +create_label() { + local name="$1" + local color="$2" + local description="$3" + + if $DRY_RUN; then + printf " %-30s #%-6s %s\n" "$name" "$color" "$description" + else + if gh label create "$name" \ + --repo "$REPO" \ + --color "$color" \ + --description "$description" \ + --force 2>/dev/null; then + printf " ✓ %-30s\n" "$name" + else + printf " ✗ %-30s (failed)\n" "$name" + fi + fi +} + +echo "Repository: $REPO" +echo + +# --- Product labels (11) — yellow --- +echo "Product labels:" +create_label "product:v3-monolith" "FFA500" "InfluxDB 3 Core and Enterprise (single-node / clusterable)" +create_label "product:v3-distributed" "FFA500" "InfluxDB 3 Cloud Serverless, Cloud Dedicated, Clustered" +create_label "product:v2" "FFA500" "InfluxDB v2 (Cloud TSM, OSS)" +create_label "product:v1" "FFA500" "InfluxDB v1 OSS" +create_label "product:v1-enterprise" "FFA500" "InfluxDB Enterprise v1" +create_label "product:telegraf" "FFA500" "Telegraf documentation" +create_label "product:chronograf" "FFA500" "Chronograf documentation" +create_label "product:kapacitor" "FFA500" "Kapacitor documentation" +create_label "product:flux" "FFA500" "Flux language documentation" +create_label "product:explorer" "FFA500" "InfluxDB 3 Explorer" +create_label "product:shared" "FFA500" "Shared content across products" +echo + +# --- Source tracking labels (4) — purple --- +echo "Source tracking labels:" +create_label "source:auto-detected" "9370DB" "Created by change detection within this repo" +create_label "source:dar" "9370DB" "Generated by DAR pipeline (issue analysis)" +create_label "source:sync" "9370DB" "Synced from 
an external repository" +create_label "source:manual" "9370DB" "Human-created issue" +echo + +# --- Waiting states (2) — orange --- +echo "Waiting state labels:" +create_label "waiting:engineering" "FF8C00" "Waiting for engineer confirmation" +create_label "waiting:product" "FF8C00" "Waiting for product/PM decision" +echo + +# --- Workflow states (2) — green/blue --- +echo "Workflow state labels:" +create_label "agent-ready" "00FF00" "Agent can work on this autonomously" +create_label "skip-review" "1E90FF" "Skip automated doc review pipeline" +echo + +# --- Review outcome labels (3) — green/red/yellow --- +echo "Review outcome labels:" +create_label "review:approved" "28A745" "Automated review passed — no blocking issues" +create_label "review:changes-requested" "DC3545" "Automated review found blocking issues" +create_label "review:needs-human" "FFC107" "Automated review inconclusive, needs human" +echo + +# --- Renamed labels (2) --- +echo "Renamed labels:" +create_label "ai:tooling" "3fb91f" "Related to AI assistant infrastructure" +create_label "ci:testing" "a1fd0f" "CI/testing infrastructure" +echo + +# --- Ensure existing workflow labels exist --- +echo "Existing labels (ensure present):" +create_label "release:pending" "FEF2C0" "Waiting for product release before merging" +echo + +echo "Done. Total: 24 new + 1 existing = 25 labels." diff --git a/helper-scripts/label-migration/delete-labels.sh b/helper-scripts/label-migration/delete-labels.sh new file mode 100755 index 0000000000..58bc2c9ee0 --- /dev/null +++ b/helper-scripts/label-migration/delete-labels.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Delete old labels after migration is verified. +# DESTRUCTIVE — requires interactive confirmation for each batch. +# +# Run this ONLY after: +# 1. create-labels.sh has been run +# 2. migrate-labels.sh has been run +# 3. 
A sample of migrated issues has been manually verified +# +# Usage: +# ./delete-labels.sh # Delete labels (with confirmation prompts) +# ./delete-labels.sh --dry-run # Print what would be deleted +# REPO=owner/repo ./delete-labels.sh # Target a different repo + +REPO="${REPO:-influxdata/docs-v2}" +DRY_RUN=false + +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true + echo "=== DRY RUN — no labels will be deleted ===" + echo +fi + +delete_label() { + local name="$1" + + if $DRY_RUN; then + printf " Would delete: %s\n" "$name" + return + fi + + if gh label delete "$name" \ + --repo "$REPO" \ + --yes 2>/dev/null; then + printf " ✓ Deleted: %s\n" "$name" + else + printf " - Skipped: %s (not found or already deleted)\n" "$name" + fi +} + +confirm_batch() { + local batch_name="$1" + + if $DRY_RUN; then + return 0 + fi + + echo + read -r -p "Delete $batch_name labels? [y/N] " response + case "$response" in + [yY][eE][sS]|[yY]) return 0 ;; + *) echo " Skipped."; return 1 ;; + esac +} + +echo "Repository: $REPO" +echo +echo "⚠ This script deletes labels. Run migrate-labels.sh first." 
+echo + +# --- Old product labels (migrated to product:* labels) --- +echo "=== Old product labels ===" +if confirm_batch "old product"; then + delete_label "InfluxDB 3 Core and Enterprise" + delete_label "InfluxDB v3" + delete_label "Processing engine" + delete_label "InfluxDB v2" + delete_label "InfluxDB v1" + delete_label "Enterprise 1.x" + delete_label "Chronograf 1.x" + delete_label "Kapacitor" + delete_label "Flux" + delete_label "InfluxDB 3 Explorer" + delete_label "InfluxDB Cloud Dedicated" + delete_label "InfluxDB Cloud Serverless" + delete_label "InfluxDB Clustered" + delete_label "InfluxDB Cloud" + delete_label "Telegraf" +fi +echo + +# --- Old release labels (migrated to release:pending) --- +echo "=== Old release labels ===" +if confirm_batch "old release"; then + delete_label "Pending Release" + delete_label "release/influxdb3" +fi +echo + +# --- Old source tracking labels --- +echo "=== Old source tracking labels ===" +if confirm_batch "old source tracking"; then + delete_label "sync-plugin-docs" +fi +echo + +# --- Renamed labels --- +echo "=== Renamed labels (old names) ===" +if confirm_batch "renamed label (old names)"; then + delete_label "AI assistant tooling" + delete_label "ci:testing-and-validation" +fi +echo + +# --- Unused/generic labels --- +echo "=== Unused/generic labels ===" +echo "These labels have inconsistent naming or overlap with the new taxonomy." 
+if confirm_batch "unused/generic"; then + delete_label "bug" + delete_label "priority" + delete_label "documentation" + delete_label "Proposal" + delete_label "Research Phase" + delete_label "ready-for-collaboration" + delete_label "ui" + delete_label "javascript" + delete_label "dependencies" + delete_label "integration-demo-blog" + delete_label "API" + delete_label "Docker" + delete_label "Grafana" + delete_label "Ask AI" +fi +echo + +echo "=== Done ===" +echo +echo "Labels NOT deleted (kept intentionally or not in scope):" +echo " - release:pending, release:ready, release/telegraf, release/v1" +echo " - good-first-issue, user feedback, validation-failed" +echo " - duplicate, enhancement, help wanted, question, wontfix" +echo " - design, security, security/misc, Epic, feat, fix, chore" +echo " - And others not in the migration scope" +echo +echo "Review remaining labels with: gh label list -R $REPO" diff --git a/helper-scripts/label-migration/migrate-labels.sh b/helper-scripts/label-migration/migrate-labels.sh new file mode 100755 index 0000000000..a1f309ab32 --- /dev/null +++ b/helper-scripts/label-migration/migrate-labels.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Migrate issues and PRs from old labels to new labels. +# For each mapping, finds all issues with the old label and adds the new label. +# Does NOT remove old labels — that happens in delete-labels.sh after verification. 
+# +# Usage: +# ./migrate-labels.sh # Migrate labels in influxdata/docs-v2 +# ./migrate-labels.sh --dry-run # Print what would happen without executing +# REPO=owner/repo ./migrate-labels.sh # Target a different repo + +REPO="${REPO:-influxdata/docs-v2}" +DRY_RUN=false + +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true + echo "=== DRY RUN — no issues will be modified ===" + echo +fi + +migrate_label() { + local old_label="$1" + local new_label="$2" + local note="${3:-}" + + echo "--- $old_label → $new_label" + if [[ -n "$note" ]]; then + echo " Note: $note" + fi + + # Get all open and closed issues/PRs with the old label + local numbers + numbers=$(gh issue list \ + --repo "$REPO" \ + --label "$old_label" \ + --state all \ + --json number \ + --jq '.[].number' 2>/dev/null || true) + + if [[ -z "$numbers" ]]; then + echo " No issues found with label '$old_label'" + echo + return + fi + + local count + count=$(echo "$numbers" | wc -l | tr -d ' ') + echo " Found $count issue(s)" + + for num in $numbers; do + if $DRY_RUN; then + echo " Would add '$new_label' to #$num" + else + if gh issue edit "$num" \ + --repo "$REPO" \ + --add-label "$new_label" 2>/dev/null; then + echo " ✓ #$num" + else + echo " ✗ #$num (failed)" + fi + fi + done + echo +} + +# Flag issues that need manual review instead of automatic migration +flag_for_review() { + local old_label="$1" + local reason="$2" + + echo "--- ⚠ $old_label — NEEDS MANUAL REVIEW" + echo " Reason: $reason" + + local numbers + numbers=$(gh issue list \ + --repo "$REPO" \ + --label "$old_label" \ + --state all \ + --json number,title \ + --jq '.[] | "#\(.number) \(.title)"' 2>/dev/null || true) + + if [[ -z "$numbers" ]]; then + echo " No issues found" + else + echo "$numbers" | while IFS= read -r line; do + echo " $line" + done + fi + echo +} + +echo "Repository: $REPO" + +migrate_label "alerts" "product:v2" +migrate_label "InfluxDB v2" "product:v2" +migrate_label "InfluxDB 3 Core and Enterprise" "product:v3-monolith" + 
+echo "=== Done ===" +echo +echo "Next steps:" +echo " 1. Review any issues flagged above" +echo " 2. Verify a sample of migrated issues in the GitHub UI" +echo " 3. Once satisfied, run delete-labels.sh to remove old labels" From 8dd60bf9b8aa909cd9cc2814f325ac8fe43eb83b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Mar 2026 10:10:09 -0500 Subject: [PATCH 06/15] chore(deps): bump dompurify from 3.3.1 to 3.3.2 (#6905) Bumps [dompurify](https://github.com/cure53/DOMPurify) from 3.3.1 to 3.3.2. - [Release notes](https://github.com/cure53/DOMPurify/releases) - [Commits](https://github.com/cure53/DOMPurify/compare/3.3.1...3.3.2) --- updated-dependencies: - dependency-name: dompurify dependency-version: 3.3.2 dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index 56c52b1050..3b8d8d78e3 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2055,9 +2055,9 @@ doctrine@^2.1.0: esutils "^2.0.2" dompurify@^3.2.5: - version "3.3.1" - resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.3.1.tgz#c7e1ddebfe3301eacd6c0c12a4af284936dbbb86" - integrity sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q== + version "3.3.2" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.3.2.tgz#58c515d0f8508b8749452a028aa589ad80b36325" + integrity sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ== optionalDependencies: "@types/trusted-types" "^2.0.7" From ef6f12410f0cef0ed4df87717b4b7524328456c3 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:17:22 +0100 Subject: [PATCH 07/15] Telegraf v1.38.0 (#6911) * Updating changelog * Updating 
plugin list * Updating product version * Updating plugins --------- Co-authored-by: Jason Stirnaman --- .../aggregator-plugins/basicstats/_index.md | 2 +- .../aggregator-plugins/derivative/_index.md | 2 +- .../v1/aggregator-plugins/final/_index.md | 2 +- .../v1/aggregator-plugins/histogram/_index.md | 2 +- .../v1/aggregator-plugins/merge/_index.md | 2 +- .../v1/aggregator-plugins/minmax/_index.md | 2 +- .../v1/aggregator-plugins/quantile/_index.md | 2 +- .../v1/aggregator-plugins/starlark/_index.md | 2 +- .../aggregator-plugins/valuecounter/_index.md | 2 +- .../v1/input-plugins/activemq/_index.md | 2 +- .../v1/input-plugins/aerospike/_index.md | 2 +- .../v1/input-plugins/aliyuncms/_index.md | 2 +- .../v1/input-plugins/amd_rocm_smi/_index.md | 2 +- .../v1/input-plugins/amqp_consumer/_index.md | 2 +- .../v1/input-plugins/apache/_index.md | 2 +- .../v1/input-plugins/apcupsd/_index.md | 2 +- .../v1/input-plugins/aurora/_index.md | 2 +- .../v1/input-plugins/azure_monitor/_index.md | 2 +- .../azure_storage_queue/_index.md | 2 +- .../v1/input-plugins/bcache/_index.md | 2 +- .../v1/input-plugins/beanstalkd/_index.md | 2 +- .../telegraf/v1/input-plugins/beat/_index.md | 2 +- .../telegraf/v1/input-plugins/bind/_index.md | 2 +- .../telegraf/v1/input-plugins/bond/_index.md | 2 +- .../v1/input-plugins/burrow/_index.md | 2 +- .../telegraf/v1/input-plugins/ceph/_index.md | 2 +- .../v1/input-plugins/cgroup/_index.md | 2 +- .../v1/input-plugins/chrony/_index.md | 2 +- .../cisco_telemetry_mdt/_index.md | 2 +- .../v1/input-plugins/clickhouse/_index.md | 2 +- .../v1/input-plugins/cloud_pubsub/_index.md | 2 +- .../input-plugins/cloud_pubsub_push/_index.md | 2 +- .../v1/input-plugins/cloudwatch/_index.md | 2 +- .../cloudwatch_metric_streams/_index.md | 2 +- .../v1/input-plugins/conntrack/_index.md | 2 +- .../v1/input-plugins/consul/_index.md | 2 +- .../v1/input-plugins/consul_agent/_index.md | 2 +- .../v1/input-plugins/couchbase/_index.md | 2 +- .../v1/input-plugins/couchdb/_index.md | 
2 +- .../telegraf/v1/input-plugins/cpu/_index.md | 2 +- .../telegraf/v1/input-plugins/csgo/_index.md | 2 +- .../input-plugins/ctrlx_datalayer/_index.md | 2 +- .../telegraf/v1/input-plugins/dcos/_index.md | 2 +- .../input-plugins/directory_monitor/_index.md | 2 +- .../telegraf/v1/input-plugins/disk/_index.md | 2 +- .../v1/input-plugins/diskio/_index.md | 2 +- .../v1/input-plugins/disque/_index.md | 2 +- .../v1/input-plugins/dmcache/_index.md | 2 +- .../v1/input-plugins/dns_query/_index.md | 2 +- .../v1/input-plugins/docker/_index.md | 2 +- .../v1/input-plugins/docker_log/_index.md | 2 +- .../v1/input-plugins/dovecot/_index.md | 2 +- .../telegraf/v1/input-plugins/dpdk/_index.md | 2 +- .../telegraf/v1/input-plugins/ecs/_index.md | 2 +- .../v1/input-plugins/elasticsearch/_index.md | 2 +- .../elasticsearch_query/_index.md | 2 +- .../v1/input-plugins/ethtool/_index.md | 2 +- .../input-plugins/eventhub_consumer/_index.md | 2 +- .../telegraf/v1/input-plugins/exec/_index.md | 2 +- .../telegraf/v1/input-plugins/execd/_index.md | 2 +- .../v1/input-plugins/fail2ban/_index.md | 2 +- .../v1/input-plugins/fibaro/_index.md | 2 +- .../telegraf/v1/input-plugins/file/_index.md | 2 +- .../v1/input-plugins/filecount/_index.md | 2 +- .../v1/input-plugins/filestat/_index.md | 2 +- .../v1/input-plugins/fireboard/_index.md | 2 +- .../v1/input-plugins/firehose/_index.md | 2 +- .../v1/input-plugins/fluentd/_index.md | 2 +- .../v1/input-plugins/fritzbox/_index.md | 2 +- .../v1/input-plugins/github/_index.md | 2 +- .../telegraf/v1/input-plugins/gnmi/_index.md | 5 +- .../google_cloud_storage/_index.md | 2 +- .../v1/input-plugins/graylog/_index.md | 2 +- .../v1/input-plugins/haproxy/_index.md | 2 +- .../v1/input-plugins/hddtemp/_index.md | 2 +- .../telegraf/v1/input-plugins/http/_index.md | 5 +- .../input-plugins/http_listener_v2/_index.md | 2 +- .../v1/input-plugins/http_response/_index.md | 2 +- .../v1/input-plugins/huebridge/_index.md | 2 +- .../v1/input-plugins/hugepages/_index.md | 2 +- 
.../v1/input-plugins/icinga2/_index.md | 2 +- .../v1/input-plugins/infiniband/_index.md | 2 +- .../v1/input-plugins/influxdb/_index.md | 2 +- .../input-plugins/influxdb_listener/_index.md | 2 +- .../influxdb_v2_listener/_index.md | 2 +- .../v1/input-plugins/intel_baseband/_index.md | 2 +- .../v1/input-plugins/intel_dlb/_index.md | 2 +- .../v1/input-plugins/intel_pmt/_index.md | 2 +- .../v1/input-plugins/intel_pmu/_index.md | 2 +- .../input-plugins/intel_powerstat/_index.md | 2 +- .../v1/input-plugins/intel_rdt/_index.md | 2 +- .../v1/input-plugins/internal/_index.md | 2 +- .../v1/input-plugins/internet_speed/_index.md | 2 +- .../v1/input-plugins/interrupts/_index.md | 2 +- .../v1/input-plugins/ipmi_sensor/_index.md | 2 +- .../telegraf/v1/input-plugins/ipset/_index.md | 2 +- .../v1/input-plugins/iptables/_index.md | 2 +- .../telegraf/v1/input-plugins/ipvs/_index.md | 2 +- .../v1/input-plugins/jenkins/_index.md | 2 +- .../v1/input-plugins/jolokia2_agent/_index.md | 2 +- .../v1/input-plugins/jolokia2_proxy/_index.md | 2 +- .../jti_openconfig_telemetry/_index.md | 2 +- .../v1/input-plugins/kafka_consumer/_index.md | 2 +- .../v1/input-plugins/kapacitor/_index.md | 2 +- .../v1/input-plugins/kernel/_index.md | 2 +- .../v1/input-plugins/kernel_vmstat/_index.md | 2 +- .../v1/input-plugins/kibana/_index.md | 2 +- .../input-plugins/kinesis_consumer/_index.md | 2 +- .../v1/input-plugins/knx_listener/_index.md | 2 +- .../v1/input-plugins/kube_inventory/_index.md | 2 +- .../v1/input-plugins/kubernetes/_index.md | 2 +- .../telegraf/v1/input-plugins/lanz/_index.md | 2 +- .../telegraf/v1/input-plugins/ldap/_index.md | 2 +- .../telegraf/v1/input-plugins/leofs/_index.md | 2 +- .../v1/input-plugins/libvirt/_index.md | 2 +- .../v1/input-plugins/linux_cpu/_index.md | 2 +- .../input-plugins/linux_sysctl_fs/_index.md | 2 +- .../telegraf/v1/input-plugins/logql/_index.md | 5 +- .../v1/input-plugins/logstash/_index.md | 2 +- .../v1/input-plugins/lustre2/_index.md | 2 +- 
.../telegraf/v1/input-plugins/lvm/_index.md | 2 +- .../v1/input-plugins/mailchimp/_index.md | 2 +- .../v1/input-plugins/marklogic/_index.md | 2 +- .../v1/input-plugins/mavlink/_index.md | 2 +- .../v1/input-plugins/mcrouter/_index.md | 2 +- .../v1/input-plugins/mdstat/_index.md | 2 +- .../telegraf/v1/input-plugins/mem/_index.md | 2 +- .../v1/input-plugins/memcached/_index.md | 2 +- .../telegraf/v1/input-plugins/mesos/_index.md | 2 +- .../v1/input-plugins/minecraft/_index.md | 2 +- .../telegraf/v1/input-plugins/mock/_index.md | 2 +- .../v1/input-plugins/modbus/_index.md | 2 +- .../v1/input-plugins/mongodb/_index.md | 2 +- .../telegraf/v1/input-plugins/monit/_index.md | 2 +- .../v1/input-plugins/mqtt_consumer/_index.md | 8 +- .../v1/input-plugins/multifile/_index.md | 2 +- .../telegraf/v1/input-plugins/mysql/_index.md | 12 +- .../telegraf/v1/input-plugins/nats/_index.md | 2 +- .../v1/input-plugins/nats_consumer/_index.md | 2 +- .../v1/input-plugins/neoom_beaam/_index.md | 2 +- .../v1/input-plugins/neptune_apex/_index.md | 2 +- .../telegraf/v1/input-plugins/net/_index.md | 2 +- .../v1/input-plugins/net_response/_index.md | 2 +- .../v1/input-plugins/netflow/_index.md | 2 +- .../v1/input-plugins/netstat/_index.md | 2 +- .../v1/input-plugins/nfsclient/_index.md | 2 +- .../v1/input-plugins/nftables/_index.md | 44 ++++- .../telegraf/v1/input-plugins/nginx/_index.md | 2 +- .../v1/input-plugins/nginx_plus/_index.md | 2 +- .../v1/input-plugins/nginx_plus_api/_index.md | 2 +- .../v1/input-plugins/nginx_sts/_index.md | 2 +- .../nginx_upstream_check/_index.md | 2 +- .../v1/input-plugins/nginx_vts/_index.md | 2 +- .../telegraf/v1/input-plugins/nomad/_index.md | 2 +- .../telegraf/v1/input-plugins/nsd/_index.md | 2 +- .../telegraf/v1/input-plugins/nsdp/_index.md | 2 +- .../telegraf/v1/input-plugins/nsq/_index.md | 2 +- .../v1/input-plugins/nsq_consumer/_index.md | 2 +- .../telegraf/v1/input-plugins/nstat/_index.md | 2 +- .../telegraf/v1/input-plugins/ntpq/_index.md | 2 +- 
.../v1/input-plugins/nvidia_smi/_index.md | 2 +- .../telegraf/v1/input-plugins/opcua/_index.md | 46 ++++- .../v1/input-plugins/opcua_listener/_index.md | 66 +++++-- .../v1/input-plugins/openldap/_index.md | 2 +- .../v1/input-plugins/openntpd/_index.md | 2 +- .../input-plugins/opensearch_query/_index.md | 2 +- .../v1/input-plugins/opensmtpd/_index.md | 2 +- .../v1/input-plugins/openstack/_index.md | 2 +- .../v1/input-plugins/opentelemetry/_index.md | 2 +- .../v1/input-plugins/openweathermap/_index.md | 2 +- .../v1/input-plugins/p4runtime/_index.md | 2 +- .../v1/input-plugins/passenger/_index.md | 2 +- .../telegraf/v1/input-plugins/pf/_index.md | 2 +- .../v1/input-plugins/pgbouncer/_index.md | 2 +- .../v1/input-plugins/phpfpm/_index.md | 2 +- .../telegraf/v1/input-plugins/ping/_index.md | 2 +- .../v1/input-plugins/postfix/_index.md | 2 +- .../v1/input-plugins/postgresql/_index.md | 2 +- .../postgresql_extensible/_index.md | 2 +- .../v1/input-plugins/powerdns/_index.md | 2 +- .../input-plugins/powerdns_recursor/_index.md | 2 +- .../v1/input-plugins/processes/_index.md | 2 +- .../v1/input-plugins/procstat/_index.md | 2 +- .../v1/input-plugins/prometheus/_index.md | 2 +- .../v1/input-plugins/promql/_index.md | 5 +- .../v1/input-plugins/proxmox/_index.md | 2 +- .../v1/input-plugins/puppetagent/_index.md | 2 +- .../v1/input-plugins/rabbitmq/_index.md | 2 +- .../v1/input-plugins/radius/_index.md | 2 +- .../v1/input-plugins/raindrops/_index.md | 2 +- .../telegraf/v1/input-plugins/ras/_index.md | 2 +- .../v1/input-plugins/ravendb/_index.md | 2 +- .../v1/input-plugins/redfish/_index.md | 2 +- .../telegraf/v1/input-plugins/redis/_index.md | 2 +- .../v1/input-plugins/redis_sentinel/_index.md | 2 +- .../v1/input-plugins/rethinkdb/_index.md | 2 +- .../telegraf/v1/input-plugins/riak/_index.md | 2 +- .../input-plugins/riemann_listener/_index.md | 2 +- .../v1/input-plugins/s7comm/_index.md | 2 +- .../v1/input-plugins/salesforce/_index.md | 2 +- .../v1/input-plugins/sensors/_index.md 
| 2 +- .../telegraf/v1/input-plugins/sflow/_index.md | 2 +- .../telegraf/v1/input-plugins/sip/_index.md | 182 ++++++++++++++++++ .../telegraf/v1/input-plugins/slab/_index.md | 2 +- .../telegraf/v1/input-plugins/slurm/_index.md | 2 +- .../telegraf/v1/input-plugins/smart/_index.md | 15 +- .../v1/input-plugins/smartctl/_index.md | 2 +- .../telegraf/v1/input-plugins/snmp/_index.md | 2 +- .../v1/input-plugins/snmp_trap/_index.md | 2 +- .../input-plugins/socket_listener/_index.md | 2 +- .../v1/input-plugins/socketstat/_index.md | 2 +- .../telegraf/v1/input-plugins/solr/_index.md | 2 +- .../telegraf/v1/input-plugins/sql/_index.md | 2 +- .../v1/input-plugins/sqlserver/_index.md | 2 +- .../v1/input-plugins/stackdriver/_index.md | 2 +- .../v1/input-plugins/statsd/_index.md | 40 +++- .../v1/input-plugins/supervisor/_index.md | 2 +- .../v1/input-plugins/suricata/_index.md | 2 +- .../telegraf/v1/input-plugins/swap/_index.md | 2 +- .../v1/input-plugins/synproxy/_index.md | 2 +- .../v1/input-plugins/syslog/_index.md | 2 +- .../v1/input-plugins/sysstat/_index.md | 2 +- .../v1/input-plugins/system/_index.md | 2 +- .../v1/input-plugins/systemd_units/_index.md | 2 +- .../v1/input-plugins/tacacs/_index.md | 2 +- .../telegraf/v1/input-plugins/tail/_index.md | 2 +- .../v1/input-plugins/teamspeak/_index.md | 2 +- .../telegraf/v1/input-plugins/temp/_index.md | 2 +- .../v1/input-plugins/tengine/_index.md | 2 +- .../telegraf/v1/input-plugins/timex/_index.md | 2 +- .../v1/input-plugins/tomcat/_index.md | 2 +- .../telegraf/v1/input-plugins/trig/_index.md | 2 +- .../v1/input-plugins/turbostat/_index.md | 2 +- .../v1/input-plugins/twemproxy/_index.md | 2 +- .../v1/input-plugins/unbound/_index.md | 2 +- .../telegraf/v1/input-plugins/upsd/_index.md | 2 +- .../telegraf/v1/input-plugins/uwsgi/_index.md | 2 +- .../v1/input-plugins/varnish/_index.md | 2 +- .../telegraf/v1/input-plugins/vault/_index.md | 2 +- .../v1/input-plugins/vsphere/_index.md | 2 +- .../v1/input-plugins/webhooks/_index.md | 2 +- 
.../telegraf/v1/input-plugins/whois/_index.md | 2 +- .../v1/input-plugins/win_eventlog/_index.md | 2 +- .../input-plugins/win_perf_counters/_index.md | 2 +- .../v1/input-plugins/win_services/_index.md | 2 +- .../v1/input-plugins/win_wmi/_index.md | 2 +- .../v1/input-plugins/wireguard/_index.md | 2 +- .../v1/input-plugins/wireless/_index.md | 2 +- .../v1/input-plugins/x509_cert/_index.md | 2 +- .../v1/input-plugins/xtremio/_index.md | 2 +- .../telegraf/v1/input-plugins/zfs/_index.md | 2 +- .../v1/input-plugins/zipkin/_index.md | 2 +- .../v1/input-plugins/zookeeper/_index.md | 2 +- .../telegraf/v1/output-plugins/amon/_index.md | 2 +- .../telegraf/v1/output-plugins/amqp/_index.md | 2 +- .../application_insights/_index.md | 2 +- .../telegraf/v1/output-plugins/arc/_index.md | 2 +- .../azure_data_explorer/_index.md | 2 +- .../v1/output-plugins/azure_monitor/_index.md | 2 +- .../v1/output-plugins/bigquery/_index.md | 2 +- .../v1/output-plugins/clarify/_index.md | 2 +- .../v1/output-plugins/cloud_pubsub/_index.md | 2 +- .../v1/output-plugins/cloudwatch/_index.md | 2 +- .../output-plugins/cloudwatch_logs/_index.md | 2 +- .../v1/output-plugins/cratedb/_index.md | 2 +- .../v1/output-plugins/datadog/_index.md | 2 +- .../v1/output-plugins/discard/_index.md | 2 +- .../v1/output-plugins/dynatrace/_index.md | 2 +- .../v1/output-plugins/elasticsearch/_index.md | 2 +- .../v1/output-plugins/event_hubs/_index.md | 2 +- .../telegraf/v1/output-plugins/exec/_index.md | 2 +- .../v1/output-plugins/execd/_index.md | 2 +- .../telegraf/v1/output-plugins/file/_index.md | 2 +- .../v1/output-plugins/graphite/_index.md | 2 +- .../v1/output-plugins/graylog/_index.md | 2 +- .../v1/output-plugins/groundwork/_index.md | 2 +- .../v1/output-plugins/health/_index.md | 5 +- .../v1/output-plugins/heartbeat/_index.md | 175 ++++++++++++++++- .../telegraf/v1/output-plugins/http/_index.md | 5 +- .../v1/output-plugins/influxdb/_index.md | 2 +- .../v1/output-plugins/influxdb_v2/_index.md | 4 +- 
.../v1/output-plugins/influxdb_v3/_index.md | 146 ++++++++++++++ .../v1/output-plugins/inlong/_index.md | 2 +- .../v1/output-plugins/instrumental/_index.md | 2 +- .../v1/output-plugins/iotdb/_index.md | 2 +- .../v1/output-plugins/kafka/_index.md | 2 +- .../v1/output-plugins/kinesis/_index.md | 2 +- .../v1/output-plugins/librato/_index.md | 2 +- .../v1/output-plugins/logzio/_index.md | 2 +- .../telegraf/v1/output-plugins/loki/_index.md | 2 +- .../output-plugins/microsoft_fabric/_index.md | 2 +- .../v1/output-plugins/mongodb/_index.md | 62 +++--- .../telegraf/v1/output-plugins/mqtt/_index.md | 2 +- .../telegraf/v1/output-plugins/nats/_index.md | 2 +- .../nebius_cloud_monitoring/_index.md | 2 +- .../v1/output-plugins/newrelic/_index.md | 2 +- .../telegraf/v1/output-plugins/nsq/_index.md | 2 +- .../v1/output-plugins/opensearch/_index.md | 2 +- .../v1/output-plugins/opentelemetry/_index.md | 10 +- .../v1/output-plugins/opentsdb/_index.md | 2 +- .../v1/output-plugins/parquet/_index.md | 2 +- .../v1/output-plugins/postgresql/_index.md | 2 +- .../prometheus_client/_index.md | 8 +- .../telegraf/v1/output-plugins/quix/_index.md | 2 +- .../output-plugins/redistimeseries/_index.md | 9 +- .../v1/output-plugins/remotefile/_index.md | 2 +- .../v1/output-plugins/riemann/_index.md | 2 +- .../v1/output-plugins/sensu/_index.md | 2 +- .../v1/output-plugins/signalfx/_index.md | 2 +- .../v1/output-plugins/socket_writer/_index.md | 2 +- .../telegraf/v1/output-plugins/sql/_index.md | 2 +- .../v1/output-plugins/stackdriver/_index.md | 13 +- .../v1/output-plugins/stomp/_index.md | 2 +- .../v1/output-plugins/sumologic/_index.md | 2 +- .../v1/output-plugins/syslog/_index.md | 2 +- .../v1/output-plugins/timestream/_index.md | 2 +- .../v1/output-plugins/warp10/_index.md | 2 +- .../v1/output-plugins/wavefront/_index.md | 2 +- .../v1/output-plugins/websocket/_index.md | 2 +- .../yandex_cloud_monitoring/_index.md | 2 +- .../v1/output-plugins/zabbix/_index.md | 2 +- 
.../v1/processor-plugins/aws_ec2/_index.md | 2 +- .../v1/processor-plugins/batch/_index.md | 2 +- .../v1/processor-plugins/clone/_index.md | 2 +- .../v1/processor-plugins/converter/_index.md | 2 +- .../cumulative_sum/_index.md | 2 +- .../v1/processor-plugins/date/_index.md | 2 +- .../v1/processor-plugins/dedup/_index.md | 2 +- .../v1/processor-plugins/defaults/_index.md | 2 +- .../v1/processor-plugins/enum/_index.md | 2 +- .../v1/processor-plugins/execd/_index.md | 2 +- .../v1/processor-plugins/filepath/_index.md | 2 +- .../v1/processor-plugins/filter/_index.md | 2 +- .../v1/processor-plugins/ifname/_index.md | 2 +- .../v1/processor-plugins/lookup/_index.md | 2 +- .../v1/processor-plugins/noise/_index.md | 2 +- .../v1/processor-plugins/override/_index.md | 2 +- .../v1/processor-plugins/parser/_index.md | 2 +- .../v1/processor-plugins/pivot/_index.md | 2 +- .../v1/processor-plugins/port_name/_index.md | 2 +- .../v1/processor-plugins/printer/_index.md | 2 +- .../v1/processor-plugins/regex/_index.md | 2 +- .../v1/processor-plugins/rename/_index.md | 2 +- .../processor-plugins/reverse_dns/_index.md | 2 +- .../v1/processor-plugins/round/_index.md | 2 +- .../v1/processor-plugins/s2geo/_index.md | 2 +- .../v1/processor-plugins/scale/_index.md | 2 +- .../processor-plugins/snmp_lookup/_index.md | 2 +- .../v1/processor-plugins/split/_index.md | 2 +- .../v1/processor-plugins/starlark/_index.md | 2 +- .../v1/processor-plugins/strings/_index.md | 2 +- .../v1/processor-plugins/tag_limit/_index.md | 2 +- .../v1/processor-plugins/template/_index.md | 2 +- .../v1/processor-plugins/timestamp/_index.md | 2 +- .../v1/processor-plugins/topk/_index.md | 2 +- .../v1/processor-plugins/unpivot/_index.md | 2 +- content/telegraf/v1/release-notes.md | 75 ++++++++ data/products.yml | 4 +- data/telegraf_plugins.yml | 29 ++- 359 files changed, 1220 insertions(+), 426 deletions(-) create mode 100644 content/telegraf/v1/input-plugins/sip/_index.md create mode 100644 
content/telegraf/v1/output-plugins/influxdb_v3/_index.md diff --git a/content/telegraf/v1/aggregator-plugins/basicstats/_index.md b/content/telegraf/v1/aggregator-plugins/basicstats/_index.md index da433ed00d..761316c005 100644 --- a/content/telegraf/v1/aggregator-plugins/basicstats/_index.md +++ b/content/telegraf/v1/aggregator-plugins/basicstats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/basicstats/README.md, Basic Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/basicstats/README.md, Basic Statistics Plugin Source --- # Basic Statistics Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/derivative/_index.md b/content/telegraf/v1/aggregator-plugins/derivative/_index.md index 17ae103695..ac8c7622c9 100644 --- a/content/telegraf/v1/aggregator-plugins/derivative/_index.md +++ b/content/telegraf/v1/aggregator-plugins/derivative/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/derivative/README.md, Derivative Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/derivative/README.md, Derivative Plugin Source --- # Derivative Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/final/_index.md b/content/telegraf/v1/aggregator-plugins/final/_index.md index e0b5b692b9..50c2447772 100644 --- a/content/telegraf/v1/aggregator-plugins/final/_index.md +++ b/content/telegraf/v1/aggregator-plugins/final/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/final/README.md, Final Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/final/README.md, Final Plugin Source --- # Final Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/histogram/_index.md b/content/telegraf/v1/aggregator-plugins/histogram/_index.md index 2506c59217..9e679991cf 100644 --- a/content/telegraf/v1/aggregator-plugins/histogram/_index.md +++ b/content/telegraf/v1/aggregator-plugins/histogram/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/histogram/README.md, Histogram Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/histogram/README.md, Histogram Plugin Source --- # Histogram Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/merge/_index.md b/content/telegraf/v1/aggregator-plugins/merge/_index.md index 1d9d594c99..46d645c450 100644 --- a/content/telegraf/v1/aggregator-plugins/merge/_index.md +++ b/content/telegraf/v1/aggregator-plugins/merge/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/merge/README.md, Merge Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/merge/README.md, Merge Plugin Source --- # Merge Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/minmax/_index.md b/content/telegraf/v1/aggregator-plugins/minmax/_index.md index da6a71a6f6..70c4bfd1bc 100644 --- a/content/telegraf/v1/aggregator-plugins/minmax/_index.md +++ b/content/telegraf/v1/aggregator-plugins/minmax/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, 
macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/minmax/README.md, Minimum-Maximum Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/minmax/README.md, Minimum-Maximum Plugin Source --- # Minimum-Maximum Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/quantile/_index.md b/content/telegraf/v1/aggregator-plugins/quantile/_index.md index 9b9776f5c8..b46d45cbb4 100644 --- a/content/telegraf/v1/aggregator-plugins/quantile/_index.md +++ b/content/telegraf/v1/aggregator-plugins/quantile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/quantile/README.md, Quantile Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/quantile/README.md, Quantile Plugin Source --- # Quantile Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/starlark/_index.md b/content/telegraf/v1/aggregator-plugins/starlark/_index.md index ab6eaf7d8b..98a47d6833 100644 --- a/content/telegraf/v1/aggregator-plugins/starlark/_index.md +++ b/content/telegraf/v1/aggregator-plugins/starlark/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/starlark/README.md, Starlark Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/starlark/README.md, Starlark Plugin Source --- # Starlark Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md b/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md index 1218ca7d1b..9877f44bb5 100644 --- 
a/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md +++ b/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/valuecounter/README.md, Value Counter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/valuecounter/README.md, Value Counter Plugin Source --- # Value Counter Aggregator Plugin diff --git a/content/telegraf/v1/input-plugins/activemq/_index.md b/content/telegraf/v1/input-plugins/activemq/_index.md index 3498a4d237..eb3db060eb 100644 --- a/content/telegraf/v1/input-plugins/activemq/_index.md +++ b/content/telegraf/v1/input-plugins/activemq/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/activemq/README.md, ActiveMQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/activemq/README.md, ActiveMQ Plugin Source --- # ActiveMQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/aerospike/_index.md b/content/telegraf/v1/input-plugins/aerospike/_index.md index da98a802fd..377fdcacb3 100644 --- a/content/telegraf/v1/input-plugins/aerospike/_index.md +++ b/content/telegraf/v1/input-plugins/aerospike/_index.md @@ -12,7 +12,7 @@ removal: v1.40.0 os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aerospike/README.md, Aerospike Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aerospike/README.md, Aerospike Plugin Source --- # Aerospike Input Plugin diff --git a/content/telegraf/v1/input-plugins/aliyuncms/_index.md 
b/content/telegraf/v1/input-plugins/aliyuncms/_index.md index d948a4a706..7d14303710 100644 --- a/content/telegraf/v1/input-plugins/aliyuncms/_index.md +++ b/content/telegraf/v1/input-plugins/aliyuncms/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aliyuncms/README.md, Alibaba Cloud Monitor Service (Aliyun) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aliyuncms/README.md, Alibaba Cloud Monitor Service (Aliyun) Plugin Source --- # Alibaba Cloud Monitor Service (Aliyun) Input Plugin diff --git a/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md b/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md index c875d9e6f6..fa47fb0ff7 100644 --- a/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md +++ b/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/amd_rocm_smi/README.md, AMD ROCm System Management Interface (SMI) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/amd_rocm_smi/README.md, AMD ROCm System Management Interface (SMI) Plugin Source --- # AMD ROCm System Management Interface (SMI) Input Plugin diff --git a/content/telegraf/v1/input-plugins/amqp_consumer/_index.md b/content/telegraf/v1/input-plugins/amqp_consumer/_index.md index 1a049529a1..cf55e8315f 100644 --- a/content/telegraf/v1/input-plugins/amqp_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/amqp_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/amqp_consumer/README.md, AMQP Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/amqp_consumer/README.md, AMQP Consumer Plugin Source --- # AMQP Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/apache/_index.md b/content/telegraf/v1/input-plugins/apache/_index.md index 16f9df9b2d..5352b30b87 100644 --- a/content/telegraf/v1/input-plugins/apache/_index.md +++ b/content/telegraf/v1/input-plugins/apache/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/apache/README.md, Apache Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/apache/README.md, Apache Plugin Source --- # Apache Input Plugin diff --git a/content/telegraf/v1/input-plugins/apcupsd/_index.md b/content/telegraf/v1/input-plugins/apcupsd/_index.md index 49eabfd48a..1f081d1938 100644 --- a/content/telegraf/v1/input-plugins/apcupsd/_index.md +++ b/content/telegraf/v1/input-plugins/apcupsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/apcupsd/README.md, APC UPSD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/apcupsd/README.md, APC UPSD Plugin Source --- # APC UPSD Input Plugin diff --git a/content/telegraf/v1/input-plugins/aurora/_index.md b/content/telegraf/v1/input-plugins/aurora/_index.md index 2aa80d69db..d56c935f82 100644 --- a/content/telegraf/v1/input-plugins/aurora/_index.md +++ b/content/telegraf/v1/input-plugins/aurora/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aurora/README.md, Apache Aurora Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aurora/README.md, Apache Aurora Plugin Source --- # Apache Aurora Input Plugin diff --git a/content/telegraf/v1/input-plugins/azure_monitor/_index.md b/content/telegraf/v1/input-plugins/azure_monitor/_index.md index 525b66cdbd..47a1bbbfed 100644 --- a/content/telegraf/v1/input-plugins/azure_monitor/_index.md +++ b/content/telegraf/v1/input-plugins/azure_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/azure_monitor/README.md, Azure Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/azure_monitor/README.md, Azure Monitor Plugin Source --- # Azure Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md b/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md index 2da4ddb3ce..0fdf846ca8 100644 --- a/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md +++ b/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/azure_storage_queue/README.md, Azure Queue Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/azure_storage_queue/README.md, Azure Queue Storage Plugin Source --- # Azure Queue Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/bcache/_index.md b/content/telegraf/v1/input-plugins/bcache/_index.md index ae2006ed74..04d998e4e0 100644 --- a/content/telegraf/v1/input-plugins/bcache/_index.md +++ 
b/content/telegraf/v1/input-plugins/bcache/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bcache/README.md, Bcache Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bcache/README.md, Bcache Plugin Source --- # Bcache Input Plugin diff --git a/content/telegraf/v1/input-plugins/beanstalkd/_index.md b/content/telegraf/v1/input-plugins/beanstalkd/_index.md index adbcf02017..66aa0f22a3 100644 --- a/content/telegraf/v1/input-plugins/beanstalkd/_index.md +++ b/content/telegraf/v1/input-plugins/beanstalkd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/beanstalkd/README.md, Beanstalkd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/beanstalkd/README.md, Beanstalkd Plugin Source --- # Beanstalkd Input Plugin diff --git a/content/telegraf/v1/input-plugins/beat/_index.md b/content/telegraf/v1/input-plugins/beat/_index.md index 54594f5214..98d42d9ca6 100644 --- a/content/telegraf/v1/input-plugins/beat/_index.md +++ b/content/telegraf/v1/input-plugins/beat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/beat/README.md, Beat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/beat/README.md, Beat Plugin Source --- # Beat Input Plugin diff --git a/content/telegraf/v1/input-plugins/bind/_index.md b/content/telegraf/v1/input-plugins/bind/_index.md index 5a7e2ec2f1..8bb23bbd69 100644 --- a/content/telegraf/v1/input-plugins/bind/_index.md +++ b/content/telegraf/v1/input-plugins/bind/_index.md @@ -10,7 +10,7 @@ 
introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bind/README.md, BIND 9 Nameserver Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bind/README.md, BIND 9 Nameserver Plugin Source --- # BIND 9 Nameserver Input Plugin diff --git a/content/telegraf/v1/input-plugins/bond/_index.md b/content/telegraf/v1/input-plugins/bond/_index.md index e97304b7a6..f4af68588b 100644 --- a/content/telegraf/v1/input-plugins/bond/_index.md +++ b/content/telegraf/v1/input-plugins/bond/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bond/README.md, Bond Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bond/README.md, Bond Plugin Source --- # Bond Input Plugin diff --git a/content/telegraf/v1/input-plugins/burrow/_index.md b/content/telegraf/v1/input-plugins/burrow/_index.md index 79c6fa9cdb..8cdf5715a4 100644 --- a/content/telegraf/v1/input-plugins/burrow/_index.md +++ b/content/telegraf/v1/input-plugins/burrow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/burrow/README.md, Burrow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/burrow/README.md, Burrow Plugin Source --- # Burrow Input Plugin diff --git a/content/telegraf/v1/input-plugins/ceph/_index.md b/content/telegraf/v1/input-plugins/ceph/_index.md index 7b0a4054f1..4e2457853f 100644 --- a/content/telegraf/v1/input-plugins/ceph/_index.md +++ b/content/telegraf/v1/input-plugins/ceph/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, 
linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ceph/README.md, Ceph Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ceph/README.md, Ceph Storage Plugin Source --- # Ceph Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/cgroup/_index.md b/content/telegraf/v1/input-plugins/cgroup/_index.md index cedbcf5191..e18fb7861a 100644 --- a/content/telegraf/v1/input-plugins/cgroup/_index.md +++ b/content/telegraf/v1/input-plugins/cgroup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cgroup/README.md, Control Group Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cgroup/README.md, Control Group Plugin Source --- # Control Group Input Plugin diff --git a/content/telegraf/v1/input-plugins/chrony/_index.md b/content/telegraf/v1/input-plugins/chrony/_index.md index 47e700b850..dde05fefea 100644 --- a/content/telegraf/v1/input-plugins/chrony/_index.md +++ b/content/telegraf/v1/input-plugins/chrony/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/chrony/README.md, chrony Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/chrony/README.md, chrony Plugin Source --- # chrony Input Plugin diff --git a/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md b/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md index 9428d08979..dfde415719 100644 --- a/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md +++ b/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: 
"freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cisco_telemetry_mdt/README.md, Cisco Model-Driven Telemetry (MDT) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cisco_telemetry_mdt/README.md, Cisco Model-Driven Telemetry (MDT) Plugin Source --- # Cisco Model-Driven Telemetry (MDT) Input Plugin diff --git a/content/telegraf/v1/input-plugins/clickhouse/_index.md b/content/telegraf/v1/input-plugins/clickhouse/_index.md index 78db381bb4..720a87aecf 100644 --- a/content/telegraf/v1/input-plugins/clickhouse/_index.md +++ b/content/telegraf/v1/input-plugins/clickhouse/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/clickhouse/README.md, ClickHouse Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/clickhouse/README.md, ClickHouse Plugin Source --- # ClickHouse Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md b/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md index acc51346a8..85b9f8d224 100644 --- a/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md +++ b/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source --- # Google Cloud PubSub Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md b/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md index 
29da06e3c6..cb8c8fdbec 100644 --- a/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md +++ b/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloud_pubsub_push/README.md, Google Cloud PubSub Push Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloud_pubsub_push/README.md, Google Cloud PubSub Push Plugin Source --- # Google Cloud PubSub Push Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloudwatch/_index.md b/content/telegraf/v1/input-plugins/cloudwatch/_index.md index 47d54ab4eb..dbb1717367 100644 --- a/content/telegraf/v1/input-plugins/cloudwatch/_index.md +++ b/content/telegraf/v1/input-plugins/cloudwatch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloudwatch/README.md, Amazon CloudWatch Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloudwatch/README.md, Amazon CloudWatch Statistics Plugin Source --- # Amazon CloudWatch Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md b/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md index 975e1d740b..3bea448608 100644 --- a/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md +++ b/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloudwatch_metric_streams/README.md, Amazon CloudWatch Metric Streams Plugin Source 
+ - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloudwatch_metric_streams/README.md, Amazon CloudWatch Metric Streams Plugin Source --- # Amazon CloudWatch Metric Streams Input Plugin diff --git a/content/telegraf/v1/input-plugins/conntrack/_index.md b/content/telegraf/v1/input-plugins/conntrack/_index.md index fbf5ab027e..b2c9190633 100644 --- a/content/telegraf/v1/input-plugins/conntrack/_index.md +++ b/content/telegraf/v1/input-plugins/conntrack/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/conntrack/README.md, Netfilter Conntrack Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/conntrack/README.md, Netfilter Conntrack Plugin Source --- # Netfilter Conntrack Input Plugin diff --git a/content/telegraf/v1/input-plugins/consul/_index.md b/content/telegraf/v1/input-plugins/consul/_index.md index ab45b41fe2..c96d4a6a3e 100644 --- a/content/telegraf/v1/input-plugins/consul/_index.md +++ b/content/telegraf/v1/input-plugins/consul/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/consul/README.md, Hashicorp Consul Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/consul/README.md, Hashicorp Consul Plugin Source --- # Hashicorp Consul Input Plugin diff --git a/content/telegraf/v1/input-plugins/consul_agent/_index.md b/content/telegraf/v1/input-plugins/consul_agent/_index.md index 1364562cf4..916e398181 100644 --- a/content/telegraf/v1/input-plugins/consul_agent/_index.md +++ b/content/telegraf/v1/input-plugins/consul_agent/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/consul_agent/README.md, Hashicorp Consul Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/consul_agent/README.md, Hashicorp Consul Agent Plugin Source --- # Hashicorp Consul Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/couchbase/_index.md b/content/telegraf/v1/input-plugins/couchbase/_index.md index 6ae00773c5..b6cccd27eb 100644 --- a/content/telegraf/v1/input-plugins/couchbase/_index.md +++ b/content/telegraf/v1/input-plugins/couchbase/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/couchbase/README.md, Couchbase Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/couchbase/README.md, Couchbase Plugin Source --- # Couchbase Input Plugin diff --git a/content/telegraf/v1/input-plugins/couchdb/_index.md b/content/telegraf/v1/input-plugins/couchdb/_index.md index 5a84c73b83..d5fe0e4b15 100644 --- a/content/telegraf/v1/input-plugins/couchdb/_index.md +++ b/content/telegraf/v1/input-plugins/couchdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/couchdb/README.md, Apache CouchDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/couchdb/README.md, Apache CouchDB Plugin Source --- # Apache CouchDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/cpu/_index.md b/content/telegraf/v1/input-plugins/cpu/_index.md index 290d534302..9cfd0a595c 100644 --- a/content/telegraf/v1/input-plugins/cpu/_index.md +++ b/content/telegraf/v1/input-plugins/cpu/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cpu/README.md, CPU Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cpu/README.md, CPU Plugin Source --- # CPU Input Plugin diff --git a/content/telegraf/v1/input-plugins/csgo/_index.md b/content/telegraf/v1/input-plugins/csgo/_index.md index 4670882d3f..4adffe244e 100644 --- a/content/telegraf/v1/input-plugins/csgo/_index.md +++ b/content/telegraf/v1/input-plugins/csgo/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/csgo/README.md, Counter-Strike Global Offensive (CSGO) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/csgo/README.md, Counter-Strike Global Offensive (CSGO) Plugin Source --- # Counter-Strike: Global Offensive (CSGO) Input Plugin diff --git a/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md b/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md index af43240e55..8cf6feff3f 100644 --- a/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md +++ b/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ctrlx_datalayer/README.md, Bosch Rexroth ctrlX Data Layer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ctrlx_datalayer/README.md, Bosch Rexroth ctrlX Data Layer Plugin Source --- # Bosch Rexroth ctrlX Data Layer Input Plugin diff --git a/content/telegraf/v1/input-plugins/dcos/_index.md b/content/telegraf/v1/input-plugins/dcos/_index.md index 38f7dc0003..6340b5d0d2 100644 --- a/content/telegraf/v1/input-plugins/dcos/_index.md 
+++ b/content/telegraf/v1/input-plugins/dcos/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dcos/README.md, Mesosphere Distributed Cloud OS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dcos/README.md, Mesosphere Distributed Cloud OS Plugin Source --- # Mesosphere Distributed Cloud OS Input Plugin diff --git a/content/telegraf/v1/input-plugins/directory_monitor/_index.md b/content/telegraf/v1/input-plugins/directory_monitor/_index.md index 83b1b80733..4a131d4a3e 100644 --- a/content/telegraf/v1/input-plugins/directory_monitor/_index.md +++ b/content/telegraf/v1/input-plugins/directory_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/directory_monitor/README.md, Directory Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/directory_monitor/README.md, Directory Monitor Plugin Source --- # Directory Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/disk/_index.md b/content/telegraf/v1/input-plugins/disk/_index.md index 49e88c18d8..2f61daf2ff 100644 --- a/content/telegraf/v1/input-plugins/disk/_index.md +++ b/content/telegraf/v1/input-plugins/disk/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/disk/README.md, Disk Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/disk/README.md, Disk Plugin Source --- # Disk Input Plugin diff --git a/content/telegraf/v1/input-plugins/diskio/_index.md 
b/content/telegraf/v1/input-plugins/diskio/_index.md index b6e8e2da1a..7a3ba1888e 100644 --- a/content/telegraf/v1/input-plugins/diskio/_index.md +++ b/content/telegraf/v1/input-plugins/diskio/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/diskio/README.md, DiskIO Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/diskio/README.md, DiskIO Plugin Source --- # DiskIO Input Plugin diff --git a/content/telegraf/v1/input-plugins/disque/_index.md b/content/telegraf/v1/input-plugins/disque/_index.md index 6c9538d156..f42f629372 100644 --- a/content/telegraf/v1/input-plugins/disque/_index.md +++ b/content/telegraf/v1/input-plugins/disque/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/disque/README.md, Disque Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/disque/README.md, Disque Plugin Source --- # Disque Input Plugin diff --git a/content/telegraf/v1/input-plugins/dmcache/_index.md b/content/telegraf/v1/input-plugins/dmcache/_index.md index a845854408..3768a54cf5 100644 --- a/content/telegraf/v1/input-plugins/dmcache/_index.md +++ b/content/telegraf/v1/input-plugins/dmcache/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dmcache/README.md, Device Mapper Cache Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dmcache/README.md, Device Mapper Cache Plugin Source --- # Device Mapper Cache Input Plugin diff --git a/content/telegraf/v1/input-plugins/dns_query/_index.md 
b/content/telegraf/v1/input-plugins/dns_query/_index.md index 06d9603531..dba08d1a24 100644 --- a/content/telegraf/v1/input-plugins/dns_query/_index.md +++ b/content/telegraf/v1/input-plugins/dns_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dns_query/README.md, DNS Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dns_query/README.md, DNS Query Plugin Source --- # DNS Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/docker/_index.md b/content/telegraf/v1/input-plugins/docker/_index.md index 2d5154f6c5..9493c7e137 100644 --- a/content/telegraf/v1/input-plugins/docker/_index.md +++ b/content/telegraf/v1/input-plugins/docker/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/docker/README.md, Docker Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/docker/README.md, Docker Plugin Source --- # Docker Input Plugin diff --git a/content/telegraf/v1/input-plugins/docker_log/_index.md b/content/telegraf/v1/input-plugins/docker_log/_index.md index 16f41026a1..155cddee71 100644 --- a/content/telegraf/v1/input-plugins/docker_log/_index.md +++ b/content/telegraf/v1/input-plugins/docker_log/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/docker_log/README.md, Docker Log Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/docker_log/README.md, Docker Log Plugin Source --- # Docker Log Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/dovecot/_index.md b/content/telegraf/v1/input-plugins/dovecot/_index.md index cf90b6be9d..595f35fb38 100644 --- a/content/telegraf/v1/input-plugins/dovecot/_index.md +++ b/content/telegraf/v1/input-plugins/dovecot/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dovecot/README.md, Dovecot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dovecot/README.md, Dovecot Plugin Source --- # Dovecot Input Plugin diff --git a/content/telegraf/v1/input-plugins/dpdk/_index.md b/content/telegraf/v1/input-plugins/dpdk/_index.md index 8eb6440e14..f23988ea45 100644 --- a/content/telegraf/v1/input-plugins/dpdk/_index.md +++ b/content/telegraf/v1/input-plugins/dpdk/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dpdk/README.md, Data Plane Development Kit (DPDK) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dpdk/README.md, Data Plane Development Kit (DPDK) Plugin Source --- # Data Plane Development Kit (DPDK) Input Plugin diff --git a/content/telegraf/v1/input-plugins/ecs/_index.md b/content/telegraf/v1/input-plugins/ecs/_index.md index 750223b42d..b209580f72 100644 --- a/content/telegraf/v1/input-plugins/ecs/_index.md +++ b/content/telegraf/v1/input-plugins/ecs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ecs/README.md, Amazon Elastic Container Service Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ecs/README.md, Amazon Elastic Container Service Plugin Source --- # 
Amazon Elastic Container Service Input Plugin diff --git a/content/telegraf/v1/input-plugins/elasticsearch/_index.md b/content/telegraf/v1/input-plugins/elasticsearch/_index.md index 43f0fc08b3..9720200e4d 100644 --- a/content/telegraf/v1/input-plugins/elasticsearch/_index.md +++ b/content/telegraf/v1/input-plugins/elasticsearch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/elasticsearch/README.md, Elasticsearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/elasticsearch/README.md, Elasticsearch Plugin Source --- # Elasticsearch Input Plugin diff --git a/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md b/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md index aec2c44252..5a2958dbfc 100644 --- a/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md +++ b/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/elasticsearch_query/README.md, Elasticsearch Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/elasticsearch_query/README.md, Elasticsearch Query Plugin Source --- # Elasticsearch Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/ethtool/_index.md b/content/telegraf/v1/input-plugins/ethtool/_index.md index 39c46f5035..876c5b753a 100644 --- a/content/telegraf/v1/input-plugins/ethtool/_index.md +++ b/content/telegraf/v1/input-plugins/ethtool/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ethtool/README.md, Ethtool 
Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ethtool/README.md, Ethtool Plugin Source --- # Ethtool Input Plugin diff --git a/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md b/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md index 249402bed8..95e073e6f7 100644 --- a/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/eventhub_consumer/README.md, Azure Event Hub Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/eventhub_consumer/README.md, Azure Event Hub Consumer Plugin Source --- # Azure Event Hub Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/exec/_index.md b/content/telegraf/v1/input-plugins/exec/_index.md index b584b9034f..c542d6d0b6 100644 --- a/content/telegraf/v1/input-plugins/exec/_index.md +++ b/content/telegraf/v1/input-plugins/exec/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/exec/README.md, Exec Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/exec/README.md, Exec Plugin Source --- # Exec Input Plugin diff --git a/content/telegraf/v1/input-plugins/execd/_index.md b/content/telegraf/v1/input-plugins/execd/_index.md index 2b24f995d9..3202d578c0 100644 --- a/content/telegraf/v1/input-plugins/execd/_index.md +++ b/content/telegraf/v1/input-plugins/execd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/execd/README.md, Execd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/execd/README.md, Execd Plugin Source --- # Execd Input Plugin diff --git a/content/telegraf/v1/input-plugins/fail2ban/_index.md b/content/telegraf/v1/input-plugins/fail2ban/_index.md index b7fa982410..7d355b0385 100644 --- a/content/telegraf/v1/input-plugins/fail2ban/_index.md +++ b/content/telegraf/v1/input-plugins/fail2ban/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fail2ban/README.md, Fail2ban Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fail2ban/README.md, Fail2ban Plugin Source --- # Fail2ban Input Plugin diff --git a/content/telegraf/v1/input-plugins/fibaro/_index.md b/content/telegraf/v1/input-plugins/fibaro/_index.md index 98f35fbff8..7e48c38010 100644 --- a/content/telegraf/v1/input-plugins/fibaro/_index.md +++ b/content/telegraf/v1/input-plugins/fibaro/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fibaro/README.md, Fibaro Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fibaro/README.md, Fibaro Plugin Source --- # Fibaro Input Plugin diff --git a/content/telegraf/v1/input-plugins/file/_index.md b/content/telegraf/v1/input-plugins/file/_index.md index 1900fe1508..570bf0c16a 100644 --- a/content/telegraf/v1/input-plugins/file/_index.md +++ b/content/telegraf/v1/input-plugins/file/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/file/README.md, File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/file/README.md, File Plugin Source --- # File Input Plugin diff --git a/content/telegraf/v1/input-plugins/filecount/_index.md b/content/telegraf/v1/input-plugins/filecount/_index.md index d383604230..e3ab21c982 100644 --- a/content/telegraf/v1/input-plugins/filecount/_index.md +++ b/content/telegraf/v1/input-plugins/filecount/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/filecount/README.md, Filecount Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/filecount/README.md, Filecount Plugin Source --- # Filecount Input Plugin diff --git a/content/telegraf/v1/input-plugins/filestat/_index.md b/content/telegraf/v1/input-plugins/filestat/_index.md index 53e55d55f5..445959fd45 100644 --- a/content/telegraf/v1/input-plugins/filestat/_index.md +++ b/content/telegraf/v1/input-plugins/filestat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/filestat/README.md, File statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/filestat/README.md, File statistics Plugin Source --- # File statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/fireboard/_index.md b/content/telegraf/v1/input-plugins/fireboard/_index.md index ae0f0b3d48..324508952a 100644 --- a/content/telegraf/v1/input-plugins/fireboard/_index.md +++ b/content/telegraf/v1/input-plugins/fireboard/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fireboard/README.md, Fireboard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fireboard/README.md, Fireboard Plugin Source --- # Fireboard Input Plugin diff --git a/content/telegraf/v1/input-plugins/firehose/_index.md b/content/telegraf/v1/input-plugins/firehose/_index.md index b8551bdc87..b7c7a20c4a 100644 --- a/content/telegraf/v1/input-plugins/firehose/_index.md +++ b/content/telegraf/v1/input-plugins/firehose/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/firehose/README.md, AWS Data Firehose Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/firehose/README.md, AWS Data Firehose Plugin Source --- # AWS Data Firehose Input Plugin diff --git a/content/telegraf/v1/input-plugins/fluentd/_index.md b/content/telegraf/v1/input-plugins/fluentd/_index.md index 1a27d3b32c..4d8b89e73c 100644 --- a/content/telegraf/v1/input-plugins/fluentd/_index.md +++ b/content/telegraf/v1/input-plugins/fluentd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fluentd/README.md, Fluentd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fluentd/README.md, Fluentd Plugin Source --- # Fluentd Input Plugin diff --git a/content/telegraf/v1/input-plugins/fritzbox/_index.md b/content/telegraf/v1/input-plugins/fritzbox/_index.md index 710ad6e380..0c55e106dd 100644 --- a/content/telegraf/v1/input-plugins/fritzbox/_index.md +++ b/content/telegraf/v1/input-plugins/fritzbox/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, 
solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fritzbox/README.md, Fritzbox Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fritzbox/README.md, Fritzbox Plugin Source --- # Fritzbox Input Plugin diff --git a/content/telegraf/v1/input-plugins/github/_index.md b/content/telegraf/v1/input-plugins/github/_index.md index 68a54e067b..6f2e64af10 100644 --- a/content/telegraf/v1/input-plugins/github/_index.md +++ b/content/telegraf/v1/input-plugins/github/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/github/README.md, GitHub Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/github/README.md, GitHub Plugin Source --- # GitHub Input Plugin diff --git a/content/telegraf/v1/input-plugins/gnmi/_index.md b/content/telegraf/v1/input-plugins/gnmi/_index.md index f1ba5f1244..ff455b9d8f 100644 --- a/content/telegraf/v1/input-plugins/gnmi/_index.md +++ b/content/telegraf/v1/input-plugins/gnmi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/gnmi/README.md, gNMI (gRPC Network Management Interface) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/gnmi/README.md, gNMI (gRPC Network Management Interface) Plugin Source --- # gNMI (gRPC Network Management Interface) Input Plugin @@ -102,6 +102,9 @@ details on how to use them. 
## Only receive updates for the state, also suppresses receiving the initial state # updates_only = false + ## Emit a metric for "delete" messages + # emit_delete_metrics = false + ## Enforces the namespace of the first element as origin for aliases and ## response paths, required for backward compatibility. ## NOTE: Set to 'false' if possible but be aware that this might change the path tag! diff --git a/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md b/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md index cabb075374..83c26d2fb5 100644 --- a/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md +++ b/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/google_cloud_storage/README.md, Google Cloud Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/google_cloud_storage/README.md, Google Cloud Storage Plugin Source --- # Google Cloud Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/graylog/_index.md b/content/telegraf/v1/input-plugins/graylog/_index.md index c5fb9e72b3..2ce54eb3f4 100644 --- a/content/telegraf/v1/input-plugins/graylog/_index.md +++ b/content/telegraf/v1/input-plugins/graylog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/graylog/README.md, GrayLog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/graylog/README.md, GrayLog Plugin Source --- # GrayLog Input Plugin diff --git a/content/telegraf/v1/input-plugins/haproxy/_index.md b/content/telegraf/v1/input-plugins/haproxy/_index.md index 1a45a15a68..6a033ac835 100644 --- 
a/content/telegraf/v1/input-plugins/haproxy/_index.md +++ b/content/telegraf/v1/input-plugins/haproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/haproxy/README.md, HAProxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/haproxy/README.md, HAProxy Plugin Source --- # HAProxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/hddtemp/_index.md b/content/telegraf/v1/input-plugins/hddtemp/_index.md index 693d645824..a81942c1bc 100644 --- a/content/telegraf/v1/input-plugins/hddtemp/_index.md +++ b/content/telegraf/v1/input-plugins/hddtemp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/hddtemp/README.md, HDDtemp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/hddtemp/README.md, HDDtemp Plugin Source --- # HDDtemp Input Plugin diff --git a/content/telegraf/v1/input-plugins/http/_index.md b/content/telegraf/v1/input-plugins/http/_index.md index 11aed22063..f83815209e 100644 --- a/content/telegraf/v1/input-plugins/http/_index.md +++ b/content/telegraf/v1/input-plugins/http/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http/README.md, HTTP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http/README.md, HTTP Plugin Source --- # HTTP Input Plugin @@ -83,6 +83,9 @@ to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" + ## Use the local address for connecting, assigned by the OS by default + # local_address = "" + ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/http_listener_v2/_index.md b/content/telegraf/v1/input-plugins/http_listener_v2/_index.md index 49f2eacb49..dac3a1ff70 100644 --- a/content/telegraf/v1/input-plugins/http_listener_v2/_index.md +++ b/content/telegraf/v1/input-plugins/http_listener_v2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http_listener_v2/README.md, HTTP Listener v2 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http_listener_v2/README.md, HTTP Listener v2 Plugin Source --- # HTTP Listener v2 Input Plugin diff --git a/content/telegraf/v1/input-plugins/http_response/_index.md b/content/telegraf/v1/input-plugins/http_response/_index.md index 5178a9a5e3..d1b1c14db9 100644 --- a/content/telegraf/v1/input-plugins/http_response/_index.md +++ b/content/telegraf/v1/input-plugins/http_response/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http_response/README.md, HTTP Response Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http_response/README.md, HTTP Response Plugin Source --- # HTTP Response Input Plugin diff --git a/content/telegraf/v1/input-plugins/huebridge/_index.md b/content/telegraf/v1/input-plugins/huebridge/_index.md index eccdcbfca4..a161383e2d 100644 --- a/content/telegraf/v1/input-plugins/huebridge/_index.md +++ b/content/telegraf/v1/input-plugins/huebridge/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/huebridge/README.md, HueBridge Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/huebridge/README.md, HueBridge Plugin Source --- # HueBridge Input Plugin diff --git a/content/telegraf/v1/input-plugins/hugepages/_index.md b/content/telegraf/v1/input-plugins/hugepages/_index.md index dbc738ce2b..9c5d184829 100644 --- a/content/telegraf/v1/input-plugins/hugepages/_index.md +++ b/content/telegraf/v1/input-plugins/hugepages/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/hugepages/README.md, Hugepages Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/hugepages/README.md, Hugepages Plugin Source --- # Hugepages Input Plugin diff --git a/content/telegraf/v1/input-plugins/icinga2/_index.md b/content/telegraf/v1/input-plugins/icinga2/_index.md index ccb5c70091..5731b4edb6 100644 --- a/content/telegraf/v1/input-plugins/icinga2/_index.md +++ b/content/telegraf/v1/input-plugins/icinga2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/icinga2/README.md, Icinga2 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/icinga2/README.md, Icinga2 Plugin Source --- # Icinga2 Input Plugin diff --git a/content/telegraf/v1/input-plugins/infiniband/_index.md b/content/telegraf/v1/input-plugins/infiniband/_index.md index 30801ba3db..e49548c6bd 100644 --- a/content/telegraf/v1/input-plugins/infiniband/_index.md +++ b/content/telegraf/v1/input-plugins/infiniband/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: 
"linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/infiniband/README.md, InfiniBand Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/infiniband/README.md, InfiniBand Plugin Source --- # InfiniBand Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb/_index.md b/content/telegraf/v1/input-plugins/influxdb/_index.md index 083b0f6db8..7f4aea245c 100644 --- a/content/telegraf/v1/input-plugins/influxdb/_index.md +++ b/content/telegraf/v1/input-plugins/influxdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb/README.md, InfluxDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb/README.md, InfluxDB Plugin Source --- # InfluxDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb_listener/_index.md b/content/telegraf/v1/input-plugins/influxdb_listener/_index.md index 4dad0abca2..7ee64afc35 100644 --- a/content/telegraf/v1/input-plugins/influxdb_listener/_index.md +++ b/content/telegraf/v1/input-plugins/influxdb_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb_listener/README.md, InfluxDB Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb_listener/README.md, InfluxDB Listener Plugin Source --- # InfluxDB Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md b/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md index dbd52f76cb..acf23499f8 100644 --- a/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md +++ 
b/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb_v2_listener/README.md, InfluxDB V2 Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb_v2_listener/README.md, InfluxDB V2 Listener Plugin Source --- # InfluxDB V2 Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_baseband/_index.md b/content/telegraf/v1/input-plugins/intel_baseband/_index.md index 85c5317681..bec0e91282 100644 --- a/content/telegraf/v1/input-plugins/intel_baseband/_index.md +++ b/content/telegraf/v1/input-plugins/intel_baseband/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_baseband/README.md, Intel Baseband Accelerator Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_baseband/README.md, Intel Baseband Accelerator Plugin Source --- # Intel Baseband Accelerator Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_dlb/_index.md b/content/telegraf/v1/input-plugins/intel_dlb/_index.md index 91f126c89c..b8413787ec 100644 --- a/content/telegraf/v1/input-plugins/intel_dlb/_index.md +++ b/content/telegraf/v1/input-plugins/intel_dlb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_dlb/README.md, Intel® Dynamic Load Balancer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_dlb/README.md, Intel® Dynamic Load Balancer Plugin Source --- # Intel® Dynamic Load Balancer Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/intel_pmt/_index.md b/content/telegraf/v1/input-plugins/intel_pmt/_index.md index dfb76375c8..5ee77cf4f3 100644 --- a/content/telegraf/v1/input-plugins/intel_pmt/_index.md +++ b/content/telegraf/v1/input-plugins/intel_pmt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_pmt/README.md, Intel® Platform Monitoring Technology Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_pmt/README.md, Intel® Platform Monitoring Technology Plugin Source --- # Intel® Platform Monitoring Technology Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_pmu/_index.md b/content/telegraf/v1/input-plugins/intel_pmu/_index.md index 9d8a39fefd..c52e3aacef 100644 --- a/content/telegraf/v1/input-plugins/intel_pmu/_index.md +++ b/content/telegraf/v1/input-plugins/intel_pmu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_pmu/README.md, Intel Performance Monitoring Unit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_pmu/README.md, Intel Performance Monitoring Unit Plugin Source --- # Intel Performance Monitoring Unit Plugin diff --git a/content/telegraf/v1/input-plugins/intel_powerstat/_index.md b/content/telegraf/v1/input-plugins/intel_powerstat/_index.md index 36d8a5f3b0..f1c45121bf 100644 --- a/content/telegraf/v1/input-plugins/intel_powerstat/_index.md +++ b/content/telegraf/v1/input-plugins/intel_powerstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_powerstat/README.md, Intel PowerStat Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_powerstat/README.md, Intel PowerStat Plugin Source --- # Intel PowerStat Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_rdt/_index.md b/content/telegraf/v1/input-plugins/intel_rdt/_index.md index 979618b315..c390395523 100644 --- a/content/telegraf/v1/input-plugins/intel_rdt/_index.md +++ b/content/telegraf/v1/input-plugins/intel_rdt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_rdt/README.md, Intel RDT Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_rdt/README.md, Intel RDT Plugin Source --- # Intel RDT Input Plugin diff --git a/content/telegraf/v1/input-plugins/internal/_index.md b/content/telegraf/v1/input-plugins/internal/_index.md index 86a9048fa5..b60595fc28 100644 --- a/content/telegraf/v1/input-plugins/internal/_index.md +++ b/content/telegraf/v1/input-plugins/internal/_index.md @@ -10,7 +10,7 @@ introduced: "v1.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/internal/README.md, Telegraf Internal Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/internal/README.md, Telegraf Internal Plugin Source --- # Telegraf Internal Input Plugin diff --git a/content/telegraf/v1/input-plugins/internet_speed/_index.md b/content/telegraf/v1/input-plugins/internet_speed/_index.md index 34b13ba045..a22e8f7d8d 100644 --- a/content/telegraf/v1/input-plugins/internet_speed/_index.md +++ b/content/telegraf/v1/input-plugins/internet_speed/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/internet_speed/README.md, Internet Speed Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/internet_speed/README.md, Internet Speed Monitor Plugin Source --- # Internet Speed Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/interrupts/_index.md b/content/telegraf/v1/input-plugins/interrupts/_index.md index a49e96b490..3b20515cb0 100644 --- a/content/telegraf/v1/input-plugins/interrupts/_index.md +++ b/content/telegraf/v1/input-plugins/interrupts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/interrupts/README.md, Interrupts Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/interrupts/README.md, Interrupts Plugin Source --- # Interrupts Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md b/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md index 916bacf3a2..efba924e51 100644 --- a/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md +++ b/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipmi_sensor/README.md, IPMI Sensor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipmi_sensor/README.md, IPMI Sensor Plugin Source --- # IPMI Sensor Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipset/_index.md b/content/telegraf/v1/input-plugins/ipset/_index.md index 526813c129..dc649f4df3 100644 --- a/content/telegraf/v1/input-plugins/ipset/_index.md +++ b/content/telegraf/v1/input-plugins/ipset/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: 
"linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipset/README.md, Ipset Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipset/README.md, Ipset Plugin Source --- # Ipset Input Plugin diff --git a/content/telegraf/v1/input-plugins/iptables/_index.md b/content/telegraf/v1/input-plugins/iptables/_index.md index 36efd03fa6..ebad367e7b 100644 --- a/content/telegraf/v1/input-plugins/iptables/_index.md +++ b/content/telegraf/v1/input-plugins/iptables/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/iptables/README.md, Iptables Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/iptables/README.md, Iptables Plugin Source --- # Iptables Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipvs/_index.md b/content/telegraf/v1/input-plugins/ipvs/_index.md index 52809fee57..af3bcb998f 100644 --- a/content/telegraf/v1/input-plugins/ipvs/_index.md +++ b/content/telegraf/v1/input-plugins/ipvs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipvs/README.md, IPVS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipvs/README.md, IPVS Plugin Source --- # IPVS Input Plugin diff --git a/content/telegraf/v1/input-plugins/jenkins/_index.md b/content/telegraf/v1/input-plugins/jenkins/_index.md index 26ec6e26b6..18a8b1814f 100644 --- a/content/telegraf/v1/input-plugins/jenkins/_index.md +++ b/content/telegraf/v1/input-plugins/jenkins/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jenkins/README.md, Jenkins Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jenkins/README.md, Jenkins Plugin Source --- # Jenkins Input Plugin diff --git a/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md b/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md index 467d74043f..d9abc046d8 100644 --- a/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md +++ b/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jolokia2_agent/README.md, Jolokia2 Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jolokia2_agent/README.md, Jolokia2 Agent Plugin Source --- # Jolokia2 Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md b/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md index 816f1e7617..f0b842deb7 100644 --- a/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md +++ b/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jolokia2_proxy/README.md, Jolokia2 Proxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jolokia2_proxy/README.md, Jolokia2 Proxy Plugin Source --- # Jolokia2 Proxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md b/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md index 2a15f8462f..1a42ea3017 100644 --- a/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md +++ 
b/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jti_openconfig_telemetry/README.md, Juniper Telemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jti_openconfig_telemetry/README.md, Juniper Telemetry Plugin Source --- # Juniper Telemetry Input Plugin diff --git a/content/telegraf/v1/input-plugins/kafka_consumer/_index.md b/content/telegraf/v1/input-plugins/kafka_consumer/_index.md index 344cc8c447..0ce0ddef7c 100644 --- a/content/telegraf/v1/input-plugins/kafka_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/kafka_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kafka_consumer/README.md, Apache Kafka Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kafka_consumer/README.md, Apache Kafka Consumer Plugin Source --- # Apache Kafka Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/kapacitor/_index.md b/content/telegraf/v1/input-plugins/kapacitor/_index.md index dd7e693895..96ec47145c 100644 --- a/content/telegraf/v1/input-plugins/kapacitor/_index.md +++ b/content/telegraf/v1/input-plugins/kapacitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kapacitor/README.md, Kapacitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kapacitor/README.md, Kapacitor Plugin Source --- # Kapacitor Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/kernel/_index.md b/content/telegraf/v1/input-plugins/kernel/_index.md index 885f3b581d..99ae684782 100644 --- a/content/telegraf/v1/input-plugins/kernel/_index.md +++ b/content/telegraf/v1/input-plugins/kernel/_index.md @@ -10,7 +10,7 @@ introduced: "v0.11.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kernel/README.md, Kernel Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kernel/README.md, Kernel Plugin Source --- # Kernel Input Plugin diff --git a/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md b/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md index c60667001f..91c30e94b4 100644 --- a/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md +++ b/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kernel_vmstat/README.md, Kernel VM Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kernel_vmstat/README.md, Kernel VM Statistics Plugin Source --- # Kernel VM Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/kibana/_index.md b/content/telegraf/v1/input-plugins/kibana/_index.md index 51b2370f7f..81aba85e64 100644 --- a/content/telegraf/v1/input-plugins/kibana/_index.md +++ b/content/telegraf/v1/input-plugins/kibana/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kibana/README.md, Kibana Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kibana/README.md, Kibana Plugin Source --- # Kibana Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md b/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md index bcedef7581..fd5ae57afd 100644 --- a/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kinesis_consumer/README.md, Kinesis Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kinesis_consumer/README.md, Kinesis Consumer Plugin Source --- # Kinesis Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/knx_listener/_index.md b/content/telegraf/v1/input-plugins/knx_listener/_index.md index d570e22853..f8e3b64c1e 100644 --- a/content/telegraf/v1/input-plugins/knx_listener/_index.md +++ b/content/telegraf/v1/input-plugins/knx_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/knx_listener/README.md, KNX Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/knx_listener/README.md, KNX Plugin Source --- # KNX Input Plugin diff --git a/content/telegraf/v1/input-plugins/kube_inventory/_index.md b/content/telegraf/v1/input-plugins/kube_inventory/_index.md index 509a3c0e5b..109a1f4a9d 100644 --- a/content/telegraf/v1/input-plugins/kube_inventory/_index.md +++ b/content/telegraf/v1/input-plugins/kube_inventory/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kube_inventory/README.md, Kubernetes Inventory Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kube_inventory/README.md, Kubernetes Inventory Plugin Source --- # Kubernetes Inventory Input Plugin diff --git a/content/telegraf/v1/input-plugins/kubernetes/_index.md b/content/telegraf/v1/input-plugins/kubernetes/_index.md index ab57f0864c..ca819f3cd4 100644 --- a/content/telegraf/v1/input-plugins/kubernetes/_index.md +++ b/content/telegraf/v1/input-plugins/kubernetes/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kubernetes/README.md, Kubernetes Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kubernetes/README.md, Kubernetes Plugin Source --- # Kubernetes Input Plugin diff --git a/content/telegraf/v1/input-plugins/lanz/_index.md b/content/telegraf/v1/input-plugins/lanz/_index.md index 4b36f65972..142a6d4ac4 100644 --- a/content/telegraf/v1/input-plugins/lanz/_index.md +++ b/content/telegraf/v1/input-plugins/lanz/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lanz/README.md, Arista LANZ Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lanz/README.md, Arista LANZ Consumer Plugin Source --- # Arista LANZ Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/ldap/_index.md b/content/telegraf/v1/input-plugins/ldap/_index.md index 5ff26f8f4b..e21452871f 100644 --- a/content/telegraf/v1/input-plugins/ldap/_index.md +++ b/content/telegraf/v1/input-plugins/ldap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ldap/README.md, LDAP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ldap/README.md, LDAP Plugin Source --- # LDAP Input Plugin diff --git a/content/telegraf/v1/input-plugins/leofs/_index.md b/content/telegraf/v1/input-plugins/leofs/_index.md index 2755a1882e..79aa4366f1 100644 --- a/content/telegraf/v1/input-plugins/leofs/_index.md +++ b/content/telegraf/v1/input-plugins/leofs/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/leofs/README.md, LeoFS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/leofs/README.md, LeoFS Plugin Source --- # LeoFS Input Plugin diff --git a/content/telegraf/v1/input-plugins/libvirt/_index.md b/content/telegraf/v1/input-plugins/libvirt/_index.md index 642cad6578..e751aced37 100644 --- a/content/telegraf/v1/input-plugins/libvirt/_index.md +++ b/content/telegraf/v1/input-plugins/libvirt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/libvirt/README.md, Libvirt Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/libvirt/README.md, Libvirt Plugin Source --- # Libvirt Input Plugin diff --git a/content/telegraf/v1/input-plugins/linux_cpu/_index.md b/content/telegraf/v1/input-plugins/linux_cpu/_index.md index ce032c4bbd..74a2d13f56 100644 --- a/content/telegraf/v1/input-plugins/linux_cpu/_index.md +++ b/content/telegraf/v1/input-plugins/linux_cpu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/linux_cpu/README.md, Linux CPU Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/linux_cpu/README.md, Linux CPU Plugin Source --- # Linux CPU Input Plugin diff --git a/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md b/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md index 3caf08b81f..01c498cce7 100644 --- a/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md +++ b/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/linux_sysctl_fs/README.md, Linux Sysctl Filesystem Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/linux_sysctl_fs/README.md, Linux Sysctl Filesystem Plugin Source --- # Linux Sysctl Filesystem Input Plugin diff --git a/content/telegraf/v1/input-plugins/logql/_index.md b/content/telegraf/v1/input-plugins/logql/_index.md index f75b79c5fe..d612a27a91 100644 --- a/content/telegraf/v1/input-plugins/logql/_index.md +++ b/content/telegraf/v1/input-plugins/logql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/logql/README.md, LogQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/logql/README.md, LogQL Plugin Source --- # LogQL Input Plugin @@ -69,6 +69,9 @@ more details on how to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" + ## Use the local address for connecting, assigned by the OS by default + # local_address = "" + ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/logstash/_index.md b/content/telegraf/v1/input-plugins/logstash/_index.md index 9833ac5f8b..c9340e7689 100644 --- a/content/telegraf/v1/input-plugins/logstash/_index.md +++ b/content/telegraf/v1/input-plugins/logstash/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/logstash/README.md, Logstash Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/logstash/README.md, Logstash Plugin Source --- # Logstash Input Plugin diff --git a/content/telegraf/v1/input-plugins/lustre2/_index.md b/content/telegraf/v1/input-plugins/lustre2/_index.md index e33ff9b63d..718ffa91db 100644 --- a/content/telegraf/v1/input-plugins/lustre2/_index.md +++ b/content/telegraf/v1/input-plugins/lustre2/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lustre2/README.md, Lustre Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lustre2/README.md, Lustre Plugin Source --- # Lustre Input Plugin diff --git a/content/telegraf/v1/input-plugins/lvm/_index.md b/content/telegraf/v1/input-plugins/lvm/_index.md index 878fbd368c..8cab33dd5a 100644 --- a/content/telegraf/v1/input-plugins/lvm/_index.md +++ b/content/telegraf/v1/input-plugins/lvm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lvm/README.md, Logical Volume Manager Plugin Source 
+ - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lvm/README.md, Logical Volume Manager Plugin Source --- # Logical Volume Manager Input Plugin diff --git a/content/telegraf/v1/input-plugins/mailchimp/_index.md b/content/telegraf/v1/input-plugins/mailchimp/_index.md index 41b2884dd8..6fcee34322 100644 --- a/content/telegraf/v1/input-plugins/mailchimp/_index.md +++ b/content/telegraf/v1/input-plugins/mailchimp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.4" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mailchimp/README.md, Mailchimp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mailchimp/README.md, Mailchimp Plugin Source --- # Mailchimp Input Plugin diff --git a/content/telegraf/v1/input-plugins/marklogic/_index.md b/content/telegraf/v1/input-plugins/marklogic/_index.md index 3d6a548381..96cf441bc9 100644 --- a/content/telegraf/v1/input-plugins/marklogic/_index.md +++ b/content/telegraf/v1/input-plugins/marklogic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/marklogic/README.md, MarkLogic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/marklogic/README.md, MarkLogic Plugin Source --- # MarkLogic Input Plugin diff --git a/content/telegraf/v1/input-plugins/mavlink/_index.md b/content/telegraf/v1/input-plugins/mavlink/_index.md index e196a3125a..19a4f24b8e 100644 --- a/content/telegraf/v1/input-plugins/mavlink/_index.md +++ b/content/telegraf/v1/input-plugins/mavlink/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mavlink/README.md, MavLink Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mavlink/README.md, MavLink Plugin Source --- # MavLink Input Plugin diff --git a/content/telegraf/v1/input-plugins/mcrouter/_index.md b/content/telegraf/v1/input-plugins/mcrouter/_index.md index f99c44faaf..6f68324985 100644 --- a/content/telegraf/v1/input-plugins/mcrouter/_index.md +++ b/content/telegraf/v1/input-plugins/mcrouter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mcrouter/README.md, Mcrouter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mcrouter/README.md, Mcrouter Plugin Source --- # Mcrouter Input Plugin diff --git a/content/telegraf/v1/input-plugins/mdstat/_index.md b/content/telegraf/v1/input-plugins/mdstat/_index.md index 885960c940..6317b5d78b 100644 --- a/content/telegraf/v1/input-plugins/mdstat/_index.md +++ b/content/telegraf/v1/input-plugins/mdstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mdstat/README.md, MD RAID Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mdstat/README.md, MD RAID Statistics Plugin Source --- # MD RAID Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/mem/_index.md b/content/telegraf/v1/input-plugins/mem/_index.md index 5b457d5f0f..d8cc65783a 100644 --- a/content/telegraf/v1/input-plugins/mem/_index.md +++ b/content/telegraf/v1/input-plugins/mem/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mem/README.md, Memory Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mem/README.md, Memory Plugin Source --- # Memory Input Plugin diff --git a/content/telegraf/v1/input-plugins/memcached/_index.md b/content/telegraf/v1/input-plugins/memcached/_index.md index 15d6ca9508..1653714fb3 100644 --- a/content/telegraf/v1/input-plugins/memcached/_index.md +++ b/content/telegraf/v1/input-plugins/memcached/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/memcached/README.md, Memcached Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/memcached/README.md, Memcached Plugin Source --- # Memcached Input Plugin diff --git a/content/telegraf/v1/input-plugins/mesos/_index.md b/content/telegraf/v1/input-plugins/mesos/_index.md index 270ffa2881..f1b2c504d0 100644 --- a/content/telegraf/v1/input-plugins/mesos/_index.md +++ b/content/telegraf/v1/input-plugins/mesos/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mesos/README.md, Apache Mesos Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mesos/README.md, Apache Mesos Plugin Source --- # Apache Mesos Input Plugin diff --git a/content/telegraf/v1/input-plugins/minecraft/_index.md b/content/telegraf/v1/input-plugins/minecraft/_index.md index 412c8aaa44..3909fa3618 100644 --- a/content/telegraf/v1/input-plugins/minecraft/_index.md +++ b/content/telegraf/v1/input-plugins/minecraft/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/minecraft/README.md, Minecraft Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/minecraft/README.md, Minecraft Plugin Source --- # Minecraft Input Plugin diff --git a/content/telegraf/v1/input-plugins/mock/_index.md b/content/telegraf/v1/input-plugins/mock/_index.md index 54399ab981..97d7c726ab 100644 --- a/content/telegraf/v1/input-plugins/mock/_index.md +++ b/content/telegraf/v1/input-plugins/mock/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mock/README.md, Mock Data Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mock/README.md, Mock Data Plugin Source --- # Mock Data Input Plugin diff --git a/content/telegraf/v1/input-plugins/modbus/_index.md b/content/telegraf/v1/input-plugins/modbus/_index.md index 82f6191c17..02291a3509 100644 --- a/content/telegraf/v1/input-plugins/modbus/_index.md +++ b/content/telegraf/v1/input-plugins/modbus/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/modbus/README.md, Modbus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/modbus/README.md, Modbus Plugin Source --- diff --git a/content/telegraf/v1/input-plugins/mongodb/_index.md b/content/telegraf/v1/input-plugins/mongodb/_index.md index 19c83f65f9..6083e33c54 100644 --- a/content/telegraf/v1/input-plugins/mongodb/_index.md +++ b/content/telegraf/v1/input-plugins/mongodb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mongodb/README.md, MongoDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mongodb/README.md, MongoDB Plugin Source --- # MongoDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/monit/_index.md b/content/telegraf/v1/input-plugins/monit/_index.md index 49be217a66..7b24ecc47f 100644 --- a/content/telegraf/v1/input-plugins/monit/_index.md +++ b/content/telegraf/v1/input-plugins/monit/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/monit/README.md, Monit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/monit/README.md, Monit Plugin Source --- # Monit Input Plugin diff --git a/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md b/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md index cccb517f3f..310bde179b 100644 --- a/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mqtt_consumer/README.md, MQTT Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mqtt_consumer/README.md, MQTT Consumer Plugin Source --- # MQTT Consumer Input Plugin @@ -105,6 +105,12 @@ to use them. ## Connection timeout for initial connection in seconds # connection_timeout = "30s" + ## Maximum interval between reconnection attempts after a connection loss. + ## The MQTT library uses exponential backoff starting at 1 second up to this + ## ceiling. 
The library default is 10 minutes, which can cause long delays + ## before message flow resumes after a network outage. + # max_reconnect_interval = "30s" + ## Interval and ping timeout for keep-alive messages ## The sum of those options defines when a connection loss is detected. ## Note: The keep-alive interval needs to be greater or equal one second and diff --git a/content/telegraf/v1/input-plugins/multifile/_index.md b/content/telegraf/v1/input-plugins/multifile/_index.md index 0d84a53615..fd3581f29e 100644 --- a/content/telegraf/v1/input-plugins/multifile/_index.md +++ b/content/telegraf/v1/input-plugins/multifile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/multifile/README.md, Multifile Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/multifile/README.md, Multifile Plugin Source --- # Multifile Input Plugin diff --git a/content/telegraf/v1/input-plugins/mysql/_index.md b/content/telegraf/v1/input-plugins/mysql/_index.md index 5ffac05059..3b35d5f10e 100644 --- a/content/telegraf/v1/input-plugins/mysql/_index.md +++ b/content/telegraf/v1/input-plugins/mysql/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mysql/README.md, MySQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mysql/README.md, MySQL Plugin Source --- # MySQL Input Plugin @@ -262,7 +262,17 @@ measurement name. 
## Metrics * Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUSES` + * wsrep_evs_repl_latency - a complex field containing multiple values is split + into separate fields + * wsrep_evs_repl_latency_min(float, seconds) + * wsrep_evs_repl_latency_avg(float, seconds) + * wsrep_evs_repl_latency_max(float, seconds) + * wsrep_evs_repl_latency_stdev(float, seconds) + * wsrep_evs_repl_latency_sample_size(float, number) * Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES` + * wsrep_provider_options - a complex field containing multiple values is split + into separate fields + * gcache_size(int, bytes) * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when the single-source replication is on. If the multi-source replication is set, then everything works differently, this metric does not work with multi-source diff --git a/content/telegraf/v1/input-plugins/nats/_index.md b/content/telegraf/v1/input-plugins/nats/_index.md index 9732b8caa1..5bb2106532 100644 --- a/content/telegraf/v1/input-plugins/nats/_index.md +++ b/content/telegraf/v1/input-plugins/nats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nats/README.md, NATS Server Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nats/README.md, NATS Server Monitoring Plugin Source --- # NATS Server Monitoring Input Plugin diff --git a/content/telegraf/v1/input-plugins/nats_consumer/_index.md b/content/telegraf/v1/input-plugins/nats_consumer/_index.md index 400c43a54d..f3fc6c6800 100644 --- a/content/telegraf/v1/input-plugins/nats_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/nats_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ 
- - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nats_consumer/README.md, NATS Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nats_consumer/README.md, NATS Consumer Plugin Source --- # NATS Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/neoom_beaam/_index.md b/content/telegraf/v1/input-plugins/neoom_beaam/_index.md index 6a817c629e..601fe0054a 100644 --- a/content/telegraf/v1/input-plugins/neoom_beaam/_index.md +++ b/content/telegraf/v1/input-plugins/neoom_beaam/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/neoom_beaam/README.md, Neoom Beaam Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/neoom_beaam/README.md, Neoom Beaam Plugin Source --- # Neoom Beaam Input Plugin diff --git a/content/telegraf/v1/input-plugins/neptune_apex/_index.md b/content/telegraf/v1/input-plugins/neptune_apex/_index.md index 0ad9cb2da4..9362837fcf 100644 --- a/content/telegraf/v1/input-plugins/neptune_apex/_index.md +++ b/content/telegraf/v1/input-plugins/neptune_apex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/neptune_apex/README.md, Neptune Apex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/neptune_apex/README.md, Neptune Apex Plugin Source --- # Neptune Apex Input Plugin diff --git a/content/telegraf/v1/input-plugins/net/_index.md b/content/telegraf/v1/input-plugins/net/_index.md index 0292ac5f21..22490b1854 100644 --- a/content/telegraf/v1/input-plugins/net/_index.md +++ b/content/telegraf/v1/input-plugins/net/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, 
linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/net/README.md, Network Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/net/README.md, Network Plugin Source --- # Network Input Plugin diff --git a/content/telegraf/v1/input-plugins/net_response/_index.md b/content/telegraf/v1/input-plugins/net_response/_index.md index c0cae121a8..29bdaf2941 100644 --- a/content/telegraf/v1/input-plugins/net_response/_index.md +++ b/content/telegraf/v1/input-plugins/net_response/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/net_response/README.md, Network Response Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/net_response/README.md, Network Response Plugin Source --- # Network Response Input Plugin diff --git a/content/telegraf/v1/input-plugins/netflow/_index.md b/content/telegraf/v1/input-plugins/netflow/_index.md index c116ace4b0..95ff91fd48 100644 --- a/content/telegraf/v1/input-plugins/netflow/_index.md +++ b/content/telegraf/v1/input-plugins/netflow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/netflow/README.md, Netflow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/netflow/README.md, Netflow Plugin Source --- # Netflow Input Plugin diff --git a/content/telegraf/v1/input-plugins/netstat/_index.md b/content/telegraf/v1/input-plugins/netstat/_index.md index 982e2aa6ed..6a338a4cd0 100644 --- a/content/telegraf/v1/input-plugins/netstat/_index.md +++ b/content/telegraf/v1/input-plugins/netstat/_index.md @@ -10,7 +10,7 @@ introduced: 
"v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/netstat/README.md, Network Connection Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/netstat/README.md, Network Connection Statistics Plugin Source --- # Network Connection Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/nfsclient/_index.md b/content/telegraf/v1/input-plugins/nfsclient/_index.md index a55540f63e..c2881bfa33 100644 --- a/content/telegraf/v1/input-plugins/nfsclient/_index.md +++ b/content/telegraf/v1/input-plugins/nfsclient/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nfsclient/README.md, Network Filesystem Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nfsclient/README.md, Network Filesystem Plugin Source --- # Network Filesystem Input Plugin diff --git a/content/telegraf/v1/input-plugins/nftables/_index.md b/content/telegraf/v1/input-plugins/nftables/_index.md index 4c44121ee6..aa8cb9a4e7 100644 --- a/content/telegraf/v1/input-plugins/nftables/_index.md +++ b/content/telegraf/v1/input-plugins/nftables/_index.md @@ -10,17 +10,13 @@ introduced: "v1.37.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nftables/README.md, Nftables Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nftables/README.md, Nftables Plugin Source --- # Nftables Plugin This plugin gathers packets and bytes counters for rules within -Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) firewall. 
- -> [!IMPORTANT] -> Rules are identified by the associated comment so those **comments have to be unique**! -> Rules without comment are ignored. +Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) firewall, as well as set element counts. **Introduced in:** Telegraf v1.37.0 **Tags:** network, system @@ -43,11 +39,16 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for ## Use the specified binary which will be looked-up in PATH # binary = "nft" - ## Use sudo for command execution, can be restricted to "nft --json list table" + ## Use sudo for command execution, can be restricted to + ## "nft --json list table" # use_sudo = false - ## Tables to monitor containing both a counter and comment declaration + ## Tables to monitor (may use "family table" format, e.g., "inet filter") # tables = [ "filter" ] + + ## Kinds of objects to monitor: "counters" (named counters), "sets" + ## (named sets), "anonymous-counters" (on commented rules). + # include = ["anonymous-counters"] ``` Since telegraf will fork a process to run nftables, `AmbientCapabilities` is @@ -58,11 +59,32 @@ required to transmit the capabilities bounding set to the forked process.
You may edit your sudo configuration with the following: ```sudo -telegraf ALL=(root) NOPASSWD: /usr/bin/nft * +telegraf ALL=(root) NOPASSWD: /usr/bin/nft --json list table * ``` ## Metrics +Counters (when `counters` included): + +* nftables + * tags: + * table + * counter + * fields: + * pkts (integer, count) + * bytes (integer, bytes) + +Sets (when `sets` included): + +* nftables + * tags: + * table + * set + * field: + * count (integer, count) + +Anonymous counters on commented rules (when `anonymous-counters` included): + * nftables * tags: * table @@ -75,6 +97,8 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/nft * ## Example Output ```text +> nftables,host=my_hostname,counter=my_counter,table=filter bytes=48968i,pkts=48i 1757367516000000000 +> nftables,host=my_hostname,set=my_set,table=filter count=10i 1757367516000000000 > nftables,chain=incoming,host=my_hostname,rule=comment_val_1,table=filter bytes=66435845i,pkts=133882i 1757367516000000000 -> nftables,chain=outgoing,host=my_hostname,rule=comment_val2,table=filter bytes=25596512i,pkts=145129i 1757367516000000000 +> nftables,chain=outgoing,host=my_hostname,rule=comment_val_2,table=filter bytes=25596512i,pkts=145129i 1757367516000000000 ``` diff --git a/content/telegraf/v1/input-plugins/nginx/_index.md b/content/telegraf/v1/input-plugins/nginx/_index.md index 8f11618bc7..a856f1f410 100644 --- a/content/telegraf/v1/input-plugins/nginx/_index.md +++ b/content/telegraf/v1/input-plugins/nginx/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx/README.md, Nginx Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx/README.md, Nginx Plugin Source --- # Nginx Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_plus/_index.md b/content/telegraf/v1/input-plugins/nginx_plus/_index.md index 
1c5cd2ca45..19039c2ad0 100644 --- a/content/telegraf/v1/input-plugins/nginx_plus/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_plus/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_plus/README.md, Nginx Plus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_plus/README.md, Nginx Plus Plugin Source --- # Nginx Plus Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md b/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md index 01e79f6264..219e7dad27 100644 --- a/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_plus_api/README.md, Nginx Plus API Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_plus_api/README.md, Nginx Plus API Plugin Source --- # Nginx Plus API Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_sts/_index.md b/content/telegraf/v1/input-plugins/nginx_sts/_index.md index 76ea47a8ff..8cedbcc79a 100644 --- a/content/telegraf/v1/input-plugins/nginx_sts/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_sts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_sts/README.md, Nginx Stream Server Traffic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_sts/README.md, Nginx Stream Server Traffic Plugin Source --- # Nginx Stream Server Traffic Input 
Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md b/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md index 3d4d56fed2..a03f93b093 100644 --- a/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_upstream_check/README.md, Nginx Upstream Check Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_upstream_check/README.md, Nginx Upstream Check Plugin Source --- # Nginx Upstream Check Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_vts/_index.md b/content/telegraf/v1/input-plugins/nginx_vts/_index.md index 4836ad4e97..4542d2f7e7 100644 --- a/content/telegraf/v1/input-plugins/nginx_vts/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_vts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_vts/README.md, Nginx Virtual Host Traffic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_vts/README.md, Nginx Virtual Host Traffic Plugin Source --- # Nginx Virtual Host Traffic Input Plugin diff --git a/content/telegraf/v1/input-plugins/nomad/_index.md b/content/telegraf/v1/input-plugins/nomad/_index.md index 09b15b6bd9..c657b90a7b 100644 --- a/content/telegraf/v1/input-plugins/nomad/_index.md +++ b/content/telegraf/v1/input-plugins/nomad/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nomad/README.md, 
Hashicorp Nomad Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nomad/README.md, Hashicorp Nomad Plugin Source --- # Hashicorp Nomad Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsd/_index.md b/content/telegraf/v1/input-plugins/nsd/_index.md index 77ae15cce4..fb09170566 100644 --- a/content/telegraf/v1/input-plugins/nsd/_index.md +++ b/content/telegraf/v1/input-plugins/nsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsd/README.md, NLnet Labs Name Server Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsd/README.md, NLnet Labs Name Server Daemon Plugin Source --- # NLnet Labs Name Server Daemon Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsdp/_index.md b/content/telegraf/v1/input-plugins/nsdp/_index.md index 25ce409d41..c2de1c995c 100644 --- a/content/telegraf/v1/input-plugins/nsdp/_index.md +++ b/content/telegraf/v1/input-plugins/nsdp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsdp/README.md, Netgear Switch Discovery Protocol Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsdp/README.md, Netgear Switch Discovery Protocol Plugin Source --- # Netgear Switch Discovery Protocol Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsq/_index.md b/content/telegraf/v1/input-plugins/nsq/_index.md index 7c0213c166..549983ba03 100644 --- a/content/telegraf/v1/input-plugins/nsq/_index.md +++ b/content/telegraf/v1/input-plugins/nsq/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsq/README.md, NSQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsq/README.md, NSQ Plugin Source --- # NSQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsq_consumer/_index.md b/content/telegraf/v1/input-plugins/nsq_consumer/_index.md index 6edb934d1d..a87436ea2b 100644 --- a/content/telegraf/v1/input-plugins/nsq_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/nsq_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsq_consumer/README.md, NSQ Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsq_consumer/README.md, NSQ Consumer Plugin Source --- # NSQ Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/nstat/_index.md b/content/telegraf/v1/input-plugins/nstat/_index.md index 3d026d2046..3e74a8a81f 100644 --- a/content/telegraf/v1/input-plugins/nstat/_index.md +++ b/content/telegraf/v1/input-plugins/nstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nstat/README.md, Kernel Network Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nstat/README.md, Kernel Network Statistics Plugin Source --- # Kernel Network Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/ntpq/_index.md b/content/telegraf/v1/input-plugins/ntpq/_index.md index a530c1ced7..ac1115ce83 100644 --- a/content/telegraf/v1/input-plugins/ntpq/_index.md +++ b/content/telegraf/v1/input-plugins/ntpq/_index.md @@ -10,7 +10,7 @@ introduced: "v0.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ntpq/README.md, Network Time Protocol Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ntpq/README.md, Network Time Protocol Query Plugin Source --- # Network Time Protocol Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/nvidia_smi/_index.md b/content/telegraf/v1/input-plugins/nvidia_smi/_index.md index b7eccd9dfd..d35bdbbfe4 100644 --- a/content/telegraf/v1/input-plugins/nvidia_smi/_index.md +++ b/content/telegraf/v1/input-plugins/nvidia_smi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nvidia_smi/README.md, Nvidia System Management Interface (SMI) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nvidia_smi/README.md, Nvidia System Management Interface (SMI) Plugin Source --- # Nvidia System Management Interface (SMI) Input Plugin diff --git a/content/telegraf/v1/input-plugins/opcua/_index.md b/content/telegraf/v1/input-plugins/opcua/_index.md index feb631d027..6100b0eaf4 100644 --- a/content/telegraf/v1/input-plugins/opcua/_index.md +++ b/content/telegraf/v1/input-plugins/opcua/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opcua/README.md, OPC UA Client Reader Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opcua/README.md, OPC UA Client Reader Plugin Source --- # OPC UA Client Reader Input Plugin @@ -115,21 +115,34 @@ to use them. 
## Node ID configuration ## name - field name to use in the output + ## id - OPC UA node ID string (e.g., "ns=0;i=2262" or "nsu=http://...;s=Name") ## namespace - OPC UA namespace of the node (integer value 0 thru 3) ## namespace_uri - OPC UA namespace URI (alternative to namespace for stable references) ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) ## identifier - OPC UA ID (tag as shown in opcua browser) ## default_tags - extra tags to be added to the output metric (optional) ## - ## Note: Specify either 'namespace' or 'namespace_uri', not both. + ## Use EITHER 'id' OR the combination of 'namespace/namespace_uri' + 'identifier_type' + 'identifier' ## Use either the inline notation or the bracketed notation, not both. - ## Inline notation (default_tags not supported yet) + ## Inline notation using id string (recommended for simplicity) + # nodes = [ + # {name="ProductUri", id="ns=0;i=2262"}, + # {name="ServerState", id="ns=0;i=2259"}, + # ] + + ## Inline notation using individual fields (default_tags not supported yet) # nodes = [ # {name="", namespace="", identifier_type="", identifier=""}, # ] - ## Bracketed notation + ## Bracketed notation using id string + # [[inputs.opcua.nodes]] + # name = "ProductUri" + # id = "ns=0;i=2262" + # default_tags = { tag1 = "value1", tag2 = "value2" } + + ## Bracketed notation using individual fields # [[inputs.opcua.nodes]] # name = "node1" # namespace = "" @@ -264,13 +277,29 @@ An OPC UA node ID may resemble: "ns=3;s=Temperature". 
In this example: `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 -To gather data from this node enter the following line into the 'nodes' -property above: +### Using `id` String (Recommended) + +You can specify nodes using the standard OPC UA node ID string format directly: ```text -{field_name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, +{name="temp", id="ns=3;s=Temperature"}, ``` +This is simpler and matches the format shown in OPC UA browsers. + +### Using Individual Fields + +Alternatively, you can specify each component separately: + +```text +{name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, +``` + +> [!NOTE] +> Use either `id` OR the combination of +> `namespace`/`namespace_uri` + `identifier_type` + `identifier`. +> Do not mix both formats for the same node. + This node configuration produces a metric like this: ```text @@ -301,7 +330,8 @@ OPC UA supports two ways to specify namespaces: 2. **Namespace URI** (`namespace_uri`): A string URI that uniquely identifies the namespace. This is more stable across server restarts but requires the - plugin to fetch the namespace array from the server to resolve the URI to an index. + plugin to fetch the namespace array from the server to resolve the URI to an + index. 
**When to use namespace index:** diff --git a/content/telegraf/v1/input-plugins/opcua_listener/_index.md b/content/telegraf/v1/input-plugins/opcua_listener/_index.md index 184cd84591..043f03a557 100644 --- a/content/telegraf/v1/input-plugins/opcua_listener/_index.md +++ b/content/telegraf/v1/input-plugins/opcua_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opcua_listener/README.md, OPC UA Client Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opcua_listener/README.md, OPC UA Client Listener Plugin Source --- # OPC UA Client Listener Input Plugin @@ -137,6 +137,7 @@ to use them. # ## Node ID configuration ## name - field name to use in the output + ## id - OPC UA node ID string (e.g., "ns=0;i=2262" or "nsu=http://...;s=Name") ## namespace - OPC UA namespace of the node (integer value 0 thru 3) ## namespace_uri - OPC UA namespace URI (alternative to namespace for stable references) ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) @@ -144,7 +145,7 @@ to use them. ## default_tags - extra tags to be added to the output metric (optional) ## monitoring_params - additional settings for the monitored node (optional) ## - ## Note: Specify either 'namespace' or 'namespace_uri', not both. + ## Use EITHER 'id' OR the combination of 'namespace/namespace_uri' + 'identifier_type' + 'identifier' ## ## Monitoring parameters ## sampling_interval - interval at which the server should check for data @@ -178,13 +179,25 @@ to use them. ## ## Use either the inline notation or the bracketed notation, not both. 
# - ## Inline notation (default_tags and monitoring_params not supported yet) + ## Inline notation using id string (recommended for simplicity) + # nodes = [ + # {name="ProductUri", id="ns=0;i=2262"}, + # {name="ServerState", id="ns=0;i=2259"} + # ] + # + ## Inline notation using individual fields (default_tags and monitoring_params not supported yet) # nodes = [ # {name="node1", namespace="", identifier_type="", identifier=""}, # {name="node2", namespace="", identifier_type="", identifier=""} # ] # - ## Bracketed notation + ## Bracketed notation using id string + # [[inputs.opcua_listener.nodes]] + # name = "ProductUri" + # id = "ns=0;i=2262" + # default_tags = { tag1 = "value1", tag2 = "value2" } + + ## Bracketed notation using individual fields # [[inputs.opcua_listener.nodes]] # name = "node1" # namespace = "" @@ -285,12 +298,13 @@ to use them. # ## Multiple event groups are allowed. + ## Event nodes support both 'id' string format and individual fields. # [[inputs.opcua_listener.events]] # ## Polling interval for data collection # # sampling_interval = "10s" # ## Size of the notification queue # # queue_size = 10 - # ## Node parameter defaults for node definitions below + # ## Node parameter defaults for node definitions below (used when id is not specified) # # namespace = "" # # identifier_type = "" # ## Specifies OPCUA Event sources to filter on @@ -299,17 +313,22 @@ to use them. # fields = ["Severity", "Message", "Time"] # # ## Type or level of events to capture from the monitored nodes. + # ## Use 'id' string OR individual fields (namespace/identifier_type/identifier) # [inputs.opcua_listener.events.event_type_node] - # namespace = "" - # identifier_type = "" - # identifier = "" + # id = "ns=0;i=2041" + # # Or use individual fields: + # # namespace = "" + # # identifier_type = "" + # # identifier = "" # # ## Nodes to monitor for event notifications associated with the defined - # ## event type + # ## event type. Use 'id' string OR individual fields. 
# [[inputs.opcua_listener.events.node_ids]] - # namespace = "" - # identifier_type = "" - # identifier = "" + # id = "ns=2;s=EventSource1" + # # Or use individual fields: + # # namespace = "" + # # identifier_type = "" + # # identifier = "" ## Enable workarounds required by some devices to work correctly # [inputs.opcua_listener.workarounds] @@ -328,13 +347,29 @@ An OPC UA node ID may resemble: "ns=3;s=Temperature". In this example: `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 -To gather data from this node enter the following line into the 'nodes' -property above: +#### Using `id` String (Recommended) + +You can specify nodes using the standard OPC UA node ID string format directly: + +```text +{name="temp", id="ns=3;s=Temperature"}, +``` + +This is simpler and matches the format shown in OPC UA browsers. + +#### Using Individual Fields + +Alternatively, you can specify each component separately: ```text {name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` +> [!NOTE] +> Use either `id` OR the combination of +> `namespace`/`namespace_uri` + `identifier_type` + `identifier`. +> Do not mix both formats for the same node. + This node configuration produces a metric like this: ```text @@ -365,7 +400,8 @@ OPC UA supports two ways to specify namespaces: 2. **Namespace URI** (`namespace_uri`): A string URI that uniquely identifies the namespace. This is more stable across server restarts but requires the - plugin to fetch the namespace array from the server to resolve the URI to an index. + plugin to fetch the namespace array from the server to resolve the URI to an + index. 
**When to use namespace index:** diff --git a/content/telegraf/v1/input-plugins/openldap/_index.md b/content/telegraf/v1/input-plugins/openldap/_index.md index 8c9250aa72..1dc9a64b4b 100644 --- a/content/telegraf/v1/input-plugins/openldap/_index.md +++ b/content/telegraf/v1/input-plugins/openldap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openldap/README.md, OpenLDAP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openldap/README.md, OpenLDAP Plugin Source --- # OpenLDAP Input Plugin diff --git a/content/telegraf/v1/input-plugins/openntpd/_index.md b/content/telegraf/v1/input-plugins/openntpd/_index.md index ac3c750555..180ca5a469 100644 --- a/content/telegraf/v1/input-plugins/openntpd/_index.md +++ b/content/telegraf/v1/input-plugins/openntpd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openntpd/README.md, OpenNTPD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openntpd/README.md, OpenNTPD Plugin Source --- # OpenNTPD Input Plugin diff --git a/content/telegraf/v1/input-plugins/opensearch_query/_index.md b/content/telegraf/v1/input-plugins/opensearch_query/_index.md index 52f21aee04..19c6d994c8 100644 --- a/content/telegraf/v1/input-plugins/opensearch_query/_index.md +++ b/content/telegraf/v1/input-plugins/opensearch_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opensearch_query/README.md, OpenSearch Query Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opensearch_query/README.md, OpenSearch Query Plugin Source --- # OpenSearch Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/opensmtpd/_index.md b/content/telegraf/v1/input-plugins/opensmtpd/_index.md index aaae6b5207..9548403962 100644 --- a/content/telegraf/v1/input-plugins/opensmtpd/_index.md +++ b/content/telegraf/v1/input-plugins/opensmtpd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opensmtpd/README.md, OpenSMTPD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opensmtpd/README.md, OpenSMTPD Plugin Source --- # OpenSMTPD Input Plugin diff --git a/content/telegraf/v1/input-plugins/openstack/_index.md b/content/telegraf/v1/input-plugins/openstack/_index.md index 32bc6d5fcb..b98b8f1521 100644 --- a/content/telegraf/v1/input-plugins/openstack/_index.md +++ b/content/telegraf/v1/input-plugins/openstack/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openstack/README.md, OpenStack Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openstack/README.md, OpenStack Plugin Source --- # OpenStack Input Plugin diff --git a/content/telegraf/v1/input-plugins/opentelemetry/_index.md b/content/telegraf/v1/input-plugins/opentelemetry/_index.md index b6e8f1031f..4b2cafc7f5 100644 --- a/content/telegraf/v1/input-plugins/opentelemetry/_index.md +++ b/content/telegraf/v1/input-plugins/opentelemetry/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opentelemetry/README.md, OpenTelemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opentelemetry/README.md, OpenTelemetry Plugin Source --- # OpenTelemetry Input Plugin diff --git a/content/telegraf/v1/input-plugins/openweathermap/_index.md b/content/telegraf/v1/input-plugins/openweathermap/_index.md index 7d46b6649b..88e7eaf729 100644 --- a/content/telegraf/v1/input-plugins/openweathermap/_index.md +++ b/content/telegraf/v1/input-plugins/openweathermap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openweathermap/README.md, OpenWeatherMap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openweathermap/README.md, OpenWeatherMap Plugin Source --- # OpenWeatherMap Input Plugin diff --git a/content/telegraf/v1/input-plugins/p4runtime/_index.md b/content/telegraf/v1/input-plugins/p4runtime/_index.md index 7069ad147c..f0b79c03f2 100644 --- a/content/telegraf/v1/input-plugins/p4runtime/_index.md +++ b/content/telegraf/v1/input-plugins/p4runtime/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/p4runtime/README.md, P4 Runtime Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/p4runtime/README.md, P4 Runtime Plugin Source --- # P4 Runtime Input Plugin diff --git a/content/telegraf/v1/input-plugins/passenger/_index.md b/content/telegraf/v1/input-plugins/passenger/_index.md index 91113aa6ca..eec5673444 100644 --- a/content/telegraf/v1/input-plugins/passenger/_index.md +++ b/content/telegraf/v1/input-plugins/passenger/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/passenger/README.md, Passenger Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/passenger/README.md, Passenger Plugin Source --- # Passenger Input Plugin diff --git a/content/telegraf/v1/input-plugins/pf/_index.md b/content/telegraf/v1/input-plugins/pf/_index.md index 36ef96c819..1c49189946 100644 --- a/content/telegraf/v1/input-plugins/pf/_index.md +++ b/content/telegraf/v1/input-plugins/pf/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/pf/README.md, PF Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/pf/README.md, PF Plugin Source --- # PF Input Plugin diff --git a/content/telegraf/v1/input-plugins/pgbouncer/_index.md b/content/telegraf/v1/input-plugins/pgbouncer/_index.md index ae370ba515..802117955e 100644 --- a/content/telegraf/v1/input-plugins/pgbouncer/_index.md +++ b/content/telegraf/v1/input-plugins/pgbouncer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/pgbouncer/README.md, PgBouncer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/pgbouncer/README.md, PgBouncer Plugin Source --- # PgBouncer Input Plugin diff --git a/content/telegraf/v1/input-plugins/phpfpm/_index.md b/content/telegraf/v1/input-plugins/phpfpm/_index.md index 627d72de85..20673fe7c2 100644 --- a/content/telegraf/v1/input-plugins/phpfpm/_index.md +++ b/content/telegraf/v1/input-plugins/phpfpm/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.10" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/phpfpm/README.md, PHP-FPM Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/phpfpm/README.md, PHP-FPM Plugin Source --- # PHP-FPM Input Plugin diff --git a/content/telegraf/v1/input-plugins/ping/_index.md b/content/telegraf/v1/input-plugins/ping/_index.md index cd53f7175f..040760f29c 100644 --- a/content/telegraf/v1/input-plugins/ping/_index.md +++ b/content/telegraf/v1/input-plugins/ping/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.8" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ping/README.md, Ping Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ping/README.md, Ping Plugin Source --- # Ping Input Plugin diff --git a/content/telegraf/v1/input-plugins/postfix/_index.md b/content/telegraf/v1/input-plugins/postfix/_index.md index ad19afa873..9b62f77021 100644 --- a/content/telegraf/v1/input-plugins/postfix/_index.md +++ b/content/telegraf/v1/input-plugins/postfix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postfix/README.md, Postfix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postfix/README.md, Postfix Plugin Source --- # Postfix Input Plugin diff --git a/content/telegraf/v1/input-plugins/postgresql/_index.md b/content/telegraf/v1/input-plugins/postgresql/_index.md index ae1746d5c0..13647079a0 100644 --- a/content/telegraf/v1/input-plugins/postgresql/_index.md +++ b/content/telegraf/v1/input-plugins/postgresql/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postgresql/README.md, PostgreSQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postgresql/README.md, PostgreSQL Plugin Source --- # PostgreSQL Input Plugin diff --git a/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md b/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md index 647cfea008..8d0975b8ad 100644 --- a/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md +++ b/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postgresql_extensible/README.md, PostgreSQL Extensible Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postgresql_extensible/README.md, PostgreSQL Extensible Plugin Source --- # PostgreSQL Extensible Input Plugin diff --git a/content/telegraf/v1/input-plugins/powerdns/_index.md b/content/telegraf/v1/input-plugins/powerdns/_index.md index a749f1f63f..2f6e7357a5 100644 --- a/content/telegraf/v1/input-plugins/powerdns/_index.md +++ b/content/telegraf/v1/input-plugins/powerdns/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/powerdns/README.md, PowerDNS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/powerdns/README.md, PowerDNS Plugin Source --- # PowerDNS Input Plugin diff --git a/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md b/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md index f1236b748c..1f40df3aa3 100644 --- a/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md +++ 
b/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/powerdns_recursor/README.md, PowerDNS Recursor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/powerdns_recursor/README.md, PowerDNS Recursor Plugin Source --- # PowerDNS Recursor Input Plugin diff --git a/content/telegraf/v1/input-plugins/processes/_index.md b/content/telegraf/v1/input-plugins/processes/_index.md index da0f0b6003..4cfd56d601 100644 --- a/content/telegraf/v1/input-plugins/processes/_index.md +++ b/content/telegraf/v1/input-plugins/processes/_index.md @@ -10,7 +10,7 @@ introduced: "v0.11.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/processes/README.md, Processes Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/processes/README.md, Processes Plugin Source --- # Processes Input Plugin diff --git a/content/telegraf/v1/input-plugins/procstat/_index.md b/content/telegraf/v1/input-plugins/procstat/_index.md index 92abefd334..6c053924ab 100644 --- a/content/telegraf/v1/input-plugins/procstat/_index.md +++ b/content/telegraf/v1/input-plugins/procstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/procstat/README.md, Procstat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/procstat/README.md, Procstat Plugin Source --- # Procstat Input Plugin diff --git a/content/telegraf/v1/input-plugins/prometheus/_index.md b/content/telegraf/v1/input-plugins/prometheus/_index.md index f7fd966e59..b50c5041e6 100644 --- 
a/content/telegraf/v1/input-plugins/prometheus/_index.md +++ b/content/telegraf/v1/input-plugins/prometheus/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/prometheus/README.md, Prometheus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/prometheus/README.md, Prometheus Plugin Source --- # Prometheus Input Plugin diff --git a/content/telegraf/v1/input-plugins/promql/_index.md b/content/telegraf/v1/input-plugins/promql/_index.md index 73a9b52dca..b1170b421c 100644 --- a/content/telegraf/v1/input-plugins/promql/_index.md +++ b/content/telegraf/v1/input-plugins/promql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/promql/README.md, PromQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/promql/README.md, PromQL Plugin Source --- # PromQL Input Plugin @@ -66,6 +66,9 @@ more details on how to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" + ## Use the local address for connecting, assigned by the OS by default + # local_address = "" + ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/proxmox/_index.md b/content/telegraf/v1/input-plugins/proxmox/_index.md index b299a9458e..c8c0e2276a 100644 --- a/content/telegraf/v1/input-plugins/proxmox/_index.md +++ b/content/telegraf/v1/input-plugins/proxmox/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/proxmox/README.md, Proxmox Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/proxmox/README.md, Proxmox Plugin Source --- # Proxmox Input Plugin diff --git a/content/telegraf/v1/input-plugins/puppetagent/_index.md b/content/telegraf/v1/input-plugins/puppetagent/_index.md index b8e55563ef..89f732da61 100644 --- a/content/telegraf/v1/input-plugins/puppetagent/_index.md +++ b/content/telegraf/v1/input-plugins/puppetagent/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/puppetagent/README.md, Puppet Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/puppetagent/README.md, Puppet Agent Plugin Source --- # Puppet Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/rabbitmq/_index.md b/content/telegraf/v1/input-plugins/rabbitmq/_index.md index fe5adcd70d..f6ed7c171e 100644 --- a/content/telegraf/v1/input-plugins/rabbitmq/_index.md +++ b/content/telegraf/v1/input-plugins/rabbitmq/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/rabbitmq/README.md, RabbitMQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/rabbitmq/README.md, RabbitMQ Plugin Source --- # RabbitMQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/radius/_index.md b/content/telegraf/v1/input-plugins/radius/_index.md index 2209af5a83..1850d08aef 100644 --- a/content/telegraf/v1/input-plugins/radius/_index.md +++ b/content/telegraf/v1/input-plugins/radius/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/radius/README.md, Radius Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/radius/README.md, Radius Plugin Source --- # Radius Input Plugin diff --git a/content/telegraf/v1/input-plugins/raindrops/_index.md b/content/telegraf/v1/input-plugins/raindrops/_index.md index 9afb8c9dcd..a31e3bf26c 100644 --- a/content/telegraf/v1/input-plugins/raindrops/_index.md +++ b/content/telegraf/v1/input-plugins/raindrops/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/raindrops/README.md, Raindrops Middleware Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/raindrops/README.md, Raindrops Middleware Plugin Source --- # Raindrops Middleware Input Plugin diff --git a/content/telegraf/v1/input-plugins/ras/_index.md b/content/telegraf/v1/input-plugins/ras/_index.md index abfc63847d..a13cdfaf5a 100644 --- a/content/telegraf/v1/input-plugins/ras/_index.md +++ b/content/telegraf/v1/input-plugins/ras/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ras/README.md, RAS Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ras/README.md, RAS Daemon Plugin Source --- # RAS Daemon Input Plugin diff --git a/content/telegraf/v1/input-plugins/ravendb/_index.md b/content/telegraf/v1/input-plugins/ravendb/_index.md index 6d1e98f1da..a2ec18ced0 100644 --- a/content/telegraf/v1/input-plugins/ravendb/_index.md +++ b/content/telegraf/v1/input-plugins/ravendb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ravendb/README.md, RavenDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ravendb/README.md, RavenDB Plugin Source --- # RavenDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/redfish/_index.md b/content/telegraf/v1/input-plugins/redfish/_index.md index 31685bb8fd..069fbacd8f 100644 --- a/content/telegraf/v1/input-plugins/redfish/_index.md +++ b/content/telegraf/v1/input-plugins/redfish/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redfish/README.md, Redfish Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redfish/README.md, Redfish Plugin Source --- # Redfish Input Plugin diff --git a/content/telegraf/v1/input-plugins/redis/_index.md b/content/telegraf/v1/input-plugins/redis/_index.md index fc322e18b1..9472c4ae38 100644 --- a/content/telegraf/v1/input-plugins/redis/_index.md +++ b/content/telegraf/v1/input-plugins/redis/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redis/README.md, Redis Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redis/README.md, Redis Plugin Source --- # Redis Input Plugin diff --git a/content/telegraf/v1/input-plugins/redis_sentinel/_index.md b/content/telegraf/v1/input-plugins/redis_sentinel/_index.md index 5d36437b6b..ef77c13a0c 100644 --- a/content/telegraf/v1/input-plugins/redis_sentinel/_index.md +++ b/content/telegraf/v1/input-plugins/redis_sentinel/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redis_sentinel/README.md, Redis Sentinel Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redis_sentinel/README.md, Redis Sentinel Plugin Source --- # Redis Sentinel Input Plugin diff --git a/content/telegraf/v1/input-plugins/rethinkdb/_index.md b/content/telegraf/v1/input-plugins/rethinkdb/_index.md index d7cf4bb9fd..3d9881c4d5 100644 --- a/content/telegraf/v1/input-plugins/rethinkdb/_index.md +++ b/content/telegraf/v1/input-plugins/rethinkdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/rethinkdb/README.md, RethinkDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/rethinkdb/README.md, RethinkDB Plugin Source --- # RethinkDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/riak/_index.md b/content/telegraf/v1/input-plugins/riak/_index.md index 0de9f52854..0531a8c986 100644 --- a/content/telegraf/v1/input-plugins/riak/_index.md +++ b/content/telegraf/v1/input-plugins/riak/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.4" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/riak/README.md, Riak Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/riak/README.md, Riak Plugin Source --- # Riak Input Plugin diff --git a/content/telegraf/v1/input-plugins/riemann_listener/_index.md b/content/telegraf/v1/input-plugins/riemann_listener/_index.md index 806bb1dc97..62929360ea 100644 --- a/content/telegraf/v1/input-plugins/riemann_listener/_index.md +++ b/content/telegraf/v1/input-plugins/riemann_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/riemann_listener/README.md, Riemann Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/riemann_listener/README.md, Riemann Listener Plugin Source --- # Riemann Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/s7comm/_index.md b/content/telegraf/v1/input-plugins/s7comm/_index.md index 1e3daa8e43..11776766af 100644 --- a/content/telegraf/v1/input-plugins/s7comm/_index.md +++ b/content/telegraf/v1/input-plugins/s7comm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/s7comm/README.md, Siemens S7 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/s7comm/README.md, Siemens S7 Plugin Source --- # Siemens S7 Input Plugin diff --git a/content/telegraf/v1/input-plugins/salesforce/_index.md b/content/telegraf/v1/input-plugins/salesforce/_index.md index 2fd239403c..990a581932 100644 --- a/content/telegraf/v1/input-plugins/salesforce/_index.md +++ b/content/telegraf/v1/input-plugins/salesforce/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/salesforce/README.md, Salesforce Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/salesforce/README.md, Salesforce Plugin Source --- # Salesforce Input Plugin diff --git a/content/telegraf/v1/input-plugins/sensors/_index.md b/content/telegraf/v1/input-plugins/sensors/_index.md index 41c6aa27ef..f2da8bc248 100644 --- a/content/telegraf/v1/input-plugins/sensors/_index.md +++ b/content/telegraf/v1/input-plugins/sensors/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sensors/README.md, LM Sensors Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sensors/README.md, LM Sensors Plugin Source --- # LM Sensors Input Plugin diff --git a/content/telegraf/v1/input-plugins/sflow/_index.md b/content/telegraf/v1/input-plugins/sflow/_index.md index 867e44d41e..36de6d5de1 100644 --- a/content/telegraf/v1/input-plugins/sflow/_index.md +++ b/content/telegraf/v1/input-plugins/sflow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sflow/README.md, SFlow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sflow/README.md, SFlow Plugin Source --- # SFlow Input Plugin diff --git a/content/telegraf/v1/input-plugins/sip/_index.md b/content/telegraf/v1/input-plugins/sip/_index.md new file mode 100644 index 0000000000..9cabab6c7c --- /dev/null +++ b/content/telegraf/v1/input-plugins/sip/_index.md @@ -0,0 +1,182 @@ +--- +description: "Telegraf plugin for collecting metrics from SIP" +menu: + telegraf_v1_ref: + parent: 
input_plugins_reference + name: SIP + identifier: input-sip +tags: [SIP, "input-plugins", "configuration", "network"] +introduced: "v1.38.0" +os_support: "freebsd, linux, macos, solaris, windows" +related: + - /telegraf/v1/configure_plugins/ + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sip/README.md, SIP Plugin Source +--- + +# SIP Input Plugin + +This plugin gathers metrics about the health and availability of +[SIP (Session Initiation Protocol)](https://datatracker.ietf.org/doc/html/rfc3261) servers such as PBX systems, SIP +proxies, registrars, and VoIP service providers. It sends SIP requests +(typically OPTIONS) and measures response times and status codes. + +**Introduced in:** Telegraf v1.38.0 +**Tags:** network +**OS support:** all + +[sip]: https://datatracker.ietf.org/doc/html/rfc3261 + +## Global configuration options + +Plugins support additional global and plugin configuration settings for tasks +such as modifying metrics, tags, and fields, creating aliases, and configuring +plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Secret-store support + +This plugin supports secrets from secret-stores for the `username` and +`password` option. +See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how +to use them. 
+ +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + +## Configuration + +```toml @sample.conf +# SIP (Session Initiation Protocol) health check plugin +[[inputs.sip]] + ## SIP server address to monitor + ## Format: sip://host[:port] or sips://host[:port] + ## sip:// - Standard SIP (default port 5060) + ## sips:// - Secure SIP with TLS (default port 5061) + server = "sip://sip.example.com:5060" + + ## Transport protocol + ## Valid values: udp, tcp, ws, wss + # transport = "udp" + + ## SIP method to use for health checks + ## Valid values: OPTIONS, INVITE, MESSAGE + # method = "OPTIONS" + + ## Request timeout + # timeout = "5s" + + ## From user as it appears in SIP header + # from_user = "telegraf" + + ## From domain (domain part of From header) + ## If not specified, uses the server hostname + # from_domain = "" + + ## To user as it appears in SIP header + ## If not specified, uses the same value as from_user + # to_user = "" + + ## Local address to use for outgoing requests + # local_address = "" + + ## SIP digest authentication credentials + ## Leave empty to use no authentication + # username = "" + # password = "" + + ## Optional TLS Config (only used for sips:// URLs or transport=tls/wss) + ## Set to true/false to enforce TLS being enabled/disabled. If not set, + ## enable TLS only if any of the other options are specified. 
+ # tls_enable = + ## Trusted root certificates for server + # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication + # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication + # tls_key = "/path/to/keyfile" + ## Password for the key file if it is encrypted + # tls_key_pwd = "" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" + ## Minimal TLS version to accept by the client + # tls_min_version = "TLS12" + ## List of ciphers to accept, by default all secure ciphers will be accepted + ## See https://pkg.go.dev/crypto/tls#pkg-constants for supported values. + ## Use "all", "secure" and "insecure" to add all support ciphers, secure + ## suites or insecure suites respectively. + # tls_cipher_suites = ["secure"] + ## Renegotiation method, "never", "once" or "freely" + # tls_renegotiation_method = "never" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### SIP Methods + +The plugin supports the following SIP methods: + +- **OPTIONS** (recommended): Standard SIP method for health checks. Queries + server capabilities without establishing a session. +- **INVITE**: Initiates a session. Use with caution as it may create call + records. +- **MESSAGE**: Sends an instant message. Useful for testing messaging + infrastructure. + +## Troubleshooting + +### Permission Issues + +Some SIP implementations may require specific network permissions. If you +encounter permission errors, ensure Telegraf has appropriate network access. 
+ +### Firewall Configuration + +Ensure that: + +- Outbound connections to SIP ports (typically 5060/5061) are allowed +- If using UDP, firewall allows UDP packets +- Return traffic is permitted for the transaction + +### Timeout Issues + +If experiencing frequent timeouts: + +- Increase the `timeout` value +- Verify network connectivity to the SIP server +- Check if the SIP server is configured to respond to the chosen method +- Ensure the correct transport protocol is selected + +### Response Codes + +Different SIP servers may respond with different status codes to OPTIONS requests: + +- `200 OK` - Server is operational and responding +- `404 Not Found` - User or resource doesn't exist (may still indicate healthy server) +- `401 Unauthorized` / `407 Proxy Authentication Required` - Authentication required + +## Metrics + +- sip + - tags: + - source (the SIP server address) + - method (the SIP method used, lowercase: options, invite, message) + - transport (the transport protocol: udp, tcp, ws, wss) + - status_code (the SIP response status code, e.g., "200", "404"; not always present, e.g. on timeout) + - fields: + - response_time_s (float, seconds) - Time taken to receive response + (for timeouts, this equals the configured timeout value) + - result (string) - The outcome of the request: the SIP reason phrase when + a response is received (e.g. 
"OK", "Not Found", "Unauthorized"), or a + sentinel value when no valid response is received (`Timeout`, `Error`, + `No Response`) + - server_agent (string, optional) - Value of the `Server` header from the + SIP response, identifying the remote server software + +## Example Output + +```text +sip,host=telegraf-host,method=options,source=sip://sip.example.com:5060,status_code=200,transport=udp response_time_s=0.023,result="OK" 1640000000000000000 +sip,host=telegraf-host,method=options,source=sip://unreachable.example.com:5060,transport=udp response_time_s=5.0,result="Timeout" 1640000000000000000 +sip,host=telegraf-host,method=options,source=sip://sip.provider.com:5060,status_code=404,transport=udp response_time_s=0.045,result="Not Found" 1640000000000000000 +sip,host=telegraf-host,method=options,source=sips://secure.voip.example.com:5061,status_code=200,transport=tcp response_time_s=0.067,result="OK",server_agent="Asterisk PBX 18.15.0" 1640000000000000000 +``` diff --git a/content/telegraf/v1/input-plugins/slab/_index.md b/content/telegraf/v1/input-plugins/slab/_index.md index 4d4c4fbc41..8e699cb75e 100644 --- a/content/telegraf/v1/input-plugins/slab/_index.md +++ b/content/telegraf/v1/input-plugins/slab/_index.md @@ -10,7 +10,7 @@ introduced: "v1.23.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/slab/README.md, Slab Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/slab/README.md, Slab Plugin Source --- # Slab Input Plugin diff --git a/content/telegraf/v1/input-plugins/slurm/_index.md b/content/telegraf/v1/input-plugins/slurm/_index.md index ea6b981db2..7b3d6a352d 100644 --- a/content/telegraf/v1/input-plugins/slurm/_index.md +++ b/content/telegraf/v1/input-plugins/slurm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/slurm/README.md, SLURM Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/slurm/README.md, SLURM Plugin Source --- # SLURM Input Plugin diff --git a/content/telegraf/v1/input-plugins/smart/_index.md b/content/telegraf/v1/input-plugins/smart/_index.md index 65f776c90b..2eff623735 100644 --- a/content/telegraf/v1/input-plugins/smart/_index.md +++ b/content/telegraf/v1/input-plugins/smart/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/smart/README.md, S.M.A.R.T. Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/smart/README.md, S.M.A.R.T. Plugin Source --- # S.M.A.R.T. Input Plugin @@ -225,14 +225,25 @@ smartctl --scan -d nvme - serial_no - wwn - fields: + - available_spare (NVMe) + - available_spare_threshold (NVMe) + - critical_temperature_time (NVMe) + - critical_warning (NVMe) + - error_log_entries (NVMe) - exit_status - health_ok + - media_errors (NVMe) - media_wearout_indicator - percent_lifetime_remain + - percentage_used (NVMe) + - power_cycle_count + - power_on_hours - read_error_rate - - seek_error + - seek_error_rate - temp_c - udma_crc_errors + - unsafe_shutdowns (NVMe) + - warning_temperature_time (NVMe) - wear_leveling_count - smart_attribute: diff --git a/content/telegraf/v1/input-plugins/smartctl/_index.md b/content/telegraf/v1/input-plugins/smartctl/_index.md index 36f6a30823..36c1a65b5f 100644 --- a/content/telegraf/v1/input-plugins/smartctl/_index.md +++ b/content/telegraf/v1/input-plugins/smartctl/_index.md @@ -10,7 +10,7 @@ introduced: "v1.31.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/smartctl/README.md, smartctl JSON 
Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/smartctl/README.md, smartctl JSON Plugin Source --- # smartctl JSON Input Plugin diff --git a/content/telegraf/v1/input-plugins/snmp/_index.md b/content/telegraf/v1/input-plugins/snmp/_index.md index 7c3c6d8dc5..5e7f7433aa 100644 --- a/content/telegraf/v1/input-plugins/snmp/_index.md +++ b/content/telegraf/v1/input-plugins/snmp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/snmp/README.md, SNMP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/snmp/README.md, SNMP Plugin Source --- # SNMP Input Plugin diff --git a/content/telegraf/v1/input-plugins/snmp_trap/_index.md b/content/telegraf/v1/input-plugins/snmp_trap/_index.md index 527317f4bb..0c785bae63 100644 --- a/content/telegraf/v1/input-plugins/snmp_trap/_index.md +++ b/content/telegraf/v1/input-plugins/snmp_trap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/snmp_trap/README.md, SNMP Trap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/snmp_trap/README.md, SNMP Trap Plugin Source --- # SNMP Trap Input Plugin diff --git a/content/telegraf/v1/input-plugins/socket_listener/_index.md b/content/telegraf/v1/input-plugins/socket_listener/_index.md index f3886afe0c..18c9a3bcc1 100644 --- a/content/telegraf/v1/input-plugins/socket_listener/_index.md +++ b/content/telegraf/v1/input-plugins/socket_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/socket_listener/README.md, Socket Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/socket_listener/README.md, Socket Listener Plugin Source --- # Socket Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/socketstat/_index.md b/content/telegraf/v1/input-plugins/socketstat/_index.md index 74cafe45a6..eb978f037b 100644 --- a/content/telegraf/v1/input-plugins/socketstat/_index.md +++ b/content/telegraf/v1/input-plugins/socketstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/socketstat/README.md, Socket Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/socketstat/README.md, Socket Statistics Plugin Source --- # Socket Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/solr/_index.md b/content/telegraf/v1/input-plugins/solr/_index.md index c2e13acc4d..7bba310eee 100644 --- a/content/telegraf/v1/input-plugins/solr/_index.md +++ b/content/telegraf/v1/input-plugins/solr/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/solr/README.md, Apache Solr Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/solr/README.md, Apache Solr Plugin Source --- # Apache Solr Input Plugin diff --git a/content/telegraf/v1/input-plugins/sql/_index.md b/content/telegraf/v1/input-plugins/sql/_index.md index dca95f6827..0d4919ebd0 100644 --- a/content/telegraf/v1/input-plugins/sql/_index.md +++ b/content/telegraf/v1/input-plugins/sql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sql/README.md, SQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sql/README.md, SQL Plugin Source --- # SQL Input Plugin diff --git a/content/telegraf/v1/input-plugins/sqlserver/_index.md b/content/telegraf/v1/input-plugins/sqlserver/_index.md index 8248ab2132..848d689e08 100644 --- a/content/telegraf/v1/input-plugins/sqlserver/_index.md +++ b/content/telegraf/v1/input-plugins/sqlserver/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sqlserver/README.md, Microsoft SQL Server Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sqlserver/README.md, Microsoft SQL Server Plugin Source --- # Microsoft SQL Server Input Plugin diff --git a/content/telegraf/v1/input-plugins/stackdriver/_index.md b/content/telegraf/v1/input-plugins/stackdriver/_index.md index f3171a4b07..e4dd9dfa16 100644 --- a/content/telegraf/v1/input-plugins/stackdriver/_index.md +++ b/content/telegraf/v1/input-plugins/stackdriver/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/stackdriver/README.md, Stackdriver Google Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/stackdriver/README.md, Stackdriver Google Cloud Monitoring Plugin Source --- # Stackdriver Google Cloud Monitoring Input Plugin diff --git a/content/telegraf/v1/input-plugins/statsd/_index.md b/content/telegraf/v1/input-plugins/statsd/_index.md index 66fc897847..7bb86749a7 100644 --- a/content/telegraf/v1/input-plugins/statsd/_index.md +++ 
b/content/telegraf/v1/input-plugins/statsd/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/statsd/README.md, StatsD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/statsd/README.md, StatsD Plugin Source --- # StatsD Input Plugin @@ -88,7 +88,7 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for metric_separator = "_" ## Parses extensions to statsd in the datadog statsd format - ## currently supports metrics and datadog tags. + ## currently supports metrics, datadog tags, events, and service checks. ## http://docs.datadoghq.com/guides/dogstatsd/ datadog_extensions = false @@ -268,6 +268,39 @@ metric type: given time interval, a Distribution metric sends all the raw data during a time interval. +### Datadog Service Checks + +When `datadog_extensions` is enabled, the plugin also supports +[Datadog service checks](https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/) in the format: + +```text +_sc|<name>|<status>|d:<timestamp>|h:<hostname>|#<tags>|m:<message> +``` + +- `<name>` - service check name (required) +- `<status>` - 0=OK, 1=Warning, 2=Critical, 3=Unknown (required) +- `d:<timestamp>` - optional Unix timestamp +- `h:<hostname>` - optional hostname override +- `#<tags>` - optional tags (same format as metrics) +- `m:<message>` - optional message + +Example: + +```shell +echo "_sc|my.service.check|0|#env:prod|m:Service is healthy" | nc -u -w1 127.0.0.1 8125 +``` + +Service checks produce a metric with measurement name `statsd_service_check`: + +- **Tags:** + - `check_name`: The service check name + - `source`: Hostname (from `h:` field or default) + - Plus any custom tags from the `#` section +- **Fields:** + - `status` (int): Status code (0-3) + - `status_text` (string): "ok", "warning", "critical", or "unknown" + - `message` (string): Optional message from `m:` field + ## Plugin arguments
- **protocol** string: Protocol used in listener - tcp or udp options @@ -293,7 +326,7 @@ measurements and tags. [dogstatsd format](http://docs.datadoghq.com/guides/dogstatsd/) - **datadog_extensions** boolean: Enable parsing of DataDog's extensions to [dogstatsd format](http://docs.datadoghq.com/guides/dogstatsd/) - and more + including events and service checks - **datadog_distributions** boolean: Enable parsing of the Distribution metric in [DataDog's distribution format](https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) - **datadog_keep_container_tag** boolean: Keep or drop the container id as tag. @@ -305,6 +338,7 @@ measurements and tags. [dogstatsd_format]: http://docs.datadoghq.com/guides/dogstatsd/ [dogstatsd_distri_format]: https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +[dogstatsd_service_checks]: https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/ ## Statsd bucket -> InfluxDB line-protocol Templates diff --git a/content/telegraf/v1/input-plugins/supervisor/_index.md b/content/telegraf/v1/input-plugins/supervisor/_index.md index 6e6fb1b6aa..0308383d94 100644 --- a/content/telegraf/v1/input-plugins/supervisor/_index.md +++ b/content/telegraf/v1/input-plugins/supervisor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/supervisor/README.md, Supervisor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/supervisor/README.md, Supervisor Plugin Source --- # Supervisor Input Plugin diff --git a/content/telegraf/v1/input-plugins/suricata/_index.md b/content/telegraf/v1/input-plugins/suricata/_index.md index be8e55b8ec..00db0d4512 100644 --- a/content/telegraf/v1/input-plugins/suricata/_index.md +++ b/content/telegraf/v1/input-plugins/suricata/_index.md @@ -10,7 
+10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/suricata/README.md, Suricata Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/suricata/README.md, Suricata Plugin Source --- # Suricata Input Plugin diff --git a/content/telegraf/v1/input-plugins/swap/_index.md b/content/telegraf/v1/input-plugins/swap/_index.md index 057c20d26c..6316dac314 100644 --- a/content/telegraf/v1/input-plugins/swap/_index.md +++ b/content/telegraf/v1/input-plugins/swap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/swap/README.md, Swap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/swap/README.md, Swap Plugin Source --- # Swap Input Plugin diff --git a/content/telegraf/v1/input-plugins/synproxy/_index.md b/content/telegraf/v1/input-plugins/synproxy/_index.md index 7293550a48..b958653fa6 100644 --- a/content/telegraf/v1/input-plugins/synproxy/_index.md +++ b/content/telegraf/v1/input-plugins/synproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/synproxy/README.md, Synproxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/synproxy/README.md, Synproxy Plugin Source --- # Synproxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/syslog/_index.md b/content/telegraf/v1/input-plugins/syslog/_index.md index 77fc71a6c4..fa16c20855 100644 --- a/content/telegraf/v1/input-plugins/syslog/_index.md +++ b/content/telegraf/v1/input-plugins/syslog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/syslog/README.md, Syslog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/syslog/README.md, Syslog Plugin Source --- # Syslog Input Plugin diff --git a/content/telegraf/v1/input-plugins/sysstat/_index.md b/content/telegraf/v1/input-plugins/sysstat/_index.md index b61176ef97..5dc88528ff 100644 --- a/content/telegraf/v1/input-plugins/sysstat/_index.md +++ b/content/telegraf/v1/input-plugins/sysstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sysstat/README.md, System Performance Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sysstat/README.md, System Performance Statistics Plugin Source --- # System Performance Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/system/_index.md b/content/telegraf/v1/input-plugins/system/_index.md index 1f221504a7..e8f6ddac2a 100644 --- a/content/telegraf/v1/input-plugins/system/_index.md +++ b/content/telegraf/v1/input-plugins/system/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.6" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/system/README.md, System Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/system/README.md, System Plugin Source --- # System Input Plugin diff --git a/content/telegraf/v1/input-plugins/systemd_units/_index.md b/content/telegraf/v1/input-plugins/systemd_units/_index.md index f873708c57..d68b0d64c4 100644 --- a/content/telegraf/v1/input-plugins/systemd_units/_index.md +++ b/content/telegraf/v1/input-plugins/systemd_units/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "linux" 
related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/systemd_units/README.md, Systemd-Units Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/systemd_units/README.md, Systemd-Units Plugin Source --- # Systemd-Units Input Plugin diff --git a/content/telegraf/v1/input-plugins/tacacs/_index.md b/content/telegraf/v1/input-plugins/tacacs/_index.md index 5a74c92a85..085b3de279 100644 --- a/content/telegraf/v1/input-plugins/tacacs/_index.md +++ b/content/telegraf/v1/input-plugins/tacacs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tacacs/README.md, Tacacs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tacacs/README.md, Tacacs Plugin Source --- # Tacacs Input Plugin diff --git a/content/telegraf/v1/input-plugins/tail/_index.md b/content/telegraf/v1/input-plugins/tail/_index.md index f98ecd8c2e..1c358c6925 100644 --- a/content/telegraf/v1/input-plugins/tail/_index.md +++ b/content/telegraf/v1/input-plugins/tail/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tail/README.md, Tail Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tail/README.md, Tail Plugin Source --- # Tail Input Plugin diff --git a/content/telegraf/v1/input-plugins/teamspeak/_index.md b/content/telegraf/v1/input-plugins/teamspeak/_index.md index d266fb446c..3834d03e13 100644 --- a/content/telegraf/v1/input-plugins/teamspeak/_index.md +++ b/content/telegraf/v1/input-plugins/teamspeak/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/teamspeak/README.md, Teamspeak Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/teamspeak/README.md, Teamspeak Plugin Source --- # Teamspeak Input Plugin diff --git a/content/telegraf/v1/input-plugins/temp/_index.md b/content/telegraf/v1/input-plugins/temp/_index.md index 38c22bdde7..e0708cdffb 100644 --- a/content/telegraf/v1/input-plugins/temp/_index.md +++ b/content/telegraf/v1/input-plugins/temp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "linux, macos, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/temp/README.md, Temperature Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/temp/README.md, Temperature Plugin Source --- # Temperature Input Plugin diff --git a/content/telegraf/v1/input-plugins/tengine/_index.md b/content/telegraf/v1/input-plugins/tengine/_index.md index a96613537d..773aa9b61e 100644 --- a/content/telegraf/v1/input-plugins/tengine/_index.md +++ b/content/telegraf/v1/input-plugins/tengine/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tengine/README.md, Tengine Web Server Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tengine/README.md, Tengine Web Server Plugin Source --- # Tengine Web Server Input Plugin diff --git a/content/telegraf/v1/input-plugins/timex/_index.md b/content/telegraf/v1/input-plugins/timex/_index.md index 919bb051e7..2e8c68d74a 100644 --- a/content/telegraf/v1/input-plugins/timex/_index.md +++ b/content/telegraf/v1/input-plugins/timex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/timex/README.md, Timex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/timex/README.md, Timex Plugin Source --- # Timex Input Plugin diff --git a/content/telegraf/v1/input-plugins/tomcat/_index.md b/content/telegraf/v1/input-plugins/tomcat/_index.md index 202aba94eb..fd55d5ca4d 100644 --- a/content/telegraf/v1/input-plugins/tomcat/_index.md +++ b/content/telegraf/v1/input-plugins/tomcat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tomcat/README.md, Apache Tomcat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tomcat/README.md, Apache Tomcat Plugin Source --- # Apache Tomcat Input Plugin diff --git a/content/telegraf/v1/input-plugins/trig/_index.md b/content/telegraf/v1/input-plugins/trig/_index.md index e193a3a743..aaadc3279b 100644 --- a/content/telegraf/v1/input-plugins/trig/_index.md +++ b/content/telegraf/v1/input-plugins/trig/_index.md @@ -10,7 +10,7 @@ introduced: "v0.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/trig/README.md, Trig Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/trig/README.md, Trig Plugin Source --- # Trig Input Plugin diff --git a/content/telegraf/v1/input-plugins/turbostat/_index.md b/content/telegraf/v1/input-plugins/turbostat/_index.md index 1184c74f8e..b3d9a9e504 100644 --- a/content/telegraf/v1/input-plugins/turbostat/_index.md +++ b/content/telegraf/v1/input-plugins/turbostat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.36.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/turbostat/README.md, Turbostat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/turbostat/README.md, Turbostat Plugin Source --- # Turbostat Input Plugin diff --git a/content/telegraf/v1/input-plugins/twemproxy/_index.md b/content/telegraf/v1/input-plugins/twemproxy/_index.md index 6ef031b45e..d1d1d4710c 100644 --- a/content/telegraf/v1/input-plugins/twemproxy/_index.md +++ b/content/telegraf/v1/input-plugins/twemproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v0.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/twemproxy/README.md, Twemproxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/twemproxy/README.md, Twemproxy Plugin Source --- # Twemproxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/unbound/_index.md b/content/telegraf/v1/input-plugins/unbound/_index.md index 426dbee024..cbcc346bdd 100644 --- a/content/telegraf/v1/input-plugins/unbound/_index.md +++ b/content/telegraf/v1/input-plugins/unbound/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/unbound/README.md, Unbound Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/unbound/README.md, Unbound Plugin Source --- # Unbound Input Plugin diff --git a/content/telegraf/v1/input-plugins/upsd/_index.md b/content/telegraf/v1/input-plugins/upsd/_index.md index a5a573957e..0de57dba93 100644 --- a/content/telegraf/v1/input-plugins/upsd/_index.md +++ b/content/telegraf/v1/input-plugins/upsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/upsd/README.md, UPSD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/upsd/README.md, UPSD Plugin Source --- # UPSD Input Plugin diff --git a/content/telegraf/v1/input-plugins/uwsgi/_index.md b/content/telegraf/v1/input-plugins/uwsgi/_index.md index d6ed91769f..0aac387b46 100644 --- a/content/telegraf/v1/input-plugins/uwsgi/_index.md +++ b/content/telegraf/v1/input-plugins/uwsgi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/uwsgi/README.md, uWSGI Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/uwsgi/README.md, uWSGI Plugin Source --- # uWSGI Input Plugin diff --git a/content/telegraf/v1/input-plugins/varnish/_index.md b/content/telegraf/v1/input-plugins/varnish/_index.md index e5b415b6e3..0eeeed2483 100644 --- a/content/telegraf/v1/input-plugins/varnish/_index.md +++ b/content/telegraf/v1/input-plugins/varnish/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/varnish/README.md, Varnish Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/varnish/README.md, Varnish Plugin Source --- # Varnish Input Plugin diff --git a/content/telegraf/v1/input-plugins/vault/_index.md b/content/telegraf/v1/input-plugins/vault/_index.md index 06e495e6a8..7ba6462ece 100644 --- a/content/telegraf/v1/input-plugins/vault/_index.md +++ b/content/telegraf/v1/input-plugins/vault/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/vault/README.md, Hashicorp Vault Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/vault/README.md, Hashicorp Vault Plugin Source --- # Hashicorp Vault Input Plugin diff --git a/content/telegraf/v1/input-plugins/vsphere/_index.md b/content/telegraf/v1/input-plugins/vsphere/_index.md index 49cee33881..9913b6f841 100644 --- a/content/telegraf/v1/input-plugins/vsphere/_index.md +++ b/content/telegraf/v1/input-plugins/vsphere/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/vsphere/README.md, VMware vSphere Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/vsphere/README.md, VMware vSphere Plugin Source --- # VMware vSphere Input Plugin diff --git a/content/telegraf/v1/input-plugins/webhooks/_index.md b/content/telegraf/v1/input-plugins/webhooks/_index.md index 17799c32b5..e653be2f06 100644 --- a/content/telegraf/v1/input-plugins/webhooks/_index.md +++ b/content/telegraf/v1/input-plugins/webhooks/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/webhooks/README.md, Webhooks Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/webhooks/README.md, Webhooks Plugin Source --- # Webhooks Input Plugin diff --git a/content/telegraf/v1/input-plugins/whois/_index.md b/content/telegraf/v1/input-plugins/whois/_index.md index 637d9b1f81..cdaea29ebb 100644 --- a/content/telegraf/v1/input-plugins/whois/_index.md +++ b/content/telegraf/v1/input-plugins/whois/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/whois/README.md, WHOIS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/whois/README.md, WHOIS Plugin Source --- # WHOIS Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_eventlog/_index.md b/content/telegraf/v1/input-plugins/win_eventlog/_index.md index d35eff5e8c..7c7cfb8201 100644 --- a/content/telegraf/v1/input-plugins/win_eventlog/_index.md +++ b/content/telegraf/v1/input-plugins/win_eventlog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_eventlog/README.md, Windows Eventlog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_eventlog/README.md, Windows Eventlog Plugin Source --- # Windows Eventlog Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_perf_counters/_index.md b/content/telegraf/v1/input-plugins/win_perf_counters/_index.md index da5128b3b9..4255193186 100644 --- a/content/telegraf/v1/input-plugins/win_perf_counters/_index.md +++ b/content/telegraf/v1/input-plugins/win_perf_counters/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.2" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_perf_counters/README.md, Windows Performance Counters Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_perf_counters/README.md, Windows Performance Counters Plugin Source --- # Windows Performance Counters Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_services/_index.md b/content/telegraf/v1/input-plugins/win_services/_index.md index 230c4eb38a..1360ba626b 100644 --- a/content/telegraf/v1/input-plugins/win_services/_index.md +++ b/content/telegraf/v1/input-plugins/win_services/_index.md 
@@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_services/README.md, Windows Services Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_services/README.md, Windows Services Plugin Source --- # Windows Services Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_wmi/_index.md b/content/telegraf/v1/input-plugins/win_wmi/_index.md index 5570241efc..509fcdb912 100644 --- a/content/telegraf/v1/input-plugins/win_wmi/_index.md +++ b/content/telegraf/v1/input-plugins/win_wmi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_wmi/README.md, Windows Management Instrumentation Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_wmi/README.md, Windows Management Instrumentation Plugin Source --- # Windows Management Instrumentation Input Plugin diff --git a/content/telegraf/v1/input-plugins/wireguard/_index.md b/content/telegraf/v1/input-plugins/wireguard/_index.md index 56a81facd6..2b4cf5b318 100644 --- a/content/telegraf/v1/input-plugins/wireguard/_index.md +++ b/content/telegraf/v1/input-plugins/wireguard/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/wireguard/README.md, Wireguard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/wireguard/README.md, Wireguard Plugin Source --- # Wireguard Input Plugin diff --git a/content/telegraf/v1/input-plugins/wireless/_index.md b/content/telegraf/v1/input-plugins/wireless/_index.md index e4004b99ee..5d4035ea8f 100644 --- a/content/telegraf/v1/input-plugins/wireless/_index.md +++ 
b/content/telegraf/v1/input-plugins/wireless/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/wireless/README.md, Wireless Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/wireless/README.md, Wireless Plugin Source --- # Wireless Input Plugin diff --git a/content/telegraf/v1/input-plugins/x509_cert/_index.md b/content/telegraf/v1/input-plugins/x509_cert/_index.md index 2ba100c2bf..71476f6c98 100644 --- a/content/telegraf/v1/input-plugins/x509_cert/_index.md +++ b/content/telegraf/v1/input-plugins/x509_cert/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/x509_cert/README.md, x509 Certificate Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/x509_cert/README.md, x509 Certificate Plugin Source --- # x509 Certificate Input Plugin diff --git a/content/telegraf/v1/input-plugins/xtremio/_index.md b/content/telegraf/v1/input-plugins/xtremio/_index.md index 820b40f849..9a7ce75f99 100644 --- a/content/telegraf/v1/input-plugins/xtremio/_index.md +++ b/content/telegraf/v1/input-plugins/xtremio/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/xtremio/README.md, Dell EMC XtremIO Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/xtremio/README.md, Dell EMC XtremIO Plugin Source --- # Dell EMC XtremIO Input Plugin diff --git a/content/telegraf/v1/input-plugins/zfs/_index.md b/content/telegraf/v1/input-plugins/zfs/_index.md index 7c0db823ef..d054658c81 100644 --- a/content/telegraf/v1/input-plugins/zfs/_index.md 
+++ b/content/telegraf/v1/input-plugins/zfs/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zfs/README.md, ZFS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zfs/README.md, ZFS Plugin Source --- # ZFS Input Plugin diff --git a/content/telegraf/v1/input-plugins/zipkin/_index.md b/content/telegraf/v1/input-plugins/zipkin/_index.md index 416597381e..43c8473f92 100644 --- a/content/telegraf/v1/input-plugins/zipkin/_index.md +++ b/content/telegraf/v1/input-plugins/zipkin/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zipkin/README.md, Zipkin Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zipkin/README.md, Zipkin Plugin Source --- # Zipkin Input Plugin diff --git a/content/telegraf/v1/input-plugins/zookeeper/_index.md b/content/telegraf/v1/input-plugins/zookeeper/_index.md index 6b6995a65d..847a88b3ed 100644 --- a/content/telegraf/v1/input-plugins/zookeeper/_index.md +++ b/content/telegraf/v1/input-plugins/zookeeper/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zookeeper/README.md, Apache Zookeeper Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zookeeper/README.md, Apache Zookeeper Plugin Source --- # Apache Zookeeper Input Plugin diff --git a/content/telegraf/v1/output-plugins/amon/_index.md b/content/telegraf/v1/output-plugins/amon/_index.md index 24c5f09e99..30fb0b5e43 100644 --- a/content/telegraf/v1/output-plugins/amon/_index.md +++ 
b/content/telegraf/v1/output-plugins/amon/_index.md @@ -12,7 +12,7 @@ removal: v1.40.0 os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/amon/README.md, Amon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/amon/README.md, Amon Plugin Source --- # Amon Output Plugin diff --git a/content/telegraf/v1/output-plugins/amqp/_index.md b/content/telegraf/v1/output-plugins/amqp/_index.md index 11b2b54d6d..007cff3f8f 100644 --- a/content/telegraf/v1/output-plugins/amqp/_index.md +++ b/content/telegraf/v1/output-plugins/amqp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/amqp/README.md, AMQP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/amqp/README.md, AMQP Plugin Source --- # AMQP Output Plugin diff --git a/content/telegraf/v1/output-plugins/application_insights/_index.md b/content/telegraf/v1/output-plugins/application_insights/_index.md index 206dca9758..b00a7af99c 100644 --- a/content/telegraf/v1/output-plugins/application_insights/_index.md +++ b/content/telegraf/v1/output-plugins/application_insights/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/application_insights/README.md, Azure Application Insights Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/application_insights/README.md, Azure Application Insights Plugin Source --- # Azure Application Insights Output Plugin diff --git a/content/telegraf/v1/output-plugins/arc/_index.md b/content/telegraf/v1/output-plugins/arc/_index.md index 
231a332f86..0d3ef9d2b7 100644 --- a/content/telegraf/v1/output-plugins/arc/_index.md +++ b/content/telegraf/v1/output-plugins/arc/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/arc/README.md, Arc Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/arc/README.md, Arc Plugin Source --- # Arc Output Plugin diff --git a/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md b/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md index 06c44c9113..04647c41e1 100644 --- a/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md +++ b/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/azure_data_explorer/README.md, Azure Data Explorer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/azure_data_explorer/README.md, Azure Data Explorer Plugin Source --- # Azure Data Explorer Output Plugin diff --git a/content/telegraf/v1/output-plugins/azure_monitor/_index.md b/content/telegraf/v1/output-plugins/azure_monitor/_index.md index 7484cef9aa..6c2c587bf6 100644 --- a/content/telegraf/v1/output-plugins/azure_monitor/_index.md +++ b/content/telegraf/v1/output-plugins/azure_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/azure_monitor/README.md, Azure Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/azure_monitor/README.md, Azure Monitor Plugin Source --- # Azure Monitor Output 
Plugin diff --git a/content/telegraf/v1/output-plugins/bigquery/_index.md b/content/telegraf/v1/output-plugins/bigquery/_index.md index ddf55e6cc4..81e032ce84 100644 --- a/content/telegraf/v1/output-plugins/bigquery/_index.md +++ b/content/telegraf/v1/output-plugins/bigquery/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/bigquery/README.md, Google BigQuery Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/bigquery/README.md, Google BigQuery Plugin Source --- # Google BigQuery Output Plugin diff --git a/content/telegraf/v1/output-plugins/clarify/_index.md b/content/telegraf/v1/output-plugins/clarify/_index.md index 359e9799dc..03a277e44b 100644 --- a/content/telegraf/v1/output-plugins/clarify/_index.md +++ b/content/telegraf/v1/output-plugins/clarify/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/clarify/README.md, Clarify Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/clarify/README.md, Clarify Plugin Source --- # Clarify Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md b/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md index 193e4fbc47..1bfe0bccc5 100644 --- a/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md +++ b/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source --- # Google Cloud PubSub Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloudwatch/_index.md b/content/telegraf/v1/output-plugins/cloudwatch/_index.md index 5ae20f45f3..0b956edea6 100644 --- a/content/telegraf/v1/output-plugins/cloudwatch/_index.md +++ b/content/telegraf/v1/output-plugins/cloudwatch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloudwatch/README.md, Amazon CloudWatch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloudwatch/README.md, Amazon CloudWatch Plugin Source --- # Amazon CloudWatch Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md b/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md index 2e11a5175f..007c3cd87d 100644 --- a/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md +++ b/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloudwatch_logs/README.md, Amazon CloudWatch Logs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloudwatch_logs/README.md, Amazon CloudWatch Logs Plugin Source --- # Amazon CloudWatch Logs Output Plugin diff --git a/content/telegraf/v1/output-plugins/cratedb/_index.md b/content/telegraf/v1/output-plugins/cratedb/_index.md index 1a724ff001..145464efdc 100644 --- a/content/telegraf/v1/output-plugins/cratedb/_index.md +++ b/content/telegraf/v1/output-plugins/cratedb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cratedb/README.md, CrateDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cratedb/README.md, CrateDB Plugin Source --- # CrateDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/datadog/_index.md b/content/telegraf/v1/output-plugins/datadog/_index.md index 8eabf235f8..2574e93402 100644 --- a/content/telegraf/v1/output-plugins/datadog/_index.md +++ b/content/telegraf/v1/output-plugins/datadog/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.6" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/datadog/README.md, Datadog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/datadog/README.md, Datadog Plugin Source --- # Datadog Output Plugin diff --git a/content/telegraf/v1/output-plugins/discard/_index.md b/content/telegraf/v1/output-plugins/discard/_index.md index 8eba9fdc94..b587e37d99 100644 --- a/content/telegraf/v1/output-plugins/discard/_index.md +++ b/content/telegraf/v1/output-plugins/discard/_index.md @@ -10,7 +10,7 @@ introduced: "v1.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/discard/README.md, Discard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/discard/README.md, Discard Plugin Source --- # Discard Output Plugin diff --git a/content/telegraf/v1/output-plugins/dynatrace/_index.md b/content/telegraf/v1/output-plugins/dynatrace/_index.md index eafe072bb1..17dd1fa964 100644 --- a/content/telegraf/v1/output-plugins/dynatrace/_index.md +++ b/content/telegraf/v1/output-plugins/dynatrace/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, 
solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/dynatrace/README.md, Dynatrace Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/dynatrace/README.md, Dynatrace Plugin Source --- # Dynatrace Output Plugin diff --git a/content/telegraf/v1/output-plugins/elasticsearch/_index.md b/content/telegraf/v1/output-plugins/elasticsearch/_index.md index f1108f349c..c8f39f92dd 100644 --- a/content/telegraf/v1/output-plugins/elasticsearch/_index.md +++ b/content/telegraf/v1/output-plugins/elasticsearch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/elasticsearch/README.md, Elasticsearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/elasticsearch/README.md, Elasticsearch Plugin Source --- # Elasticsearch Output Plugin diff --git a/content/telegraf/v1/output-plugins/event_hubs/_index.md b/content/telegraf/v1/output-plugins/event_hubs/_index.md index 36df6804ec..d8cfa9cb8b 100644 --- a/content/telegraf/v1/output-plugins/event_hubs/_index.md +++ b/content/telegraf/v1/output-plugins/event_hubs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/event_hubs/README.md, Azure Event Hubs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/event_hubs/README.md, Azure Event Hubs Plugin Source --- # Azure Event Hubs Output Plugin diff --git a/content/telegraf/v1/output-plugins/exec/_index.md b/content/telegraf/v1/output-plugins/exec/_index.md index b5bd947a2c..6b9fc2a952 100644 --- a/content/telegraf/v1/output-plugins/exec/_index.md +++ 
b/content/telegraf/v1/output-plugins/exec/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/exec/README.md, Executable Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/exec/README.md, Executable Plugin Source --- # Executable Output Plugin diff --git a/content/telegraf/v1/output-plugins/execd/_index.md b/content/telegraf/v1/output-plugins/execd/_index.md index 3a49d678a4..0958fd4b1b 100644 --- a/content/telegraf/v1/output-plugins/execd/_index.md +++ b/content/telegraf/v1/output-plugins/execd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/execd/README.md, Executable Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/execd/README.md, Executable Daemon Plugin Source --- # Executable Daemon Output Plugin diff --git a/content/telegraf/v1/output-plugins/file/_index.md b/content/telegraf/v1/output-plugins/file/_index.md index 3145396de0..38ce613c9b 100644 --- a/content/telegraf/v1/output-plugins/file/_index.md +++ b/content/telegraf/v1/output-plugins/file/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/file/README.md, File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/file/README.md, File Plugin Source --- # File Output Plugin diff --git a/content/telegraf/v1/output-plugins/graphite/_index.md b/content/telegraf/v1/output-plugins/graphite/_index.md index 2261e32cb3..ec2cb65585 100644 --- a/content/telegraf/v1/output-plugins/graphite/_index.md +++ 
b/content/telegraf/v1/output-plugins/graphite/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/graphite/README.md, Graphite Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/graphite/README.md, Graphite Plugin Source --- # Graphite Output Plugin diff --git a/content/telegraf/v1/output-plugins/graylog/_index.md b/content/telegraf/v1/output-plugins/graylog/_index.md index f94fe15a6f..98e2c1b3f9 100644 --- a/content/telegraf/v1/output-plugins/graylog/_index.md +++ b/content/telegraf/v1/output-plugins/graylog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/graylog/README.md, Graylog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/graylog/README.md, Graylog Plugin Source --- # Graylog Output Plugin diff --git a/content/telegraf/v1/output-plugins/groundwork/_index.md b/content/telegraf/v1/output-plugins/groundwork/_index.md index 1fb8abd099..1308320bb6 100644 --- a/content/telegraf/v1/output-plugins/groundwork/_index.md +++ b/content/telegraf/v1/output-plugins/groundwork/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/groundwork/README.md, GroundWork Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/groundwork/README.md, GroundWork Plugin Source --- # GroundWork Output Plugin diff --git a/content/telegraf/v1/output-plugins/health/_index.md b/content/telegraf/v1/output-plugins/health/_index.md index c317642306..66890f70d4 100644 --- 
a/content/telegraf/v1/output-plugins/health/_index.md +++ b/content/telegraf/v1/output-plugins/health/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/health/README.md, Health Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/health/README.md, Health Plugin Source --- # Health Output Plugin @@ -60,6 +60,9 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + ## HTTP status code reported during startup i.e. before any write was called + # default_status = 200 + ## Maximum expected time between metrics being written ## Enforces an unhealthy state if there was no new metric seen for at least ## the specified time. The check is disabled by default and only used if a diff --git a/content/telegraf/v1/output-plugins/heartbeat/_index.md b/content/telegraf/v1/output-plugins/heartbeat/_index.md index c93d4b6f57..f507c8c176 100644 --- a/content/telegraf/v1/output-plugins/heartbeat/_index.md +++ b/content/telegraf/v1/output-plugins/heartbeat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/heartbeat/README.md, Heartbeat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/heartbeat/README.md, Heartbeat Plugin Source --- # Heartbeat Output Plugin @@ -61,8 +61,42 @@ to use them. 
## hostname -- hostname of the instance running Telegraf ## statistics -- number of metrics, logged errors and warnings, etc ## configs -- redacted list of configs loaded by this instance + ## logs -- detailed log-entries for this instance + ## status -- result of the status condition evaluation # include = ["hostname"] + ## Logging information filtering, only applies if "logs" is added to "include" + # [outputs.heartbeat.logs] + # ## Number of log entries to send (unlimited by default) + # ## In case more log-entries are available entries with higher log levels + # ## and more recent entries are preferred. + # # limit = 0 + # + # ## Minimum log-level for sending the entry + # # level = "error" + + ## Logical conditions to determine the agent status, only applies if "status" + ## is included in the message + # [outputs.heartbeat.status] + # ## Conditions to signal the given status as CEL programs returning a + # ## boolean. Conditions are evaluated in the order below until a program + # ## evaluates to "true". + # # ok = "false" + # # warn = "false" + # # fail = "false" + # + # ## Evaluation order of the conditions above; available: "ok", "warn", "fail" + # # order = ["ok", "warn", "fail"] + # + # ## Default status used if none of the conditions above matches + # ## available: "ok", "warn", "fail", "undefined" + # # default = "ok" + # + # ## If set, send this initial status before the first write, otherwise + # ## compute the status from the conditions and default above. + # ## available: "ok", "warn", "fail", "undefined", "" + # # initial = "" + ## Additional HTTP headers # [outputs.heartbeat.headers] # User-Agent = "telegraf" @@ -103,4 +137,143 @@ configuration directory while a new configuration is added or removed. > information. However, sensitive information might still be contained in the > URL or the path sent. Use with care! +### Logging information + +When including `logs` in the message the actual log _messages_ are included. 
+This comprises the log messages of _all_ plugins _and_ the agent itself being +logged _after_ the `Connect` function of this plugin was called, i.e. you will +not see any initialization or configuration errors in the heartbeat messages! +You can limit the messages sent within the optional `outputs.heartbeat.logs` +section where you can limit the messages by log-`level` or limit the number +of messages included using the `limit` setting. + +> [!WARNING] +> As the amount of log messages can be high, especially when configuring a low +> level such as `info` the resulting heartbeat messages might be large. Restrict +> the included messages by choosing a higher log-level and/or by using a limit! +When including `logs` in the message the number of errors and warnings logged +in this Telegraf instance are included in the heartbeat message. This comprises +_all_ log messages of all plugins and the agent itself logged _after_ the +`Connect` function of this plugin was called, i.e. you will not see any +initialization or configuration errors in the heartbeat messages! + +For getting the actual log _messages_ you can include `log-details`. Via the +optional `outputs.heartbeat.status` you can limit the messages by log-`level` +or limit the number included using the `limit` setting. + +> [!WARNING] +> As the amount of log messages can be high, especially when configuring low +> level such as `info` the resulting heartbeat messages might be large. Use the +> `log-details` option with care if network traffic is a limiting factor and +> restrict the included messages to high levels and use a limit! + +When setting the `level` option only messages with this or more severe levels +are included. + +The `limit` setting allows to specify the maximum number of log-messages +included in the heartbeat message. If the number of log-messages exceeds the +given limit they are selected by the most severe level and most recent messages +first. 
+given limit they are selected by most severe and most recent messages first. + +### Status information + +By including `status` the message will contain the status of the Telegraf +instance as configured via the `outputs.heartbeat.status` section. + +This section allows to set an `initial` state used as long as no flush was +performed by Telegraf. If `initial` is not configured or empty, the status +expressions are evaluated also before the first flush. + +The `ok`, `warn` and `fail` settings allow to specify [CEL expressions](https://cel.dev) +evaluating to a boolean value. Available information for the expressions are +listed below. The first expression evaluating to `true` defines the status. +The `order` parameter allows to customize the evaluation order. + +> [!NOTE] +> If an expression is omitted in the `order` setting it will __not__ be +> evaluated! + +The status defined via `default` is used in case none of the status expressions +evaluate to true. + +For defining expressions you can use the following variables + +- `metrics` (int) -- number of metrics arriving at this plugin +- `log_errors` (int) -- number of errors logged +- `log_warnings` (int) -- number of warnings logged +- `last_update` (time) -- time of last successful heartbeat message, can be used + to e.g. 
calculate rates +- `agent` (map) -- agent statistics, see below +- `inputs` (map) -- input plugin statistics, see below +- `outputs` (map) -- output plugin statistics, see below + +The `agent` statistics variable is a `map` with information matching the +`internal_agent` metric of the [internal input plugin](/telegraf/v1/plugins/#input-internal): + +- `metrics_written` (int) -- number of metrics written in total by all outputs +- `metrics_rejected` (int) -- number of metrics rejected in total by all outputs +- `metrics_dropped` (int) -- number of metrics dropped in total by all outputs +- `metrics_gathered` (int) -- number of metrics collected in total by all inputs +- `gather_errors` (int) -- number of errors during collection by all inputs +- `gather_timeouts` (int) -- number of collection timeouts by all inputs + +The `inputs` statistics variable is a `map` with the key denoting the plugin +type (e.g. `cpu` for `inputs.cpu`) and the value being list of plugin +statistics. Each entry in the list corresponds to an input plugin instance with +information matching the `internal_gather` metric of the +[internal input plugin](/telegraf/v1/plugins/#input-internal): + +- `id` (string) -- unique plugin identifier +- `alias` (string) -- alias set for the plugin; only exists if alias + is defined +- `errors` (int) -- collection errors for this plugin instance +- `metrics_gathered` (int) -- number of metrics collected +- `gather_time_ns` (int) -- time used to gather the metrics in nanoseconds +- `gather_timeouts` (int) -- number of timeouts during metric collection +- `startup_errors` (int) -- number of times the plugin failed to start + +The `outputs` statistics variable is a `map` with the key denoting the plugin +type (e.g. `influxdb` for `outputs.influxdb`) and the value being list of plugin +statistics. 
Each entry in the list corresponds to an output plugin instance with +information matching the `internal_write` metric of the +[internal input plugin](/telegraf/v1/plugins/#input-internal): + +- `id` (string) -- unique plugin identifier +- `alias` (string) -- alias set for the plugin; only exists if alias + is defined +- `errors` (int) -- write errors for this plugin instance +- `metrics_filtered` (int) -- number of metrics filtered by the output +- `write_time_ns` (int) -- time used to write the metrics in nanoseconds +- `startup_errors` (int) -- number of times the plugin failed to start +- `metrics_added` (int) -- number of metrics added to the output buffer +- `metrics_written` (int) -- number of metrics written to the output +- `metrics_rejected` (int) -- number of metrics rejected by the service or + serialization +- `metrics_dropped` (int) -- number of metrics dropped e.g. due to buffer + fullness +- `buffer_size` (int) -- current number of metrics currently in the output + buffer for the plugin instance +- `buffer_limit` (int) -- capacity of the output buffer; irrelevant for + disk-based buffers +- `buffer_fullness` (float) -- current ratio of metrics in the buffer to + capacity; can be greater than one (i.e. `> 100%`) + for disk-based buffers + +If not stated otherwise, all variables are accumulated since the last successful +heartbeat message. 
+ +The following functions are available: + +- `encoding` functions of the [CEL encoder library](https://github.com/google/cel-go/blob/master/ext/README.md#encoders) +- `math` functions of the [CEL math library](https://github.com/google/cel-go/blob/master/ext/README.md#math) +- `string` functions of the [CEL strings library](https://github.com/google/cel-go/blob/master/ext/README.md#strings) +- `now` function for getting the current time + [schema]: /plugins/outputs/heartbeat/schema_v1.json +[internal_plugin]: /plugins/inputs/internal/README.md + +[cel]: https://cel.dev +[cel_encoder]: https://github.com/google/cel-go/blob/master/ext/README.md#encoders +[cel_math]: https://github.com/google/cel-go/blob/master/ext/README.md#math +[cel_strings]: https://github.com/google/cel-go/blob/master/ext/README.md#strings diff --git a/content/telegraf/v1/output-plugins/http/_index.md b/content/telegraf/v1/output-plugins/http/_index.md index 4d5c5d68a4..c39dd87b17 100644 --- a/content/telegraf/v1/output-plugins/http/_index.md +++ b/content/telegraf/v1/output-plugins/http/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/http/README.md, HTTP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/http/README.md, HTTP Plugin Source --- # HTTP Output Plugin @@ -69,6 +69,9 @@ to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" + ## Use the local address for connecting, assigned by the OS by default + # local_address = "" + ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/output-plugins/influxdb/_index.md b/content/telegraf/v1/output-plugins/influxdb/_index.md index 27d66099f5..b574679c6c 100644 --- a/content/telegraf/v1/output-plugins/influxdb/_index.md +++ b/content/telegraf/v1/output-plugins/influxdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/influxdb/README.md, InfluxDB v1.x Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb/README.md, InfluxDB v1.x Plugin Source --- # InfluxDB v1.x Output Plugin diff --git a/content/telegraf/v1/output-plugins/influxdb_v2/_index.md b/content/telegraf/v1/output-plugins/influxdb_v2/_index.md index be5f1e062c..dfbec8759e 100644 --- a/content/telegraf/v1/output-plugins/influxdb_v2/_index.md +++ b/content/telegraf/v1/output-plugins/influxdb_v2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/influxdb_v2/README.md, InfluxDB v2.x Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb_v2/README.md, InfluxDB v2.x Plugin Source --- # InfluxDB v2.x Output Plugin @@ -42,7 +42,7 @@ more details on how to use them. ## Configuration ```toml @sample.conf -# Configuration for sending metrics to InfluxDB 2.0 +# Configuration for sending metrics to InfluxDB 2.x [[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. 
## diff --git a/content/telegraf/v1/output-plugins/influxdb_v3/_index.md b/content/telegraf/v1/output-plugins/influxdb_v3/_index.md new file mode 100644 index 0000000000..3ba99085df --- /dev/null +++ b/content/telegraf/v1/output-plugins/influxdb_v3/_index.md @@ -0,0 +1,146 @@ +--- +description: "Telegraf plugin for sending metrics to InfluxDB v3.x" +menu: + telegraf_v1_ref: + parent: output_plugins_reference + name: InfluxDB v3.x + identifier: output-influxdb_v3 +tags: [InfluxDB v3.x, "output-plugins", "configuration", "datastore"] +introduced: "v1.38.0" +os_support: "freebsd, linux, macos, solaris, windows" +related: + - /telegraf/v1/configure_plugins/ + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb_v3/README.md, InfluxDB v3.x Plugin Source +--- + +# InfluxDB v3.x Output Plugin + +This plugin writes metrics to an [InfluxDB v3.x](https://docs.influxdata.com) Core or Enterprise +instance via the HTTP API. + +**Introduced in:** Telegraf v1.38.0 +**Tags:** datastore +**OS support:** all + +[influxdb_v3]: https://docs.influxdata.com + +## Global configuration options + +Plugins support additional global and plugin configuration settings for tasks +such as modifying metrics, tags, and fields, creating aliases, and configuring +plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Secret-store support + +This plugin supports secrets from secret-stores for the `token` option. +See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how +to use them. + +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + +## Configuration + +```toml @sample.conf +# Configuration for sending metrics to InfluxDB 3.x Core and Enterprise +[[outputs.influxdb_v3]] + ## Multiple URLs can be specified but only ONE of them will be selected + ## randomly in each interval for writing.
If endpoints are unavailable another + ## one will be used until all are exhausted or the write succeeds. + urls = ["http://127.0.0.1:8181"] + + ## Token for authentication + token = "" + + ## Destination database to write into + database = "" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the database tag will not be added to the metric + # exclude_database_tag = false + + ## Wait for WAL persistence to complete synchronization + ## Setting this to false reduces latency but increases the risk of data loss. + ## See https://docs.influxdata.com/influxdb3/enterprise/write-data/http-api/v3-write-lp/#use-no_sync-for-immediate-write-responses + # sync = true + + ## Enable or disable conversion of unsigned integer fields to signed integers + ## This is useful if existing data exist as signed integers e.g. from previous + ## versions of InfluxDB. + # convert_uint_to_int = false + + ## Omit the timestamp of the metrics when sending to allow InfluxDB to set the + ## timestamp of the data during ingestion. You likely want this to be false + ## to submit the metric timestamp + # omit_timestamp = false + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, available values are "gzip", + ## "none" and "identity" + # content_encoding = "gzip" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## HTTP connection settings + # idle_conn_timeout = "0s" + # max_idle_conn = 0 + # max_idle_conn_per_host = 0 + # response_timeout = "0s" + + ## Use the local address for connecting, assigned by the OS by default + # local_address = "" + + ## Optional proxy settings + # use_system_proxy = false + # http_proxy_url = "" + + ## Optional TLS settings + ## Set to true/false to enforce TLS being enabled/disabled. If not set, + ## enable TLS only if any of the other options are specified. 
+ # tls_enable = + ## Trusted root certificates for server + # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication + # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication + # tls_key = "/path/to/keyfile" + ## Password for the key file if it is encrypted + # tls_key_pwd = "" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" + ## Minimal TLS version to accept by the client + # tls_min_version = "TLS12" + ## List of ciphers to accept, by default all secure ciphers will be accepted + ## See https://pkg.go.dev/crypto/tls#pkg-constants for supported values. + ## Use "all", "secure" and "insecure" to add all supported ciphers, secure + ## suites or insecure suites respectively. + # tls_cipher_suites = ["secure"] + ## Renegotiation method, "never", "once" or "freely" + # tls_renegotiation_method = "never" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2.
+ # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://identityprovider/oauth2/v1/token" + # audience = "" + # scopes = ["urn:opc:idm:__myscopes__"] + + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "0s" +``` diff --git a/content/telegraf/v1/output-plugins/inlong/_index.md b/content/telegraf/v1/output-plugins/inlong/_index.md index 139eb91305..1568c504b6 100644 --- a/content/telegraf/v1/output-plugins/inlong/_index.md +++ b/content/telegraf/v1/output-plugins/inlong/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/inlong/README.md, Inlong Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/inlong/README.md, Inlong Plugin Source --- # Inlong Output Plugin diff --git a/content/telegraf/v1/output-plugins/instrumental/_index.md b/content/telegraf/v1/output-plugins/instrumental/_index.md index 285ab02c9b..ebc6fdb2a8 100644 --- a/content/telegraf/v1/output-plugins/instrumental/_index.md +++ b/content/telegraf/v1/output-plugins/instrumental/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/instrumental/README.md, Instrumental Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/instrumental/README.md, Instrumental
Plugin Source --- # Instrumental Output Plugin diff --git a/content/telegraf/v1/output-plugins/iotdb/_index.md b/content/telegraf/v1/output-plugins/iotdb/_index.md index e617d99b83..45776967eb 100644 --- a/content/telegraf/v1/output-plugins/iotdb/_index.md +++ b/content/telegraf/v1/output-plugins/iotdb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/iotdb/README.md, Apache IoTDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/iotdb/README.md, Apache IoTDB Plugin Source --- # Apache IoTDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/kafka/_index.md b/content/telegraf/v1/output-plugins/kafka/_index.md index 88c5a79740..d04a678479 100644 --- a/content/telegraf/v1/output-plugins/kafka/_index.md +++ b/content/telegraf/v1/output-plugins/kafka/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.7" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/kafka/README.md, Kafka Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/kafka/README.md, Kafka Plugin Source --- # Kafka Output Plugin diff --git a/content/telegraf/v1/output-plugins/kinesis/_index.md b/content/telegraf/v1/output-plugins/kinesis/_index.md index 43f0c87c27..53526180e8 100644 --- a/content/telegraf/v1/output-plugins/kinesis/_index.md +++ b/content/telegraf/v1/output-plugins/kinesis/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/kinesis/README.md, Amazon Kinesis Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/kinesis/README.md, Amazon Kinesis 
Plugin Source --- # Amazon Kinesis Output Plugin diff --git a/content/telegraf/v1/output-plugins/librato/_index.md b/content/telegraf/v1/output-plugins/librato/_index.md index 5743b3848b..a77ad163bc 100644 --- a/content/telegraf/v1/output-plugins/librato/_index.md +++ b/content/telegraf/v1/output-plugins/librato/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/librato/README.md, Librato Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/librato/README.md, Librato Plugin Source --- # Librato Output Plugin diff --git a/content/telegraf/v1/output-plugins/logzio/_index.md b/content/telegraf/v1/output-plugins/logzio/_index.md index 66bcc79f89..9f7e4044c5 100644 --- a/content/telegraf/v1/output-plugins/logzio/_index.md +++ b/content/telegraf/v1/output-plugins/logzio/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/logzio/README.md, Logz.io Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/logzio/README.md, Logz.io Plugin Source --- # Logz.io Output Plugin diff --git a/content/telegraf/v1/output-plugins/loki/_index.md b/content/telegraf/v1/output-plugins/loki/_index.md index fa99386f72..2d1b16ddb8 100644 --- a/content/telegraf/v1/output-plugins/loki/_index.md +++ b/content/telegraf/v1/output-plugins/loki/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/loki/README.md, Grafana Loki Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/loki/README.md, Grafana Loki Plugin 
Source --- # Grafana Loki Output Plugin diff --git a/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md b/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md index 4607efbda4..54a32c47c1 100644 --- a/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md +++ b/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/microsoft_fabric/README.md, Microsoft Fabric Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/microsoft_fabric/README.md, Microsoft Fabric Plugin Source --- # Microsoft Fabric Output Plugin diff --git a/content/telegraf/v1/output-plugins/mongodb/_index.md b/content/telegraf/v1/output-plugins/mongodb/_index.md index 412701a0bb..58f485a763 100644 --- a/content/telegraf/v1/output-plugins/mongodb/_index.md +++ b/content/telegraf/v1/output-plugins/mongodb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/mongodb/README.md, MongoDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/mongodb/README.md, MongoDB Plugin Source --- # MongoDB Output Plugin @@ -49,43 +49,47 @@ to use them. 
```toml @sample.conf # A plugin that can transmit logs to mongodb [[outputs.mongodb]] - # connection string examples for mongodb + ## Connection string + ## example: dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" dsn = "mongodb://localhost:27017" - # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" - # overrides serverSelectionTimeoutMS in dsn if set + ## Overrides serverSelectionTimeoutMS in DSN if set # timeout = "30s" - # default authentication, optional + ## Authentication method, available options are NONE, PLAIN, SCRAM, X509 # authentication = "NONE" - # for SCRAM-SHA-256 authentication - # authentication = "SCRAM" - # username = "root" - # password = "***" - - ## for PLAIN authentication (e.g., LDAP) - ## IMPORTANT: PLAIN authentication sends credentials in plaintext during the - ## authentication handshake. Always use TLS to encrypt credentials in transit. - # authentication = "PLAIN" - # username = "myuser" - # password = "***" - - # for x509 certificate authentication - # authentication = "X509" - # tls_ca = "ca.pem" - # tls_key = "client.pem" - # # tls_key_pwd = "changeme" # required for encrypted tls_key - # insecure_skip_verify = false - - # database to store measurements and time series collections + # ## for SCRAM-SHA-256 authentication + # # authentication = "SCRAM" + # # username = "root" + # # password = "***" + + # ## for PLAIN authentication (e.g., LDAP) + # ## IMPORTANT: PLAIN authentication sends credentials in plaintext during the + # ## authentication handshake. Always use TLS to encrypt credentials in transit. 
+ # # authentication = "PLAIN" + # # username = "myuser" + # # password = "***" + + # ## X509 based certificate authentication + # # authentication = "X509" + # # tls_ca = "ca.pem" + # # tls_key = "client.pem" + # # # tls_key_pwd = "changeme" # required for encrypted tls_key + # # insecure_skip_verify = false + + ## Database to store measurements and time series collections # database = "telegraf" - # granularity can be seconds, minutes, or hours. - # configuring this value will be based on your input collection frequency. - # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection + ## Granularity can be seconds, minutes, or hours. + ## Configuring this value will be based on your input collection frequency + ## see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection # granularity = "seconds" - # optionally set a TTL to automatically expire documents from the measurement collections. + ## TTL to automatically expire documents from the measurement collections. # ttl = "360h" + + ## If true, write multiple metrics for the same collection in a batched + ## fashion. Otherwise, write each metric individually. 
+ # write_batch = false ``` diff --git a/content/telegraf/v1/output-plugins/mqtt/_index.md b/content/telegraf/v1/output-plugins/mqtt/_index.md index 90a55b3a5c..66ec6f64e4 100644 --- a/content/telegraf/v1/output-plugins/mqtt/_index.md +++ b/content/telegraf/v1/output-plugins/mqtt/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/mqtt/README.md, MQTT Producer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/mqtt/README.md, MQTT Producer Plugin Source --- # MQTT Producer Output Plugin diff --git a/content/telegraf/v1/output-plugins/nats/_index.md b/content/telegraf/v1/output-plugins/nats/_index.md index a58f54476e..546b1f3c3e 100644 --- a/content/telegraf/v1/output-plugins/nats/_index.md +++ b/content/telegraf/v1/output-plugins/nats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nats/README.md, NATS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nats/README.md, NATS Plugin Source --- # NATS Output Plugin diff --git a/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md b/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md index 8e55aad07a..290f93b835 100644 --- a/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md +++ b/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nebius_cloud_monitoring/README.md, Nebius Cloud Monitoring Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nebius_cloud_monitoring/README.md, Nebius Cloud Monitoring Plugin Source --- # Nebius Cloud Monitoring Output Plugin diff --git a/content/telegraf/v1/output-plugins/newrelic/_index.md b/content/telegraf/v1/output-plugins/newrelic/_index.md index f0df09e5d1..9b35ab3be5 100644 --- a/content/telegraf/v1/output-plugins/newrelic/_index.md +++ b/content/telegraf/v1/output-plugins/newrelic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/newrelic/README.md, New Relic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/newrelic/README.md, New Relic Plugin Source --- # New Relic Output Plugin diff --git a/content/telegraf/v1/output-plugins/nsq/_index.md b/content/telegraf/v1/output-plugins/nsq/_index.md index 5dbb9bf6e8..239fd7fa46 100644 --- a/content/telegraf/v1/output-plugins/nsq/_index.md +++ b/content/telegraf/v1/output-plugins/nsq/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nsq/README.md, NSQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nsq/README.md, NSQ Plugin Source --- # NSQ Output Plugin diff --git a/content/telegraf/v1/output-plugins/opensearch/_index.md b/content/telegraf/v1/output-plugins/opensearch/_index.md index 273d6c951e..eb44158561 100644 --- a/content/telegraf/v1/output-plugins/opensearch/_index.md +++ b/content/telegraf/v1/output-plugins/opensearch/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opensearch/README.md, OpenSearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opensearch/README.md, OpenSearch Plugin Source --- # OpenSearch Output Plugin diff --git a/content/telegraf/v1/output-plugins/opentelemetry/_index.md b/content/telegraf/v1/output-plugins/opentelemetry/_index.md index 9828b7f89b..2630571246 100644 --- a/content/telegraf/v1/output-plugins/opentelemetry/_index.md +++ b/content/telegraf/v1/output-plugins/opentelemetry/_index.md @@ -10,13 +10,13 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opentelemetry/README.md, OpenTelemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opentelemetry/README.md, OpenTelemetry Plugin Source --- # OpenTelemetry Output Plugin This plugin writes metrics to [OpenTelemetry](https://opentelemetry.io) servers and agents -via gRPC. +via gRPC or HTTP. **Introduced in:** Telegraf v1.20.0 **Tags:** logging, messaging @@ -38,8 +38,12 @@ plugin ordering. 
See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for # Send OpenTelemetry metrics over gRPC [[outputs.opentelemetry]] ## Override the default (localhost:4317) OpenTelemetry gRPC service - ## address:port + ## When the protocol is grpc, address:port + ## When the protocol is http, http(s)://address:port/path # service_address = "localhost:4317" + ## Override the default (protobuf) encodingType when Protocol is http + ## protobuf, json + # encoding_type = "protobuf" ## Override the default (5s) request timeout # timeout = "5s" diff --git a/content/telegraf/v1/output-plugins/opentsdb/_index.md b/content/telegraf/v1/output-plugins/opentsdb/_index.md index c6ca28884c..c4c0803ff0 100644 --- a/content/telegraf/v1/output-plugins/opentsdb/_index.md +++ b/content/telegraf/v1/output-plugins/opentsdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opentsdb/README.md, OpenTSDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opentsdb/README.md, OpenTSDB Plugin Source --- # OpenTSDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/parquet/_index.md b/content/telegraf/v1/output-plugins/parquet/_index.md index 54a93686e1..361eb01aa1 100644 --- a/content/telegraf/v1/output-plugins/parquet/_index.md +++ b/content/telegraf/v1/output-plugins/parquet/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/parquet/README.md, Parquet Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/parquet/README.md, Parquet Plugin Source --- # Parquet Output Plugin diff --git a/content/telegraf/v1/output-plugins/postgresql/_index.md 
b/content/telegraf/v1/output-plugins/postgresql/_index.md index e3d0fb8dc8..cf227560de 100644 --- a/content/telegraf/v1/output-plugins/postgresql/_index.md +++ b/content/telegraf/v1/output-plugins/postgresql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/postgresql/README.md, PostgreSQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/postgresql/README.md, PostgreSQL Plugin Source --- # PostgreSQL Output Plugin diff --git a/content/telegraf/v1/output-plugins/prometheus_client/_index.md b/content/telegraf/v1/output-plugins/prometheus_client/_index.md index 4538073861..f1c6bcdeec 100644 --- a/content/telegraf/v1/output-plugins/prometheus_client/_index.md +++ b/content/telegraf/v1/output-plugins/prometheus_client/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/prometheus_client/README.md, Prometheus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/prometheus_client/README.md, Prometheus Plugin Source --- # Prometheus Output Plugin @@ -84,6 +84,12 @@ to use them. ## Unless set to false all string metrics will be sent as labels. # string_as_label = true + ## Control how metric names and label names are sanitized. + ## The default "legacy" keeps ASCII-only Prometheus name rules. + ## Set to "utf8" to allow UTF-8 metric and label names. + ## Valid options: "legacy", "utf8" + # name_sanitization = "legacy" + ## If set, enable TLS with the given certificate. 
# tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" diff --git a/content/telegraf/v1/output-plugins/quix/_index.md b/content/telegraf/v1/output-plugins/quix/_index.md index 06b3c9db0a..15bfdde24d 100644 --- a/content/telegraf/v1/output-plugins/quix/_index.md +++ b/content/telegraf/v1/output-plugins/quix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/quix/README.md, Quix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/quix/README.md, Quix Plugin Source --- # Quix Output Plugin diff --git a/content/telegraf/v1/output-plugins/redistimeseries/_index.md b/content/telegraf/v1/output-plugins/redistimeseries/_index.md index b53fdcd48f..66b7b1a309 100644 --- a/content/telegraf/v1/output-plugins/redistimeseries/_index.md +++ b/content/telegraf/v1/output-plugins/redistimeseries/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/redistimeseries/README.md, Redis Time Series Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/redistimeseries/README.md, Redis Time Series Plugin Source --- # Redis Time Series Output Plugin @@ -56,6 +56,13 @@ to use them. 
## Timeout for operations such as ping or sending metrics # timeout = "10s" + ## Set a time-to-live (TTL) on each Redis key + ## If set, Redis will expire the key after the specified duration + ## The TTL is refreshed on every write, so the key only expires + ## if no new data arrives within the configured period + ## Disabled by default (no expiry) + # expire = "" + ## Enable attempt to convert string fields to numeric values ## If "false" or in case the string value cannot be converted the string ## field will be dropped. diff --git a/content/telegraf/v1/output-plugins/remotefile/_index.md b/content/telegraf/v1/output-plugins/remotefile/_index.md index 6474bc1dd3..00c444a6e8 100644 --- a/content/telegraf/v1/output-plugins/remotefile/_index.md +++ b/content/telegraf/v1/output-plugins/remotefile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/remotefile/README.md, Remote File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/remotefile/README.md, Remote File Plugin Source --- # Remote File Output Plugin diff --git a/content/telegraf/v1/output-plugins/riemann/_index.md b/content/telegraf/v1/output-plugins/riemann/_index.md index 2d1d3e6a43..2a3a3a7140 100644 --- a/content/telegraf/v1/output-plugins/riemann/_index.md +++ b/content/telegraf/v1/output-plugins/riemann/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/riemann/README.md, Riemann Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/riemann/README.md, Riemann Plugin Source --- # Riemann Output Plugin diff --git a/content/telegraf/v1/output-plugins/sensu/_index.md 
b/content/telegraf/v1/output-plugins/sensu/_index.md index 28c9d31bc3..8b81682398 100644 --- a/content/telegraf/v1/output-plugins/sensu/_index.md +++ b/content/telegraf/v1/output-plugins/sensu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sensu/README.md, Sensu Go Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sensu/README.md, Sensu Go Plugin Source --- # Sensu Go Output Plugin diff --git a/content/telegraf/v1/output-plugins/signalfx/_index.md b/content/telegraf/v1/output-plugins/signalfx/_index.md index 6bc09f840b..4b581611c6 100644 --- a/content/telegraf/v1/output-plugins/signalfx/_index.md +++ b/content/telegraf/v1/output-plugins/signalfx/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/signalfx/README.md, SignalFx Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/signalfx/README.md, SignalFx Plugin Source --- # SignalFx Output Plugin diff --git a/content/telegraf/v1/output-plugins/socket_writer/_index.md b/content/telegraf/v1/output-plugins/socket_writer/_index.md index 3171264755..ba29532a9b 100644 --- a/content/telegraf/v1/output-plugins/socket_writer/_index.md +++ b/content/telegraf/v1/output-plugins/socket_writer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/socket_writer/README.md, Socket Writer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/socket_writer/README.md, Socket Writer Plugin Source --- # Socket Writer Output Plugin diff --git 
a/content/telegraf/v1/output-plugins/sql/_index.md b/content/telegraf/v1/output-plugins/sql/_index.md index 5359dbf733..bd7851586b 100644 --- a/content/telegraf/v1/output-plugins/sql/_index.md +++ b/content/telegraf/v1/output-plugins/sql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sql/README.md, SQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sql/README.md, SQL Plugin Source --- # SQL Output Plugin diff --git a/content/telegraf/v1/output-plugins/stackdriver/_index.md b/content/telegraf/v1/output-plugins/stackdriver/_index.md index 9945f4a364..c3e4c7d5d3 100644 --- a/content/telegraf/v1/output-plugins/stackdriver/_index.md +++ b/content/telegraf/v1/output-plugins/stackdriver/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/stackdriver/README.md, Google Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/stackdriver/README.md, Google Cloud Monitoring Plugin Source --- # Google Cloud Monitoring Output Plugin @@ -42,6 +42,14 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for [CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins +## Secret-store support + +This plugin supports secrets from secret-stores for the `token` option. +See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how +to use them. + +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + ## Configuration ```toml @sample.conf @@ -50,6 +58,9 @@ plugin ordering. 
See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for ## GCP Project project = "erudite-bloom-151019" + ## GCP access token for authorizing calls to Cloud Monitoring APIs + # token = "@{gcp_auth:token}" + ## Quota Project ## Specifies the Google Cloud project that should be billed for metric ingestion. ## If omitted, the quota is charged to the service account’s default project. diff --git a/content/telegraf/v1/output-plugins/stomp/_index.md b/content/telegraf/v1/output-plugins/stomp/_index.md index 9f1b5bffa8..6a7bcdd35f 100644 --- a/content/telegraf/v1/output-plugins/stomp/_index.md +++ b/content/telegraf/v1/output-plugins/stomp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/stomp/README.md, ActiveMQ STOMP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/stomp/README.md, ActiveMQ STOMP Plugin Source --- # ActiveMQ STOMP Output Plugin diff --git a/content/telegraf/v1/output-plugins/sumologic/_index.md b/content/telegraf/v1/output-plugins/sumologic/_index.md index 9535aa1d7b..83b4434e4e 100644 --- a/content/telegraf/v1/output-plugins/sumologic/_index.md +++ b/content/telegraf/v1/output-plugins/sumologic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sumologic/README.md, Sumo Logic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sumologic/README.md, Sumo Logic Plugin Source --- # Sumo Logic Output Plugin diff --git a/content/telegraf/v1/output-plugins/syslog/_index.md b/content/telegraf/v1/output-plugins/syslog/_index.md index d67879247d..7d748fcb49 100644 --- a/content/telegraf/v1/output-plugins/syslog/_index.md +++ 
b/content/telegraf/v1/output-plugins/syslog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/syslog/README.md, Syslog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/syslog/README.md, Syslog Plugin Source --- # Syslog Output Plugin diff --git a/content/telegraf/v1/output-plugins/timestream/_index.md b/content/telegraf/v1/output-plugins/timestream/_index.md index 76a7391539..845caaa8c9 100644 --- a/content/telegraf/v1/output-plugins/timestream/_index.md +++ b/content/telegraf/v1/output-plugins/timestream/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/timestream/README.md, Amazon Timestream Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/timestream/README.md, Amazon Timestream Plugin Source --- # Amazon Timestream Output Plugin diff --git a/content/telegraf/v1/output-plugins/warp10/_index.md b/content/telegraf/v1/output-plugins/warp10/_index.md index 9abc0635ca..11eafbe5e4 100644 --- a/content/telegraf/v1/output-plugins/warp10/_index.md +++ b/content/telegraf/v1/output-plugins/warp10/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/warp10/README.md, Warp10 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/warp10/README.md, Warp10 Plugin Source --- # Warp10 Output Plugin diff --git a/content/telegraf/v1/output-plugins/wavefront/_index.md b/content/telegraf/v1/output-plugins/wavefront/_index.md index 5354bbc075..bb1632b058 100644 --- 
a/content/telegraf/v1/output-plugins/wavefront/_index.md +++ b/content/telegraf/v1/output-plugins/wavefront/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/wavefront/README.md, Wavefront Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/wavefront/README.md, Wavefront Plugin Source --- # Wavefront Output Plugin diff --git a/content/telegraf/v1/output-plugins/websocket/_index.md b/content/telegraf/v1/output-plugins/websocket/_index.md index 2cf7d56e7a..f39c5a93aa 100644 --- a/content/telegraf/v1/output-plugins/websocket/_index.md +++ b/content/telegraf/v1/output-plugins/websocket/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/websocket/README.md, Websocket Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/websocket/README.md, Websocket Plugin Source --- # Websocket Output Plugin diff --git a/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md b/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md index 32e86d1d9e..26bf629ed2 100644 --- a/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md +++ b/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/yandex_cloud_monitoring/README.md, Yandex Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/yandex_cloud_monitoring/README.md, Yandex Cloud Monitoring Plugin Source --- # Yandex Cloud Monitoring 
Output Plugin diff --git a/content/telegraf/v1/output-plugins/zabbix/_index.md b/content/telegraf/v1/output-plugins/zabbix/_index.md index b1cbac2421..5bc8888fef 100644 --- a/content/telegraf/v1/output-plugins/zabbix/_index.md +++ b/content/telegraf/v1/output-plugins/zabbix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.30.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/zabbix/README.md, Zabbix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/zabbix/README.md, Zabbix Plugin Source --- # Zabbix Output Plugin diff --git a/content/telegraf/v1/processor-plugins/aws_ec2/_index.md b/content/telegraf/v1/processor-plugins/aws_ec2/_index.md index 002487e2a7..ca824176e4 100644 --- a/content/telegraf/v1/processor-plugins/aws_ec2/_index.md +++ b/content/telegraf/v1/processor-plugins/aws_ec2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/aws_ec2/README.md, AWS EC2 Metadata Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/aws_ec2/README.md, AWS EC2 Metadata Plugin Source --- # AWS EC2 Metadata Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/batch/_index.md b/content/telegraf/v1/processor-plugins/batch/_index.md index 73ff3f7880..3f8d0392ff 100644 --- a/content/telegraf/v1/processor-plugins/batch/_index.md +++ b/content/telegraf/v1/processor-plugins/batch/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/batch/README.md, Batch Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/batch/README.md, Batch Plugin Source --- # Batch Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/clone/_index.md b/content/telegraf/v1/processor-plugins/clone/_index.md index 32519a7e27..a3f1bed7bc 100644 --- a/content/telegraf/v1/processor-plugins/clone/_index.md +++ b/content/telegraf/v1/processor-plugins/clone/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/clone/README.md, Clone Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/clone/README.md, Clone Plugin Source --- # Clone Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/converter/_index.md b/content/telegraf/v1/processor-plugins/converter/_index.md index 70a2f831de..cd5dc402ac 100644 --- a/content/telegraf/v1/processor-plugins/converter/_index.md +++ b/content/telegraf/v1/processor-plugins/converter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/converter/README.md, Converter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/converter/README.md, Converter Plugin Source --- # Converter Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md b/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md index b00022072c..09807da975 100644 --- a/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md +++ b/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/cumulative_sum/README.md, Cumulative Sum Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/cumulative_sum/README.md, Cumulative Sum Plugin Source --- # Cumulative Sum Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/date/_index.md b/content/telegraf/v1/processor-plugins/date/_index.md index 6aae7d255c..2ce955e9ec 100644 --- a/content/telegraf/v1/processor-plugins/date/_index.md +++ b/content/telegraf/v1/processor-plugins/date/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/date/README.md, Date Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/date/README.md, Date Plugin Source --- # Date Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/dedup/_index.md b/content/telegraf/v1/processor-plugins/dedup/_index.md index 93de9b7112..9691aba84b 100644 --- a/content/telegraf/v1/processor-plugins/dedup/_index.md +++ b/content/telegraf/v1/processor-plugins/dedup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/dedup/README.md, Dedup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/dedup/README.md, Dedup Plugin Source --- # Dedup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/defaults/_index.md b/content/telegraf/v1/processor-plugins/defaults/_index.md index 1547879b2d..d4ebb1c82d 100644 --- a/content/telegraf/v1/processor-plugins/defaults/_index.md +++ b/content/telegraf/v1/processor-plugins/defaults/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/defaults/README.md, Defaults Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/defaults/README.md, Defaults Plugin Source --- # Defaults Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/enum/_index.md b/content/telegraf/v1/processor-plugins/enum/_index.md index 1bdababffe..d180f7105b 100644 --- a/content/telegraf/v1/processor-plugins/enum/_index.md +++ b/content/telegraf/v1/processor-plugins/enum/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/enum/README.md, Enum Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/enum/README.md, Enum Plugin Source --- # Enum Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/execd/_index.md b/content/telegraf/v1/processor-plugins/execd/_index.md index 20eadfdfc8..2270dbb3fb 100644 --- a/content/telegraf/v1/processor-plugins/execd/_index.md +++ b/content/telegraf/v1/processor-plugins/execd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/execd/README.md, Execd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/execd/README.md, Execd Plugin Source --- # Execd Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/filepath/_index.md b/content/telegraf/v1/processor-plugins/filepath/_index.md index f210ae6616..f31d1ff255 100644 --- a/content/telegraf/v1/processor-plugins/filepath/_index.md +++ b/content/telegraf/v1/processor-plugins/filepath/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: 
"freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/filepath/README.md, Filepath Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/filepath/README.md, Filepath Plugin Source --- # Filepath Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/filter/_index.md b/content/telegraf/v1/processor-plugins/filter/_index.md index f49fca7cef..fb8dbfdbd8 100644 --- a/content/telegraf/v1/processor-plugins/filter/_index.md +++ b/content/telegraf/v1/processor-plugins/filter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/filter/README.md, Filter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/filter/README.md, Filter Plugin Source --- # Filter Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/ifname/_index.md b/content/telegraf/v1/processor-plugins/ifname/_index.md index 09ae567200..feb994de5c 100644 --- a/content/telegraf/v1/processor-plugins/ifname/_index.md +++ b/content/telegraf/v1/processor-plugins/ifname/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/ifname/README.md, Network Interface Name Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/ifname/README.md, Network Interface Name Plugin Source --- # Network Interface Name Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/lookup/_index.md b/content/telegraf/v1/processor-plugins/lookup/_index.md index 61615c0849..271ab8484b 100644 --- a/content/telegraf/v1/processor-plugins/lookup/_index.md +++ 
b/content/telegraf/v1/processor-plugins/lookup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/lookup/README.md, Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/lookup/README.md, Lookup Plugin Source --- # Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/noise/_index.md b/content/telegraf/v1/processor-plugins/noise/_index.md index 327940fda1..6a451438fc 100644 --- a/content/telegraf/v1/processor-plugins/noise/_index.md +++ b/content/telegraf/v1/processor-plugins/noise/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/noise/README.md, Noise Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/noise/README.md, Noise Plugin Source --- # Noise Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/override/_index.md b/content/telegraf/v1/processor-plugins/override/_index.md index 8dd1048656..42ee0f52d6 100644 --- a/content/telegraf/v1/processor-plugins/override/_index.md +++ b/content/telegraf/v1/processor-plugins/override/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/override/README.md, Override Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/override/README.md, Override Plugin Source --- # Override Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/parser/_index.md b/content/telegraf/v1/processor-plugins/parser/_index.md index f4afb64ee6..5dca40d7d9 100644 --- 
a/content/telegraf/v1/processor-plugins/parser/_index.md +++ b/content/telegraf/v1/processor-plugins/parser/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/parser/README.md, Parser Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/parser/README.md, Parser Plugin Source --- # Parser Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/pivot/_index.md b/content/telegraf/v1/processor-plugins/pivot/_index.md index 8e4855d9e2..6e6e4f65f3 100644 --- a/content/telegraf/v1/processor-plugins/pivot/_index.md +++ b/content/telegraf/v1/processor-plugins/pivot/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/pivot/README.md, Pivot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/pivot/README.md, Pivot Plugin Source --- # Pivot Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/port_name/_index.md b/content/telegraf/v1/processor-plugins/port_name/_index.md index f74cf96774..3e6244797d 100644 --- a/content/telegraf/v1/processor-plugins/port_name/_index.md +++ b/content/telegraf/v1/processor-plugins/port_name/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/port_name/README.md, Port Name Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/port_name/README.md, Port Name Lookup Plugin Source --- # Port Name Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/printer/_index.md 
b/content/telegraf/v1/processor-plugins/printer/_index.md index f42278c06c..5cda8275f9 100644 --- a/content/telegraf/v1/processor-plugins/printer/_index.md +++ b/content/telegraf/v1/processor-plugins/printer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/printer/README.md, Printer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/printer/README.md, Printer Plugin Source --- # Printer Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/regex/_index.md b/content/telegraf/v1/processor-plugins/regex/_index.md index ad91a57035..f2d13c045f 100644 --- a/content/telegraf/v1/processor-plugins/regex/_index.md +++ b/content/telegraf/v1/processor-plugins/regex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/regex/README.md, Regex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/regex/README.md, Regex Plugin Source --- # Regex Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/rename/_index.md b/content/telegraf/v1/processor-plugins/rename/_index.md index b479855cbc..b0becb81b9 100644 --- a/content/telegraf/v1/processor-plugins/rename/_index.md +++ b/content/telegraf/v1/processor-plugins/rename/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/rename/README.md, Rename Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/rename/README.md, Rename Plugin Source --- # Rename Processor Plugin diff --git 
a/content/telegraf/v1/processor-plugins/reverse_dns/_index.md b/content/telegraf/v1/processor-plugins/reverse_dns/_index.md index 0c10608c4f..6130a66d27 100644 --- a/content/telegraf/v1/processor-plugins/reverse_dns/_index.md +++ b/content/telegraf/v1/processor-plugins/reverse_dns/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/reverse_dns/README.md, Reverse DNS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/reverse_dns/README.md, Reverse DNS Plugin Source --- # Reverse DNS Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/round/_index.md b/content/telegraf/v1/processor-plugins/round/_index.md index 1f8ef22874..24a97f4b88 100644 --- a/content/telegraf/v1/processor-plugins/round/_index.md +++ b/content/telegraf/v1/processor-plugins/round/_index.md @@ -10,7 +10,7 @@ introduced: "v1.36.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/round/README.md, Round Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/round/README.md, Round Plugin Source --- # Round Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/s2geo/_index.md b/content/telegraf/v1/processor-plugins/s2geo/_index.md index 0acfdb7f80..7b4d50bd69 100644 --- a/content/telegraf/v1/processor-plugins/s2geo/_index.md +++ b/content/telegraf/v1/processor-plugins/s2geo/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/s2geo/README.md, S2 Geo Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/s2geo/README.md, S2 Geo Plugin Source --- # S2 Geo Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/scale/_index.md b/content/telegraf/v1/processor-plugins/scale/_index.md index 18fb212f50..8b47950c42 100644 --- a/content/telegraf/v1/processor-plugins/scale/_index.md +++ b/content/telegraf/v1/processor-plugins/scale/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/scale/README.md, Scale Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/scale/README.md, Scale Plugin Source --- # Scale Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md b/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md index 8f67100fbc..b32a1e3acf 100644 --- a/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md +++ b/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.30.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/snmp_lookup/README.md, SNMP Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/snmp_lookup/README.md, SNMP Lookup Plugin Source --- # SNMP Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/split/_index.md b/content/telegraf/v1/processor-plugins/split/_index.md index 7cde5f9677..2407f23a97 100644 --- a/content/telegraf/v1/processor-plugins/split/_index.md +++ b/content/telegraf/v1/processor-plugins/split/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/split/README.md, Split Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/split/README.md, Split Plugin Source --- # Split Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/starlark/_index.md b/content/telegraf/v1/processor-plugins/starlark/_index.md index 6bfade8f8c..3647123ca0 100644 --- a/content/telegraf/v1/processor-plugins/starlark/_index.md +++ b/content/telegraf/v1/processor-plugins/starlark/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/starlark/README.md, Starlark Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/starlark/README.md, Starlark Plugin Source --- # Starlark Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/strings/_index.md b/content/telegraf/v1/processor-plugins/strings/_index.md index 45ce80ad7e..b5b59e9c87 100644 --- a/content/telegraf/v1/processor-plugins/strings/_index.md +++ b/content/telegraf/v1/processor-plugins/strings/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/strings/README.md, Strings Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/strings/README.md, Strings Plugin Source --- # Strings Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/tag_limit/_index.md b/content/telegraf/v1/processor-plugins/tag_limit/_index.md index 1b5c76f2b9..cb1773c466 100644 --- a/content/telegraf/v1/processor-plugins/tag_limit/_index.md +++ b/content/telegraf/v1/processor-plugins/tag_limit/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, 
macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/tag_limit/README.md, Tag Limit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/tag_limit/README.md, Tag Limit Plugin Source --- # Tag Limit Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/template/_index.md b/content/telegraf/v1/processor-plugins/template/_index.md index 27bc28d996..2fe864e7c9 100644 --- a/content/telegraf/v1/processor-plugins/template/_index.md +++ b/content/telegraf/v1/processor-plugins/template/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/template/README.md, Template Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/template/README.md, Template Plugin Source --- # Template Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/timestamp/_index.md b/content/telegraf/v1/processor-plugins/timestamp/_index.md index e0fe607236..ef3a1b872f 100644 --- a/content/telegraf/v1/processor-plugins/timestamp/_index.md +++ b/content/telegraf/v1/processor-plugins/timestamp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.31.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/timestamp/README.md, Timestamp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/timestamp/README.md, Timestamp Plugin Source --- # Timestamp Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/topk/_index.md b/content/telegraf/v1/processor-plugins/topk/_index.md index 8b2a003bfc..14a1d32907 100644 --- a/content/telegraf/v1/processor-plugins/topk/_index.md +++ 
b/content/telegraf/v1/processor-plugins/topk/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/topk/README.md, TopK Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/topk/README.md, TopK Plugin Source --- # TopK Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/unpivot/_index.md b/content/telegraf/v1/processor-plugins/unpivot/_index.md index a09c12d283..a56eab141e 100644 --- a/content/telegraf/v1/processor-plugins/unpivot/_index.md +++ b/content/telegraf/v1/processor-plugins/unpivot/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/unpivot/README.md, Unpivot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/unpivot/README.md, Unpivot Plugin Source --- # Unpivot Processor Plugin diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 96094f5e1f..f5ae49a89e 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,6 +11,81 @@ menu: weight: 60 --- +## v1.38.0 {date="2026-03-09"} + +### Important Changes + +- PR [#17961](https://github.com/influxdata/telegraf/pull/17961) makes the + **strict environment variable handling the default**! In case you need the old + behavior you can opt-out using the `--non-strict-env-handling` flag. 
+ +### New Plugins + +- [#18183](https://github.com/influxdata/telegraf/pull/18183) `inputs.sip` Add plugin +- [#18223](https://github.com/influxdata/telegraf/pull/18223) `outputs.influxdb_v3` Add plugin + +### Features + +- [#18086](https://github.com/influxdata/telegraf/pull/18086) `agent` Optimise disk buffer strategy +- [#18232](https://github.com/influxdata/telegraf/pull/18232) `common.opcua` Add string configuration option for node ID +- [#18411](https://github.com/influxdata/telegraf/pull/18411) `common.opcua` Add support for datetime arrays +- [#18181](https://github.com/influxdata/telegraf/pull/18181) `inputs.docker` Implement startup error behavior options +- [#18425](https://github.com/influxdata/telegraf/pull/18425) `inputs.gnmi` Allow to emit delete metrics +- [#18466](https://github.com/influxdata/telegraf/pull/18466) `inputs.mqtt_consumer` Add option for maximum reconnect interval +- [#18063](https://github.com/influxdata/telegraf/pull/18063) `inputs.mysql` Add replication latency fields +- [#18117](https://github.com/influxdata/telegraf/pull/18117) `inputs.mysql` Add wsrep provider options fields +- [#18272](https://github.com/influxdata/telegraf/pull/18272) `inputs.mysql` Support encryption algorithm statistics if present +- [#18134](https://github.com/influxdata/telegraf/pull/18134) `inputs.nftables` Monitor set element counts +- [#18246](https://github.com/influxdata/telegraf/pull/18246) `inputs.nftables` Support named counters +- [#18259](https://github.com/influxdata/telegraf/pull/18259) `inputs.statsd` Add support for Datadog service checks +- [#18393](https://github.com/influxdata/telegraf/pull/18393) `outputs.health` Add option for setting default status +- [#18415](https://github.com/influxdata/telegraf/pull/18415) `outputs.heartbeat` Add logging information +- [#17577](https://github.com/influxdata/telegraf/pull/17577) `outputs.heartbeat` Add status evaluation +- [#18305](https://github.com/influxdata/telegraf/pull/18305) 
`outputs.influxdb_v2` Add trace logging for write request timing +- [#18422](https://github.com/influxdata/telegraf/pull/18422) `outputs.mongodb` Allow writing metrics in batches +- [#17997](https://github.com/influxdata/telegraf/pull/17997) `outputs.opentelemetry` Support http protocol +- [#18337](https://github.com/influxdata/telegraf/pull/18337) `outputs.redistimeseries` Add option to expire values +- [#18339](https://github.com/influxdata/telegraf/pull/18339) `outputs.stackdriver` Add credentials file support for stackdriver output plugin +- [#18341](https://github.com/influxdata/telegraf/pull/18341) `prometheus` Add UTF-8 metric and label name sanitization + +### Bugfixes + +- [#18429](https://github.com/influxdata/telegraf/pull/18429) `common.opcua` Use configured timestamp format for datetime arrays +- [#18381](https://github.com/influxdata/telegraf/pull/18381) `inputs.fibaro` Handle numeric value2 field from HC3 devices +- [#18424](https://github.com/influxdata/telegraf/pull/18424) `inputs.http` Close gzip request body on early failures +- [#18412](https://github.com/influxdata/telegraf/pull/18412) `inputs.internet_speed` Fix server_id_include filter logic +- [#18452](https://github.com/influxdata/telegraf/pull/18452) `inputs.mqtt_consumer` Rely on paho auto-reconnect to restore message flow after network disruption +- [#18392](https://github.com/influxdata/telegraf/pull/18392) `inputs.opcua_listener` Prevent panic on events with empty fields +- [#18387](https://github.com/influxdata/telegraf/pull/18387) `inputs.smart` Include NVMe SMART data in smart_device measurement +- [#18416](https://github.com/influxdata/telegraf/pull/18416) `outputs.influxdb` Prevent goroutine leak on gzip write failure +- [#18418](https://github.com/influxdata/telegraf/pull/18418) `outputs.opentelemetry` Prevent goroutine leak on gzip write failure + +### Dependency Updates + +- [#18436](https://github.com/influxdata/telegraf/pull/18436) `deps` Bump cloud.google.com/go/bigquery 
from 1.73.1 to 1.74.0 +- [#18444](https://github.com/influxdata/telegraf/pull/18444) `deps` Bump github.com/IBM/sarama from 1.46.3 to 1.47.0 +- [#18449](https://github.com/influxdata/telegraf/pull/18449) `deps` Bump github.com/SAP/go-hdb from 1.15.0 to 1.15.1 +- [#18398](https://github.com/influxdata/telegraf/pull/18398) `deps` Bump github.com/antchfx/xpath from 1.3.5 to 1.3.6 +- [#18442](https://github.com/influxdata/telegraf/pull/18442) `deps` Bump github.com/aws/smithy-go from 1.24.1 to 1.24.2 +- [#18400](https://github.com/influxdata/telegraf/pull/18400) `deps` Bump github.com/hashicorp/consul/api from 1.33.2 to 1.33.3 +- [#18438](https://github.com/influxdata/telegraf/pull/18438) `deps` Bump github.com/hashicorp/consul/api from 1.33.3 to 1.33.4 +- [#18446](https://github.com/influxdata/telegraf/pull/18446) `deps` Bump github.com/lxc/incus/v6 from 6.21.0 to 6.22.0 +- [#18441](https://github.com/influxdata/telegraf/pull/18441) `deps` Bump github.com/microsoft/go-mssqldb from 1.9.6 to 1.9.8 +- [#18404](https://github.com/influxdata/telegraf/pull/18404) `deps` Bump github.com/nats-io/nats.go from 1.48.0 to 1.49.0 +- [#18439](https://github.com/influxdata/telegraf/pull/18439) `deps` Bump github.com/prometheus/procfs from 0.19.2 to 0.20.1 +- [#18440](https://github.com/influxdata/telegraf/pull/18440) `deps` Bump github.com/shirou/gopsutil/v4 from 4.26.1 to 4.26.2 +- [#18402](https://github.com/influxdata/telegraf/pull/18402) `deps` Bump github.com/vmware/govmomi from 0.52.0 to 0.53.0 +- [#18399](https://github.com/influxdata/telegraf/pull/18399) `deps` Bump go.step.sm/crypto from 0.76.0 to 0.76.2 +- [#18450](https://github.com/influxdata/telegraf/pull/18450) `deps` Bump golang.org/x/net from 0.50.0 to 0.51.0 +- [#18437](https://github.com/influxdata/telegraf/pull/18437) `deps` Bump google.golang.org/api from 0.266.0 to 0.269.0 +- [#18448](https://github.com/influxdata/telegraf/pull/18448) `deps` Bump k8s.io/api from 0.35.1 to 0.35.2 +- 
[#18447](https://github.com/influxdata/telegraf/pull/18447) `deps` Bump k8s.io/apimachinery from 0.35.1 to 0.35.2 +- [#18443](https://github.com/influxdata/telegraf/pull/18443) `deps` Bump k8s.io/client-go from 0.35.1 to 0.35.2 +- [#18403](https://github.com/influxdata/telegraf/pull/18403) `deps` Bump modernc.org/sqlite from 1.45.0 to 1.46.1 +- [#18397](https://github.com/influxdata/telegraf/pull/18397) `deps` Bump the aws-sdk-go-v2 group with 11 updates +- [#18435](https://github.com/influxdata/telegraf/pull/18435) `deps` Bump the aws-sdk-go-v2 group with 2 updates +- [#18396](https://github.com/influxdata/telegraf/pull/18396) `deps` Bump tj-actions/changed-files from 47.0.2 to 47.0.4 + ## v1.37.3 {date="2026-02-23"} ### Bugfixes diff --git a/data/products.yml b/data/products.yml index 90d37e0541..1662a618ff 100644 --- a/data/products.yml +++ b/data/products.yml @@ -287,9 +287,9 @@ telegraf: menu_category: other list_order: 6 versions: [v1] - latest: v1.37 + latest: v1.38 latest_patches: - v1: 1.37.3 + v1: 1.38.0 ai_sample_questions: - How do I configure Telegraf for InfluxDB 3? - How do I write a custom Telegraf plugin? diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index 8fe1b7ab6d..7498cfc937 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -1740,11 +1740,7 @@ input: description: | This plugin gathers packets and bytes counters for rules within Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) - firewall. - - > [!IMPORTANT] - > Rules are identified by the associated comment so those **comments have - > to be unique**! Rules without comment are ignored. + firewall, as well as set element counts. 
introduced: v1.37.0 os_support: [linux] tags: [network, system] @@ -2369,6 +2365,18 @@ input: introduced: v1.14.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [network] + - name: SIP + id: sip + description: | + This plugin gathers metrics about the health and availability of [SIP + (Session Initiation + Protocol)](https://datatracker.ietf.org/doc/html/rfc3261) servers such as + PBX systems, SIP proxies, registrars, and VoIP service providers. It + sends SIP requests (typically OPTIONS) and measures response times and + status codes. + introduced: v1.38.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] - name: Slab id: slab description: | @@ -3303,6 +3311,15 @@ output: introduced: v1.8.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [datastore] + - name: InfluxDB v3.x + id: influxdb_v3 + description: | + This plugin writes metrics to a [InfluxDB + v3.x](https://docs.influxdata.com) Core or Enterprise instance via the + HTTP API. + introduced: v1.38.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] - name: Inlong id: inlong description: | @@ -3494,7 +3511,7 @@ output: id: opentelemetry description: | This plugin writes metrics to [OpenTelemetry](https://opentelemetry.io) - servers and agents via gRPC. + servers and agents via gRPC or HTTP. 
introduced: v1.20.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [logging, messaging] From 731179fc838a0c79a073f84c9dbd299496fe5b4a Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Tue, 10 Mar 2026 12:46:46 -0400 Subject: [PATCH 08/15] chore: update release notes to 3.8.4 (#6915) * chore: update to 3.8.4 * chore: lint * Apply suggestions from code review * Update content/shared/v3-core-enterprise-release-notes/_index.md --------- Co-authored-by: Jason Stirnaman --- .../_index.md | 25 +++++++++++++++++-- data/products.yml | 2 +- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 07503f63c2..2530ed0cd1 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -6,6 +6,27 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. +## v3.8.4 {date="2026-03-10"} + +### Core + +No adjustments in this release. +Core remains on v3.8.3. + +### Enterprise + +#### Security + +- **Read and write tokens can no longer delete databases**: Authorization now evaluates both the HTTP method and the request path. Previously, tokens with read or write access to a database could also issue delete requests. + +#### Bug fixes + +- **Stale compactor blocking startup**: Fixed an issue where stopped (stale) compactor entries in the catalog prevented new compactor nodes from starting. Enterprise now only considers currently running compactor nodes for conflict checks. + +- **WAL replay**: Fixed an issue where combined-mode deployments silently ignored the `--wal-replay-concurrency-limit` flag and always used serial replay (concurrency of 1). The flag is now respected. + +- Other bug fixes and performance improvements. 
+ ## v3.8.3 {date="2026-02-24"} ### Core @@ -428,9 +449,9 @@ All Core updates are included in Enterprise. Additional Enterprise-specific feat ## v3.1.0 {date="2025-05-29"} -**Core**: revision 482dd8aac580c04f37e8713a8fffae89ae8bc264 +**Core**: revision `482dd8aac580c04f37e8713a8fffae89ae8bc264` -**Enterprise**: revision 2cb23cf32b67f9f0d0803e31b356813a1a151b00 +**Enterprise**: revision `2cb23cf32b67f9f0d0803e31b356813a1a151b00` ### Core diff --git a/data/products.yml b/data/products.yml index 1662a618ff..9693cb96dc 100644 --- a/data/products.yml +++ b/data/products.yml @@ -46,7 +46,7 @@ influxdb3_enterprise: versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.8.3 + latest_patch: 3.8.4 placeholder_host: localhost:8181 limits: database: 100 From 478a0ff6fc41ca8fd8ed65da434c46e13371368a Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:55:14 -0500 Subject: [PATCH 09/15] Harden PR preview workflow against transient deploy/comment failures (#6919) * Initial plan * chore: add preview deployment validation Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- .github/workflows/pr-preview.yml | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pr-preview.yml b/.github/workflows/pr-preview.yml index 18c8ca3788..85aaf11d11 100644 --- a/.github/workflows/pr-preview.yml +++ b/.github/workflows/pr-preview.yml @@ -139,6 +139,8 @@ jobs: - name: Deploy preview if: steps.detect.outputs.pages-to-deploy != '[]' + id: deploy-preview + continue-on-error: true uses: rossjrw/pr-preview-action@v1.4.8 with: source-dir: ./preview-staging @@ -146,8 +148,27 @@ jobs: umbrella-dir: pr-preview action: deploy - - name: Post success comment + - 
name: Validate preview deployment if: steps.detect.outputs.pages-to-deploy != '[]' + id: validate-deploy + run: | + DEPLOY_OUTCOME="${{ steps.deploy-preview.outcome }}" + DEPLOY_URL="${{ steps.deploy-preview.outputs.deployment-url }}" + + if [ -z "$DEPLOY_URL" ]; then + echo "Deployment step did not produce a preview URL. Failing preview job." + exit 1 + fi + + if [ "$DEPLOY_OUTCOME" != "success" ]; then + echo "Deployment reported outcome: $DEPLOY_OUTCOME" + echo "Preview URL exists; treating as transient post-deploy comment error." + fi + + echo "status=ok" >> "$GITHUB_OUTPUT" + + - name: Post success comment + if: steps.detect.outputs.pages-to-deploy != '[]' && steps.validate-deploy.outputs.status == 'ok' uses: actions/github-script@v7 with: script: | From 507ec68cf7054074ee3d460151dbca73e518c19d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 10 Mar 2026 12:16:19 -0500 Subject: [PATCH 10/15] fix: resolve high-severity dependency vulnerabilities (#6916) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Update tar resolution 7.5.7 → 7.5.11 (CVE-2026-26960, CVE-2026-29786) - Remove unused copilot optional dependency (CVE-2026-29783) --- package.json | 6 ++---- yarn.lock | 57 ++++------------------------------------------------ 2 files changed, 6 insertions(+), 57 deletions(-) diff --git a/package.json b/package.json index f05c1202ba..9eb5344369 100644 --- a/package.json +++ b/package.json @@ -13,7 +13,7 @@ }, "resolutions": { "serialize-javascript": "^6.0.2", - "tar": "7.5.7", + "tar": "7.5.11", "lodash-es": "^4.17.23" }, "devDependencies": { @@ -115,7 +115,5 @@ }, "keywords": [], "author": "", - "optionalDependencies": { - "copilot": "^0.0.2" - } + "optionalDependencies": {} } diff --git a/yarn.lock b/yarn.lock index 3b8d8d78e3..49b022d291 100644 --- a/yarn.lock +++ b/yarn.lock @@ -246,48 +246,6 @@ resolved "https://registry.yarnpkg.com/@exodus/bytes/-/bytes-1.14.1.tgz#9b5c29077162a35f1bd25613e0cd3c239f6e7ad8" 
integrity sha512-OhkBFWI6GcRMUroChZiopRiSp2iAMvEBK47NhJooDqz1RERO4QuZIZnjP63TXX8GAiLABkYmX+fuQsdJ1dd2QQ== -"@github/copilot-darwin-arm64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-darwin-arm64/-/copilot-darwin-arm64-0.0.420.tgz#560ca002fa491c04fdb6f74f84fee87e52575c53" - integrity sha512-sj8Oxcf3oKDbeUotm2gtq5YU1lwCt3QIzbMZioFD/PMLOeqSX/wrecI+c0DDYXKofFhALb0+DxxnWgbEs0mnkQ== - -"@github/copilot-darwin-x64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-darwin-x64/-/copilot-darwin-x64-0.0.420.tgz#1d5cf40ac4e04bbd69fb0a79abf3743897c5f795" - integrity sha512-2acA93IqXz1uuz3TVUm0Y7BVrBr0MySh1kQa8LqMILhTsG0YHRMm8ybzTp2HA7Mi1tl5CjqMSk163kkS7OzfUA== - -"@github/copilot-linux-arm64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-linux-arm64/-/copilot-linux-arm64-0.0.420.tgz#e247517854927a14f5c076bfa99309160afec2d7" - integrity sha512-h/IvEryTOYm1HzR2GNq8s2aDtN4lvT4MxldfZuS42CtWJDOfVG2jLLsoHWU1T3QV8j1++PmDgE//HX0JLpLMww== - -"@github/copilot-linux-x64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-linux-x64/-/copilot-linux-x64-0.0.420.tgz#00d22974499f0fab6354fe4e22f6be59b800ab98" - integrity sha512-iL2NpZvXIDZ+3lw7sO2fo5T0nKmP5dZbU2gdYcv+SFBm/ONhCxIY5VRX4yN/9VkFaa9ePv5JzCnsl3vZINiDxg== - -"@github/copilot-win32-arm64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-win32-arm64/-/copilot-win32-arm64-0.0.420.tgz#733c45aced1e42c2877ae44012074abbcce3d55d" - integrity sha512-Njlc2j9vYSBAL+lC6FIEhQ3C+VxO3xavwKnw0ecVRiNLcGLyPrTdzPfPQOmEjC63gpVCqLabikoDGv8fuLPA2w== - -"@github/copilot-win32-x64@0.0.420": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot-win32-x64/-/copilot-win32-x64-0.0.420.tgz#d45f47f2f08d4bba87760b8afb21af19d1988780" - integrity sha512-rZlH35oNehAP2DvQbu4vQFVNeCh/1p3rUjafBYaEY0Nkhx7RmdrYBileL5U3PtRPPRsBPaq3Qp+pVIrGoCDLzQ== - 
-"@github/copilot@latest": - version "0.0.420" - resolved "https://registry.yarnpkg.com/@github/copilot/-/copilot-0.0.420.tgz#596349de076566a310836a7e06e6807b87ea6bfe" - integrity sha512-UpPuSjxUxQ+j02WjZEFffWf0scLb23LvuGHzMFtaSsweR+P/BdbtDUI5ZDIA6T0tVyyt6+X1/vgfsJiRqd6jig== - optionalDependencies: - "@github/copilot-darwin-arm64" "0.0.420" - "@github/copilot-darwin-x64" "0.0.420" - "@github/copilot-linux-arm64" "0.0.420" - "@github/copilot-linux-x64" "0.0.420" - "@github/copilot-win32-arm64" "0.0.420" - "@github/copilot-win32-x64" "0.0.420" - "@humanfs/core@^0.19.1": version "0.19.1" resolved "https://registry.yarnpkg.com/@humanfs/core/-/core-0.19.1.tgz#17c55ca7d426733fe3c561906b8173c336b40a77" @@ -1482,13 +1440,6 @@ confbox@^0.1.8: resolved "https://registry.yarnpkg.com/confbox/-/confbox-0.1.8.tgz#820d73d3b3c82d9bd910652c5d4d599ef8ff8b06" integrity sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w== -copilot@^0.0.2: - version "0.0.2" - resolved "https://registry.yarnpkg.com/copilot/-/copilot-0.0.2.tgz#4712810c9182cd784820ed44627bedd32dd377f9" - integrity sha512-nedf34AaYj9JnFhRmiJEZemAno2WDXMypq6FW5aCVR0N+QdpQ6viukP1JpvJDChpaMEVvbUkMjmjMifJbO/AgQ== - dependencies: - "@github/copilot" latest - core-util-is@1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" @@ -5386,10 +5337,10 @@ tar-stream@^3.1.5: fast-fifo "^1.2.0" streamx "^2.15.0" -tar@7.5.7, tar@^7.5.9: - version "7.5.7" - resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.7.tgz#adf99774008ba1c89819f15dbd6019c630539405" - integrity sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ== +tar@7.5.11, tar@^7.5.9: + version "7.5.11" + resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.11.tgz#1250fae45d98806b36d703b30973fa8e0a6d8868" + integrity 
sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ== dependencies: "@isaacs/fs-minipass" "^4.0.0" chownr "^3.0.0" From 3952e3b65baba8fca1c6aa2d1a7ad74ddb0e5181 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Tue, 10 Mar 2026 12:22:58 -0500 Subject: [PATCH 11/15] fix: use correct API to invoke Copilot code review in doc-review workflow (#6918) * Initial plan * fix: use correct API to request Copilot code review in doc-review workflow Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- .github/workflows/doc-review.yml | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/.github/workflows/doc-review.yml b/.github/workflows/doc-review.yml index 9ccf81b2ae..b6f79cffaa 100644 --- a/.github/workflows/doc-review.yml +++ b/.github/workflows/doc-review.yml @@ -89,11 +89,27 @@ jobs: !contains(github.event.pull_request.labels.*.name, 'skip-review')) steps: - name: Request Copilot review + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: - GH_TOKEN: ${{ github.token }} PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - REPO: ${{ github.repository }} - run: gh pr edit "$PR_NUMBER" --repo "$REPO" --add-reviewer "copilot-reviews" + with: + script: | + const prNumber = context.issue.number || Number(process.env.PR_NUMBER); + try { + await github.rest.pulls.requestReviewers({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + reviewers: ['copilot-pull-request-reviewer'], + }); + core.info('Copilot code review requested successfully'); + } catch (error) { + core.warning(`Could not request Copilot review: ${error.message}`); + core.warning( + 'To enable 
automatic Copilot reviews, configure a repository ruleset: ' + + 'Settings → Rules → Rulesets → "Automatically request Copilot code review"' + ); + } # ----------------------------------------------------------------- # Job 3: Copilot visual review (depends on Job 1 for URLs) @@ -187,7 +203,7 @@ jobs: '', '---', '', - `@copilot please review the preview pages listed above using the template below:`, + `@github-copilot please review the preview pages listed above using the template below:`, '', template.trim(), '', From 8a6143d5d14a08d6c42f79d80e3c2a4f9110e1c2 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Tue, 10 Mar 2026 12:27:35 -0500 Subject: [PATCH 12/15] fix(vale): propagate Google.Units=NO to product-specific Vale configs (#6917) * Initial plan * fix(vale): add missing Google.Units=NO to product-specific .vale.ini files Duration literals like 7d, 24h, 30d were being flagged as errors when running Vale with product-specific configs because those configs were missing Google.Units = NO. Also adds Vale.Terms = NO, write-good.TooWordy = NO, and TokenIgnores to match the root .vale.ini. 
Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- .claude/skills/vale-linting/SKILL.md | 29 +++++++++++++++++++- content/influxdb/v2/.vale.ini | 20 +++++++++++++- content/influxdb3/cloud-dedicated/.vale.ini | 20 +++++++++++++- content/influxdb3/cloud-serverless/.vale.ini | 20 +++++++++++++- content/influxdb3/clustered/.vale.ini | 20 +++++++++++++- content/influxdb3/core/.vale.ini | 20 +++++++++++++- 6 files changed, 123 insertions(+), 6 deletions(-) diff --git a/.claude/skills/vale-linting/SKILL.md b/.claude/skills/vale-linting/SKILL.md index aa324981cc..760487d522 100644 --- a/.claude/skills/vale-linting/SKILL.md +++ b/.claude/skills/vale-linting/SKILL.md @@ -299,15 +299,42 @@ echo "systemd" >> .ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt ### Creating a Product-Specific Override +> [!Important] +> Product-specific `.vale.ini` files must include the same disabled rules as the +> root `.vale.ini`. Rules disabled in the root config are **not** inherited by +> product-specific configs. Omitting them re-enables the rules for those products. +> For example, omitting `Google.Units = NO` causes duration literals like `7d`, +> `24h` to be flagged as errors in product-specific linting runs. + ```bash # 1. Create product-specific .vale.ini cat > content/influxdb3/cloud-dedicated/.vale.ini << 'EOF' StylesPath = ../../../.ci/vale/styles -MinAlertLevel = error +MinAlertLevel = warning Vocab = InfluxDataDocs +Packages = Google, write-good, Hugo + [*.md] BasedOnStyles = Vale, InfluxDataDocs, Google, write-good + +# These rules must be disabled in every product .vale.ini, same as the root .vale.ini. 
+Google.Acronyms = NO +Google.DateFormat = NO +Google.Ellipses = NO +Google.Headings = NO +Google.WordList = NO +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. +Google.Units = NO +Vale.Spelling = NO +Vale.Terms = NO +write-good.TooWordy = NO + +TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` + # Product-specific overrides InfluxDataDocs.Branding = YES EOF diff --git a/content/influxdb/v2/.vale.ini b/content/influxdb/v2/.vale.ini index 06147959c9..3538a1d808 100644 --- a/content/influxdb/v2/.vale.ini +++ b/content/influxdb/v2/.vale.ini @@ -14,4 +14,22 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -Vale.Spelling = NO \ No newline at end of file +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. +Google.Units = NO +Vale.Spelling = NO +# Disable Vale.Terms - the vocabulary-based substitution rule creates too many +# false positives from URLs, file paths, and code. The accepted terms in +# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. +Vale.Terms = NO +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO + +# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... +# Ignore full URLs like https://example.com/... +# Ignore inline code in frontmatter (description fields, etc.) 
+TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/.vale.ini b/content/influxdb3/cloud-dedicated/.vale.ini index c2ca4cb2fa..35dfc38e56 100644 --- a/content/influxdb3/cloud-dedicated/.vale.ini +++ b/content/influxdb3/cloud-dedicated/.vale.ini @@ -14,4 +14,22 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -Vale.Spelling = NO \ No newline at end of file +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. +Google.Units = NO +Vale.Spelling = NO +# Disable Vale.Terms - the vocabulary-based substitution rule creates too many +# false positives from URLs, file paths, and code. The accepted terms in +# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. +Vale.Terms = NO +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO + +# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... +# Ignore full URLs like https://example.com/... +# Ignore inline code in frontmatter (description fields, etc.) +TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` \ No newline at end of file diff --git a/content/influxdb3/cloud-serverless/.vale.ini b/content/influxdb3/cloud-serverless/.vale.ini index 4472fd3c10..9ebc431b72 100644 --- a/content/influxdb3/cloud-serverless/.vale.ini +++ b/content/influxdb3/cloud-serverless/.vale.ini @@ -14,4 +14,22 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -Vale.Spelling = NO \ No newline at end of file +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. 
+Google.Units = NO +Vale.Spelling = NO +# Disable Vale.Terms - the vocabulary-based substitution rule creates too many +# false positives from URLs, file paths, and code. The accepted terms in +# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. +Vale.Terms = NO +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO + +# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... +# Ignore full URLs like https://example.com/... +# Ignore inline code in frontmatter (description fields, etc.) +TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` \ No newline at end of file diff --git a/content/influxdb3/clustered/.vale.ini b/content/influxdb3/clustered/.vale.ini index c381f0cde9..2ae7567c0a 100644 --- a/content/influxdb3/clustered/.vale.ini +++ b/content/influxdb3/clustered/.vale.ini @@ -14,4 +14,22 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -Vale.Spelling = NO \ No newline at end of file +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. +Google.Units = NO +Vale.Spelling = NO +# Disable Vale.Terms - the vocabulary-based substitution rule creates too many +# false positives from URLs, file paths, and code. The accepted terms in +# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. +Vale.Terms = NO +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO + +# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... +# Ignore full URLs like https://example.com/... +# Ignore inline code in frontmatter (description fields, etc.) 
+TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` \ No newline at end of file diff --git a/content/influxdb3/core/.vale.ini b/content/influxdb3/core/.vale.ini index 86731aebfc..03f6d282a3 100644 --- a/content/influxdb3/core/.vale.ini +++ b/content/influxdb3/core/.vale.ini @@ -19,4 +19,22 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -Vale.Spelling = NO \ No newline at end of file +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. +Google.Units = NO +Vale.Spelling = NO +# Disable Vale.Terms - the vocabulary-based substitution rule creates too many +# false positives from URLs, file paths, and code. The accepted terms in +# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. +Vale.Terms = NO +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO + +# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... +# Ignore full URLs like https://example.com/... +# Ignore inline code in frontmatter (description fields, etc.) +TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ + https?://[^\s\)\]>"]+, \ + `[^`]+` \ No newline at end of file From 6d5f88c9e028a35ea28002b7fb160c06375e787a Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 10 Mar 2026 14:08:33 -0500 Subject: [PATCH 13/15] chore: add hosted influxdb-docs MCP server to .mcp.json (#6914) * chore: add hosted influxdb-docs MCP server to .mcp.json Add the hosted InfluxDB documentation search MCP server (influxdb-docs.mcp.kapa.ai) as the primary MCP server using SSE transport. Keep the local server with a note that it uses deprecated endpoints. 
https://claude.ai/code/session_01SURpmrJ2sxzBtp4euiAxpc * chore: add API key auth option for hosted influxdb-docs MCP server Make the API key-authenticated server the primary entry (influxdb-docs) using Bearer token from DOCS_KAPA_API_KEY env var. Keep the OAuth fallback as influxdb-docs-oauth for users without an API key. https://claude.ai/code/session_01SURpmrJ2sxzBtp4euiAxpc * chore: rename env vars to INFLUXDATA_DOCS_ namespace Rename DOCS_KAPA_API_KEY, DOCS_API_KEY_FILE, DOCS_MODE, and MCP_LOG_LEVEL to use the INFLUXDATA_DOCS_ prefix for consistency. https://claude.ai/code/session_01SURpmrJ2sxzBtp4euiAxpc * docs: add MCP server setup docs for contributors and AI agents Update CLAUDE.md, AGENTS.md, and content-editing SKILL.md with concise documentation for the hosted influxdb-docs MCP server, covering both API key and OAuth authentication options. https://claude.ai/code/session_01SURpmrJ2sxzBtp4euiAxpc * fix: remove cross-agent references from .claude files and AGENTS.md Keep .claude/ files focused on Claude Code. Link AGENTS.md to the published MCP server docs page instead of mentioning tool-specific setup. https://claude.ai/code/session_01SURpmrJ2sxzBtp4euiAxpc --------- Co-authored-by: Claude --- .claude/skills/content-editing/SKILL.md | 42 +++---------- .mcp.json | 25 ++++++-- AGENTS.md | 78 +++++++++++++++++++++++++ CLAUDE.md | 10 ++++ 4 files changed, 115 insertions(+), 40 deletions(-) diff --git a/.claude/skills/content-editing/SKILL.md b/.claude/skills/content-editing/SKILL.md index eb35f23e66..863b5c101c 100644 --- a/.claude/skills/content-editing/SKILL.md +++ b/.claude/skills/content-editing/SKILL.md @@ -359,33 +359,12 @@ Use the Documentation MCP Server when the information here is inconclusive, when ### Setup -The documentation MCP server is hosted—no local installation required. Add the server URL to your AI assistant's MCP configuration. +The documentation MCP server is hosted at `https://influxdb-docs.mcp.kapa.ai`—no local installation required. 
-**MCP server URL:** +Already configured in [`.mcp.json`](/.mcp.json). Two server entries are available: -```text -https://influxdb-docs.mcp.kapa.ai -``` - -**Claude Desktop configuration** (Settings > Developer): - -```json -{ - "mcpServers": { - "influxdb-docs": { - "url": "https://influxdb-docs.mcp.kapa.ai" - } - } -} -``` - -For other AI assistants see the [InfluxDB documentation MCP server guide](/influxdb3/core/admin/mcp-server/) -and verify the MCP configuration options and syntax for a specific AI assistant. - -**Rate limits** (per Google OAuth user): - -- 40 requests per hour -- 200 requests per day +- **`influxdb-docs`** (API key) — Set `INFLUXDATA_DOCS_KAPA_API_KEY` env var. 60 req/min. +- **`influxdb-docs-oauth`** (OAuth) — No setup. Authenticates via Google on first use. 40 req/hr, 200 req/day. ### Available Tool @@ -552,17 +531,12 @@ touch content/influxdb3/enterprise/path/to/file.md ### MCP Server Not Responding -The hosted MCP server (`https://influxdb-docs.mcp.kapa.ai`) requires: - -1. **Google OAuth authentication** - On first use, sign in with Google -2. **Rate limits** - 40 requests/hour, 200 requests/day per user - **Troubleshooting steps:** -- Verify your AI assistant has the MCP server URL configured correctly -- Check if you've exceeded rate limits (wait an hour or until the next day) -- Try re-authenticating by clearing your OAuth session -- Ensure your network allows connections to `*.kapa.ai` +- **API key auth** (`influxdb-docs`): Verify `INFLUXDATA_DOCS_KAPA_API_KEY` is set. Rate limit: 60 req/min. +- **OAuth auth** (`influxdb-docs-oauth`): Sign in with Google on first use. Rate limits: 40 req/hr, 200 req/day. 
+- Verify your network allows connections to `*.kapa.ai` +- Check if you've exceeded rate limits (wait and retry) ### Cypress Tests Fail diff --git a/.mcp.json b/.mcp.json index f600dfa672..f538540906 100644 --- a/.mcp.json +++ b/.mcp.json @@ -1,20 +1,33 @@ { "$schema": "https://raw.githubusercontent.com/modelcontextprotocol/modelcontextprotocol/refs/heads/main/schema/2025-06-18/schema.json", - "description": "InfluxData documentation assistance via MCP server - Node.js execution", + "description": "InfluxData documentation assistance via MCP servers", "mcpServers": { + "influxdb-docs": { + "comment": "Hosted InfluxDB documentation search. Uses API key auth (set INFLUXDATA_DOCS_KAPA_API_KEY env var). Get your key from the Kapa dashboard. Rate limits: 60 req/min.", + "type": "sse", + "url": "https://influxdb-docs.mcp.kapa.ai", + "headers": { + "Authorization": "Bearer ${INFLUXDATA_DOCS_KAPA_API_KEY}" + } + }, + "influxdb-docs-oauth": { + "comment": "Hosted InfluxDB documentation search (OAuth). No API key needed--authenticates via Google OAuth on first use. Rate limits: 40 req/hr, 200 req/day.", + "type": "sse", + "url": "https://influxdb-docs.mcp.kapa.ai" + }, "influxdata": { - "comment": "Use Node to run Docs MCP. To install and setup, see https://github.com/influxdata/docs-mcp-server", + "comment": "Local Docs MCP server (optional). To install and setup, see https://github.com/influxdata/docs-mcp-server. 
NOTE: uses deprecated endpoints--pending update.", "type": "stdio", "command": "node", "args": [ "${DOCS_MCP_SERVER_PATH}/dist/index.js" ], "env": { - "DOCS_API_KEY_FILE": "${DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}", - "DOCS_MODE": "external-only", - "MCP_LOG_LEVEL": "${MCP_LOG_LEVEL:-info}", + "INFLUXDATA_DOCS_API_KEY_FILE": "${INFLUXDATA_DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}", + "INFLUXDATA_DOCS_MODE": "external-only", + "INFLUXDATA_DOCS_LOG_LEVEL": "${INFLUXDATA_DOCS_LOG_LEVEL:-info}", "NODE_ENV": "${NODE_ENV:-production}" } } } -} \ No newline at end of file +} diff --git a/AGENTS.md b/AGENTS.md index c05d86e2b3..bd27e9efaf 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -18,6 +18,84 @@ | Test code blocks | `yarn test:codeblocks:all` | 15-45m — **NEVER CANCEL** | | Lint | `yarn lint` | ~1m | +## Repository Structure + +``` +docs-v2/ +├── content/ # Documentation content +│ ├── influxdb3/ # InfluxDB 3 (core, enterprise, cloud-*) +│ ├── influxdb/ # InfluxDB v2 and v1 +│ ├── enterprise_influxdb/ # InfluxDB Enterprise v1 +│ ├── telegraf/ # Telegraf docs +│ ├── shared/ # Shared content across products +│ └── example.md # Shortcode testing playground +├── layouts/ # Hugo templates and shortcodes +├── assets/ # JS, CSS, TypeScript +├── api-docs/ # InfluxDB OpenAPI specifications, API reference documentation generation scripts +├── data/ # YAML/JSON data files +├── public/ # Build output (gitignored, ~529MB) +└── .github/ + └── copilot-instructions.md # Primary AI instructions +``` + +**Content Paths**: See [copilot-instructions.md](.github/copilot-instructions.md#content-organization) + +## Documentation MCP Server + +A hosted MCP server provides semantic search over all InfluxDB documentation. +Use it to verify technical accuracy, check API syntax, and find related docs. + +See the [InfluxDB documentation MCP server guide](https://docs.influxdata.com/influxdb3/core/admin/mcp-server/) for setup instructions. 
+ +## Common Workflows + +### Creating/Editing Content + +**Frontmatter** (page metadata): +```yaml +title: Page Title # Required - becomes h1 +description: Brief desc # Required - for SEO +menu: + influxdb_2_0: + name: Nav Label # Optional - nav display name + parent: Parent Node # Optional - for nesting +weight: 1 # Required - sort order +``` + +**Shared Content** (avoid duplication): +```yaml +source: /shared/path/to/content.md +``` + +Shared content files (`/shared/path/to/content.md`): +- Don't store frontmatter +- Can use `{{% show-in %}}`, `{{% hide-in %}}`, and the `version` keyword (`/influxdb3/version/content.md`) + +**Common Shortcodes**: +- Callouts: `> [!Note]`, `> [!Warning]`, `> [!Important]`, `> [!Tip]` +- Tabs: `{{< tabs-wrapper >}}` + `{{% tabs %}}` + `{{% tab-content %}}` +- Required: `{{< req >}}` or `{{< req type="key" >}}` +- Code placeholders: `{ placeholders="" }` + +**📖 Complete Reference**: [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) | [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) + +### Testing Changes + +**Always test before committing**: +```bash +# Verify server renders (check 200 status) +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ + +# Test specific content +yarn test:links content/influxdb3/core/**/*.md + +# Run style linting +.ci/vale/vale.sh content/**/*.md +``` + +**📖 Complete Reference**: [DOCS-TESTING.md](DOCS-TESTING.md) + + ## Constraints - **NEVER cancel** Hugo builds (~75s) or test runs (15-45m) — the site has 5,359+ pages diff --git a/CLAUDE.md b/CLAUDE.md index a549ac0759..dc8350d2b0 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -13,6 +13,16 @@ > - Specialized agents in `.claude/agents/` > - Custom skills in `.claude/skills/` +## Documentation MCP server + +This repo includes [`.mcp.json`](.mcp.json) with a hosted InfluxDB documentation search server. +Use it to verify technical accuracy, check API syntax, and find related docs. + +- **`influxdb-docs`** — API key auth. 
Set `INFLUXDATA_DOCS_KAPA_API_KEY` env var before launching Claude Code. +- **`influxdb-docs-oauth`** — OAuth fallback. No setup needed. + +See [content-editing skill](.claude/skills/content-editing/SKILL.md#part-4-fact-checking-with-the-documentation-mcp-server) for usage details. + ## Purpose and scope Claude should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. From bf79e613b012d8ef3ab1711136e127c0a02a5e38 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Tue, 10 Mar 2026 13:50:18 -0600 Subject: [PATCH 14/15] hotfix: Change date for InfluxDB Docker latest tag Update the date for InfluxDB Docker latest tag change. --- data/notifications.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/notifications.yaml b/data/notifications.yaml index 5c513b0ff5..e0fde317ac 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -76,7 +76,7 @@ - /influxdb/cloud title: InfluxDB Docker latest tag changing to InfluxDB 3 Core slug: | - On **April 7, 2026**, the `latest` tag for InfluxDB Docker images will + On **May 27, 2026**, the `latest` tag for InfluxDB Docker images will point to InfluxDB 3 Core. To avoid unexpected upgrades, use specific version tags in your Docker deployments. 
message: | From c26f567ab3e921398eca0e30087d536e01012ce1 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Sun, 29 Mar 2026 15:07:03 -0500 Subject: [PATCH 15/15] rebase(ci): cleanly rebase fix-pr-preview-api-pages onto feat-api-uplift (#7005) * Initial plan * chore: rebase onto feat-api-uplift - align branch with feat-api-uplift base Reset branch to exactly match feat-api-uplift state plus the PR fix: - .github/scripts/detect-preview-pages.js: add detectApiPages() and has-api-doc-changes output - .github/workflows/pr-preview.yml: add Build API docs step Removes all master-merge pollution from the branch history so that the PR diff against feat-api-uplift shows only the intended fix changes. Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- .claude/agents/doc-review-agent.md | 82 - .claude/agents/doc-triage-agent.md | 72 - .claude/agents/influxdb3-tech-writer.md | 28 +- .claude/agents/ts-component-dev.md | 2 +- .claude/settings.json | 93 +- .claude/skills/content-editing/SKILL.md | 42 +- .claude/skills/hugo-template-dev/SKILL.md | 18 + .claude/skills/vale-linting/SKILL.md | 29 +- .claude/skills/vale-rule-config/SKILL.md | 6 +- .github/DOC-REVIEW-PIPELINE-PLAN.md | 704 --- .github/ISSUE_TEMPLATE/sync-plugin-docs.yml | 2 +- .github/LABEL_GUIDE.md | 100 - .github/copilot-instructions.md | 258 +- .../content-review.instructions.md | 76 - .github/prompts/copilot-visual-review.md | 34 - .github/scripts/resolve-review-urls.js | 38 - .github/scripts/workflow-utils.js | 104 - .github/templates/review-comment.md | 98 - .github/workflows/auto-label.yml | 122 - .github/workflows/doc-review.yml | 280 -- .github/workflows/pr-preview.yml | 23 +- .github/workflows/sync-plugins.yml | 6 +- .gitignore | 16 + .mcp.json | 25 +- AGENTS.md | 272 +- CLAUDE.md | 
13 +- DOCS-CONTRIBUTING.md | 48 +- DOCS-TESTING.md | 14 +- PLAN.md | 58 + PLATFORM_REFERENCE.md | 78 +- SPELL-CHECK.md | 50 +- api-docs/README.md | 69 +- api-docs/enterprise_influxdb/v1/.config.yml | 10 + .../v1/v1/content/info.yml | 31 + .../v1/v1/content/servers.yml | 2 + api-docs/enterprise_influxdb/v1/v1/ref.yml | 1108 +++++ api-docs/generate-api-docs.sh | 10 +- api-docs/getswagger.sh | 24 +- .../influxdb/cloud/v2/content/tag-groups.yml | 10 - api-docs/influxdb/cloud/v2/ref.yml | 91 +- api-docs/influxdb/v1/.config.yml | 10 + api-docs/influxdb/v1/v1/content/info.yml | 32 + api-docs/influxdb/v1/v1/content/servers.yml | 2 + api-docs/influxdb/v1/v1/ref.yml | 1093 +++++ .../influxdb/v2/v2/content/tag-groups.yml | 11 - api-docs/influxdb/v2/v2/ref.yml | 102 +- .../influxdb3/cloud-dedicated/.config.yml | 8 +- .../management/content/tag-groups.yml | 6 - .../cloud-dedicated/management/openapi.yml | 277 +- .../cloud-dedicated/v2/content/tag-groups.yml | 13 - api-docs/influxdb3/cloud-dedicated/v2/ref.yml | 205 +- .../v2/content/tag-groups.yml | 28 - .../influxdb3/cloud-serverless/v2/ref.yml | 100 +- api-docs/influxdb3/clustered/.config.yml | 8 +- .../management/content/tag-groups.yml | 6 - .../clustered/management/openapi.yml | 258 +- .../clustered/v2/content/tag-groups.yml | 12 - api-docs/influxdb3/clustered/v2/ref.yml | 191 +- api-docs/influxdb3/core/.config.yml | 2 +- api-docs/influxdb3/core/v3/content/info.yml | 5 +- .../influxdb3/core/v3/content/tag-groups.yml | 15 - .../influxdb3/core/v3/ref.yml | 3788 +++++++--------- api-docs/influxdb3/enterprise/.config.yml | 2 +- .../influxdb3/enterprise/v3/content/info.yml | 5 +- .../enterprise/v3/content/tag-groups.yml | 15 - .../v3/influxdb3-enterprise-openapi.yaml | 3799 ----------------- .../v3/ref.yml} | 3779 ++++++++-------- .../decorators/tags/set-tag-groups.cjs | 94 - api-docs/openapi/plugins/docs-content.cjs | 2 - api-docs/openapi/plugins/docs-plugin.cjs | 5 +- api-docs/scripts/README.md | 390 ++ 
.../scripts/dist/generate-openapi-articles.js | 838 ++++ .../dist/openapi-paths-to-hugo-data/index.js | 920 ++++ api-docs/scripts/generate-openapi-articles.ts | 1145 +++++ .../openapi-paths-to-hugo-data/index.ts | 1415 ++++++ .../openapi-paths-to-hugo-data/package.json | 14 + .../openapi-paths-to-hugo-data/yarn.lock | 32 + api-docs/scripts/tsconfig.json | 25 + assets/js/components/api-toc.ts | 473 ++ assets/js/content-interactions.js | 28 +- assets/js/main.js | 2 + assets/styles/layouts/_api-code-samples.scss | 67 + assets/styles/layouts/_api-layout.scss | 785 ++++ assets/styles/layouts/_api-operations.scss | 540 +++ assets/styles/layouts/_api-overrides.scss | 270 +- .../styles/layouts/_api-security-schemes.scss | 92 + assets/styles/layouts/_sidebar.scss | 60 + assets/styles/styles-default.scss | 6 +- content/enterprise_influxdb/v1/tools/api.md | 2 +- .../cloud/admin/buckets/bucket-schema.md | 12 +- .../cloud/admin/buckets/create-bucket.md | 8 +- .../cloud/admin/buckets/update-bucket.md | 12 +- .../cloud/admin/buckets/view-buckets.md | 4 +- .../api-guide/api-invokable-scripts/_index.md | 10 +- content/influxdb/cloud/get-started/setup.md | 6 +- .../cloud/query-data/parameterized-queries.md | 2 +- .../influxdb/cloud/reference/api/_index.md | 17 - .../cloud/reference/internals/ttbr.md | 2 +- .../reference/release-notes/cloud-updates.md | 2 +- .../cloud/write-data/developer-tools/api.md | 2 +- content/influxdb/v1/tools/api.md | 10 +- content/influxdb/v2/.vale.ini | 20 +- .../v2/admin/buckets/create-bucket.md | 4 +- content/influxdb/v2/get-started/setup.md | 10 +- .../v2/process-data/manage-tasks/run-task.md | 4 +- .../process-data/manage-tasks/view-tasks.md | 4 +- content/influxdb/v2/reference/api/_index.md | 22 - .../influxdb/v2/reference/config-options.md | 2 +- .../v2/reference/internals/metrics.md | 2 +- .../v2/reference/release-notes/influxdb.md | 8 +- .../v2/write-data/developer-tools/api.md | 2 +- .../influxdb/v2/write-data/troubleshoot.md | 4 +- 
content/influxdb3/cloud-dedicated/.vale.ini | 20 +- .../cloud-dedicated/admin/databases/create.md | 4 +- .../cloud-dedicated/admin/databases/delete.md | 2 +- .../cloud-dedicated/admin/databases/list.md | 2 +- .../cloud-dedicated/admin/databases/update.md | 2 +- .../cloud-dedicated/admin/tables/create.md | 4 +- .../admin/tokens/database/create.md | 6 +- .../admin/tokens/database/list.md | 4 +- .../admin/tokens/database/revoke.md | 2 +- .../admin/tokens/database/update.md | 4 +- .../cloud-dedicated/get-started/write.md | 8 +- .../cloud-dedicated/reference/api/_index.md | 41 - .../v2/javascript/nodejs/write.md | 2 +- content/influxdb3/cloud-serverless/.vale.ini | 20 +- .../admin/buckets/create-bucket.md | 4 +- .../buckets/manage-explicit-bucket-schemas.md | 8 +- .../admin/buckets/update-bucket.md | 12 +- .../admin/buckets/view-buckets.md | 4 +- .../admin/tokens/create-token.md | 6 +- .../admin/tokens/delete-token.md | 4 +- .../admin/tokens/update-tokens.md | 4 +- .../admin/tokens/view-tokens.md | 8 +- .../influxdb3/cloud-serverless/api/_index.md | 20 + .../api/api-compatibility/_index.md | 109 + .../api/authentication/_index.md | 36 + .../api/authorizations-api-tokens/_index.md | 107 + .../api/bucket-schemas/_index.md | 48 + .../cloud-serverless/api/buckets/_index.md | 126 + .../api/common-parameters/_index.md | 87 + .../cloud-serverless/api/dbrps/_index.md | 77 + .../cloud-serverless/api/delete/_index.md | 30 + .../cloud-serverless/api/headers/_index.md | 77 + .../api/invokable-scripts/_index.md | 83 + .../cloud-serverless/api/limits/_index.md | 30 + .../api/organizations/_index.md | 96 + .../cloud-serverless/api/pagination/_index.md | 91 + .../api/quick-start/_index.md | 81 + .../cloud-serverless/api/resources/_index.md | 30 + .../api/response-codes/_index.md | 147 + .../cloud-serverless/api/routes/_index.md | 30 + .../cloud-serverless/api/secrets/_index.md | 48 + .../security-and-access-endpoints/_index.md | 54 + .../api/supported-operations/_index.md | 21 + 
.../system-information-endpoints/_index.md | 36 + .../cloud-serverless/api/tasks/_index.md | 179 + .../cloud-serverless/api/telegrafs/_index.md | 108 + .../cloud-serverless/api/templates/_index.md | 101 + .../cloud-serverless/api/usage/_index.md | 30 + .../cloud-serverless/api/variables/_index.md | 78 + .../cloud-serverless/get-started/setup.md | 2 +- .../cloud-serverless/get-started/write.md | 6 +- .../guides/api-compatibility/v1/_index.md | 16 +- .../query-data/execute-queries/v1-http.md | 2 +- .../cloud-serverless/reference/api/_index.md | 31 - .../v2/javascript/nodejs/write.md | 2 +- .../write-data/delete-data.md | 2 +- content/influxdb3/clustered/.vale.ini | 20 +- .../influxdb3/clustered/get-started/write.md | 8 +- .../clustered/reference/api/_index.md | 32 - .../v2/javascript/nodejs/write.md | 2 +- content/influxdb3/core/.vale.ini | 20 +- .../influxdb3/core/admin/databases/create.md | 2 +- .../influxdb3/core/admin/databases/delete.md | 2 +- .../influxdb3/core/admin/databases/list.md | 2 +- .../core/admin/tokens/admin/create.md | 2 +- .../core/admin/tokens/admin/regenerate.md | 2 +- .../migrate-from-influxdb-v1-v2.md | 12 + .../influxdb3/core/reference/api/_index.md | 22 +- .../cli/influxdb3/create/database.md | 2 +- .../core/reference/cli/influxdb3/serve.md | 20 +- .../reference/internals/data-retention.md | 2 +- .../core/write-data/client-libraries.md | 2 +- .../core/write-data/http-api/_index.md | 2 +- .../write-data/http-api/compatibility-apis.md | 4 +- .../core/write-data/http-api/v3-write-lp.md | 2 +- .../enterprise/admin/databases/create.md | 2 +- .../enterprise/admin/databases/delete.md | 2 +- .../enterprise/admin/databases/list.md | 2 +- .../enterprise/admin/tables/create.md | 2 +- .../enterprise/admin/tables/delete.md | 2 +- .../influxdb3/enterprise/admin/tables/list.md | 2 +- .../enterprise/admin/tokens/admin/create.md | 2 +- .../admin/tokens/resource/create.md | 2 +- .../migrate-from-influxdb-v1-v2.md | 12 + .../enterprise/reference/api/_index.md | 
20 +- .../cli/influxdb3/create/database.md | 2 +- .../reference/cli/influxdb3/create/table.md | 2 +- .../reference/cli/influxdb3/serve.md | 23 +- .../cli/influxdb3/update/database.md | 2 +- .../reference/internals/data-retention.md | 6 +- .../enterprise/write-data/client-libraries.md | 2 +- .../write-data/compatibility-apis.md | 4 +- .../enterprise/write-data/http-api/_index.md | 2 +- .../write-data/http-api/compatibility-apis.md | 4 +- .../write-data/http-api/v3-write-lp.md | 2 +- content/kapacitor/v1/working/flux/_index.md | 2 +- .../v2/javascript/nodejs/write.md | 2 +- .../influxdb-v2/admin/tokens/create-token.md | 6 +- .../influxdb-v2/admin/tokens/delete-token.md | 4 +- .../influxdb-v2/admin/tokens/update-tokens.md | 4 +- .../influxdb-v2/admin/tokens/view-tokens.md | 6 +- .../shared/influxdb-v2/api-guide/api_intro.md | 2 +- .../client-libraries/nodejs/write.md | 2 +- .../shared/influxdb-v2/get-started/query.md | 6 +- .../shared/influxdb-v2/get-started/write.md | 2 +- .../monitor-alert/custom-checks.md | 2 +- .../process-data/manage-tasks/create-task.md | 12 +- .../process-data/manage-tasks/delete-task.md | 4 +- .../process-data/manage-tasks/run-task.md | 8 +- .../manage-tasks/task-run-history.md | 12 +- .../process-data/manage-tasks/update-task.md | 4 +- .../query-data/execute-queries/influx-api.md | 6 +- .../query-data/flux/flux-version.md | 2 +- .../influxdb-v2/query-data/influxql/dbrp.md | 16 +- .../influxdb-v2/write-data/delete-data.md | 8 +- .../write-data/replication/replicate-data.md | 8 +- .../distinct-value-cache/create.md | 2 +- .../distinct-value-cache/query.md | 4 +- .../distinct-value-cache/show.md | 4 +- .../last-value-cache/create.md | 2 +- .../last-value-cache/delete.md | 2 +- .../influxdb3-admin/last-value-cache/show.md | 4 +- .../influxdb3-admin/tokens/admin/create.md | 4 +- .../migrate-from-influxdb-v1-v2.md | 29 + .../influxdb3-internals/data-retention.md | 4 +- .../influxql/parameterized-queries.md | 2 +- .../sql/parameterized-queries.md | 
2 +- .../shared/influxdb3-write-guides/_index.md | 4 +- .../influxdb3-write-guides/http-api/_index.md | 4 +- .../http-api/compatibility-apis.md | 8 +- .../_index.md | 25 +- .../aggregator-plugins/basicstats/_index.md | 2 +- .../aggregator-plugins/derivative/_index.md | 2 +- .../v1/aggregator-plugins/final/_index.md | 2 +- .../v1/aggregator-plugins/histogram/_index.md | 2 +- .../v1/aggregator-plugins/merge/_index.md | 2 +- .../v1/aggregator-plugins/minmax/_index.md | 2 +- .../v1/aggregator-plugins/quantile/_index.md | 2 +- .../v1/aggregator-plugins/starlark/_index.md | 2 +- .../aggregator-plugins/valuecounter/_index.md | 2 +- .../v1/input-plugins/activemq/_index.md | 2 +- .../v1/input-plugins/aerospike/_index.md | 2 +- .../v1/input-plugins/aliyuncms/_index.md | 2 +- .../v1/input-plugins/amd_rocm_smi/_index.md | 2 +- .../v1/input-plugins/amqp_consumer/_index.md | 2 +- .../v1/input-plugins/apache/_index.md | 2 +- .../v1/input-plugins/apcupsd/_index.md | 2 +- .../v1/input-plugins/aurora/_index.md | 2 +- .../v1/input-plugins/azure_monitor/_index.md | 2 +- .../azure_storage_queue/_index.md | 2 +- .../v1/input-plugins/bcache/_index.md | 2 +- .../v1/input-plugins/beanstalkd/_index.md | 2 +- .../telegraf/v1/input-plugins/beat/_index.md | 2 +- .../telegraf/v1/input-plugins/bind/_index.md | 2 +- .../telegraf/v1/input-plugins/bond/_index.md | 2 +- .../v1/input-plugins/burrow/_index.md | 2 +- .../telegraf/v1/input-plugins/ceph/_index.md | 2 +- .../v1/input-plugins/cgroup/_index.md | 2 +- .../v1/input-plugins/chrony/_index.md | 2 +- .../cisco_telemetry_mdt/_index.md | 2 +- .../v1/input-plugins/clickhouse/_index.md | 2 +- .../v1/input-plugins/cloud_pubsub/_index.md | 2 +- .../input-plugins/cloud_pubsub_push/_index.md | 2 +- .../v1/input-plugins/cloudwatch/_index.md | 2 +- .../cloudwatch_metric_streams/_index.md | 2 +- .../v1/input-plugins/conntrack/_index.md | 2 +- .../v1/input-plugins/consul/_index.md | 2 +- .../v1/input-plugins/consul_agent/_index.md | 2 +- 
.../v1/input-plugins/couchbase/_index.md | 2 +- .../v1/input-plugins/couchdb/_index.md | 2 +- .../telegraf/v1/input-plugins/cpu/_index.md | 2 +- .../telegraf/v1/input-plugins/csgo/_index.md | 2 +- .../input-plugins/ctrlx_datalayer/_index.md | 2 +- .../telegraf/v1/input-plugins/dcos/_index.md | 2 +- .../input-plugins/directory_monitor/_index.md | 2 +- .../telegraf/v1/input-plugins/disk/_index.md | 2 +- .../v1/input-plugins/diskio/_index.md | 2 +- .../v1/input-plugins/disque/_index.md | 2 +- .../v1/input-plugins/dmcache/_index.md | 2 +- .../v1/input-plugins/dns_query/_index.md | 2 +- .../v1/input-plugins/docker/_index.md | 2 +- .../v1/input-plugins/docker_log/_index.md | 2 +- .../v1/input-plugins/dovecot/_index.md | 2 +- .../telegraf/v1/input-plugins/dpdk/_index.md | 2 +- .../telegraf/v1/input-plugins/ecs/_index.md | 2 +- .../v1/input-plugins/elasticsearch/_index.md | 2 +- .../elasticsearch_query/_index.md | 2 +- .../v1/input-plugins/ethtool/_index.md | 2 +- .../input-plugins/eventhub_consumer/_index.md | 2 +- .../telegraf/v1/input-plugins/exec/_index.md | 2 +- .../telegraf/v1/input-plugins/execd/_index.md | 2 +- .../v1/input-plugins/fail2ban/_index.md | 2 +- .../v1/input-plugins/fibaro/_index.md | 2 +- .../telegraf/v1/input-plugins/file/_index.md | 2 +- .../v1/input-plugins/filecount/_index.md | 2 +- .../v1/input-plugins/filestat/_index.md | 2 +- .../v1/input-plugins/fireboard/_index.md | 2 +- .../v1/input-plugins/firehose/_index.md | 2 +- .../v1/input-plugins/fluentd/_index.md | 2 +- .../v1/input-plugins/fritzbox/_index.md | 2 +- .../v1/input-plugins/github/_index.md | 2 +- .../telegraf/v1/input-plugins/gnmi/_index.md | 5 +- .../google_cloud_storage/_index.md | 2 +- .../v1/input-plugins/graylog/_index.md | 2 +- .../v1/input-plugins/haproxy/_index.md | 2 +- .../v1/input-plugins/hddtemp/_index.md | 2 +- .../telegraf/v1/input-plugins/http/_index.md | 5 +- .../input-plugins/http_listener_v2/_index.md | 2 +- .../v1/input-plugins/http_response/_index.md | 2 +- 
.../v1/input-plugins/huebridge/_index.md | 2 +- .../v1/input-plugins/hugepages/_index.md | 2 +- .../v1/input-plugins/icinga2/_index.md | 2 +- .../v1/input-plugins/infiniband/_index.md | 2 +- .../v1/input-plugins/influxdb/_index.md | 2 +- .../input-plugins/influxdb_listener/_index.md | 2 +- .../influxdb_v2_listener/_index.md | 2 +- .../v1/input-plugins/intel_baseband/_index.md | 2 +- .../v1/input-plugins/intel_dlb/_index.md | 2 +- .../v1/input-plugins/intel_pmt/_index.md | 2 +- .../v1/input-plugins/intel_pmu/_index.md | 2 +- .../input-plugins/intel_powerstat/_index.md | 2 +- .../v1/input-plugins/intel_rdt/_index.md | 2 +- .../v1/input-plugins/internal/_index.md | 2 +- .../v1/input-plugins/internet_speed/_index.md | 2 +- .../v1/input-plugins/interrupts/_index.md | 2 +- .../v1/input-plugins/ipmi_sensor/_index.md | 2 +- .../telegraf/v1/input-plugins/ipset/_index.md | 2 +- .../v1/input-plugins/iptables/_index.md | 2 +- .../telegraf/v1/input-plugins/ipvs/_index.md | 2 +- .../v1/input-plugins/jenkins/_index.md | 2 +- .../v1/input-plugins/jolokia2_agent/_index.md | 2 +- .../v1/input-plugins/jolokia2_proxy/_index.md | 2 +- .../jti_openconfig_telemetry/_index.md | 2 +- .../v1/input-plugins/kafka_consumer/_index.md | 2 +- .../v1/input-plugins/kapacitor/_index.md | 2 +- .../v1/input-plugins/kernel/_index.md | 2 +- .../v1/input-plugins/kernel_vmstat/_index.md | 2 +- .../v1/input-plugins/kibana/_index.md | 2 +- .../input-plugins/kinesis_consumer/_index.md | 2 +- .../v1/input-plugins/knx_listener/_index.md | 2 +- .../v1/input-plugins/kube_inventory/_index.md | 2 +- .../v1/input-plugins/kubernetes/_index.md | 2 +- .../telegraf/v1/input-plugins/lanz/_index.md | 2 +- .../telegraf/v1/input-plugins/ldap/_index.md | 2 +- .../telegraf/v1/input-plugins/leofs/_index.md | 2 +- .../v1/input-plugins/libvirt/_index.md | 2 +- .../v1/input-plugins/linux_cpu/_index.md | 2 +- .../input-plugins/linux_sysctl_fs/_index.md | 2 +- .../telegraf/v1/input-plugins/logql/_index.md | 5 +- 
.../v1/input-plugins/logstash/_index.md | 2 +- .../v1/input-plugins/lustre2/_index.md | 2 +- .../telegraf/v1/input-plugins/lvm/_index.md | 2 +- .../v1/input-plugins/mailchimp/_index.md | 2 +- .../v1/input-plugins/marklogic/_index.md | 2 +- .../v1/input-plugins/mavlink/_index.md | 2 +- .../v1/input-plugins/mcrouter/_index.md | 2 +- .../v1/input-plugins/mdstat/_index.md | 2 +- .../telegraf/v1/input-plugins/mem/_index.md | 2 +- .../v1/input-plugins/memcached/_index.md | 2 +- .../telegraf/v1/input-plugins/mesos/_index.md | 2 +- .../v1/input-plugins/minecraft/_index.md | 2 +- .../telegraf/v1/input-plugins/mock/_index.md | 2 +- .../v1/input-plugins/modbus/_index.md | 2 +- .../v1/input-plugins/mongodb/_index.md | 2 +- .../telegraf/v1/input-plugins/monit/_index.md | 2 +- .../v1/input-plugins/mqtt_consumer/_index.md | 8 +- .../v1/input-plugins/multifile/_index.md | 2 +- .../telegraf/v1/input-plugins/mysql/_index.md | 12 +- .../telegraf/v1/input-plugins/nats/_index.md | 2 +- .../v1/input-plugins/nats_consumer/_index.md | 2 +- .../v1/input-plugins/neoom_beaam/_index.md | 2 +- .../v1/input-plugins/neptune_apex/_index.md | 2 +- .../telegraf/v1/input-plugins/net/_index.md | 2 +- .../v1/input-plugins/net_response/_index.md | 2 +- .../v1/input-plugins/netflow/_index.md | 2 +- .../v1/input-plugins/netstat/_index.md | 2 +- .../v1/input-plugins/nfsclient/_index.md | 2 +- .../v1/input-plugins/nftables/_index.md | 44 +- .../telegraf/v1/input-plugins/nginx/_index.md | 2 +- .../v1/input-plugins/nginx_plus/_index.md | 2 +- .../v1/input-plugins/nginx_plus_api/_index.md | 2 +- .../v1/input-plugins/nginx_sts/_index.md | 2 +- .../nginx_upstream_check/_index.md | 2 +- .../v1/input-plugins/nginx_vts/_index.md | 2 +- .../telegraf/v1/input-plugins/nomad/_index.md | 2 +- .../telegraf/v1/input-plugins/nsd/_index.md | 2 +- .../telegraf/v1/input-plugins/nsdp/_index.md | 2 +- .../telegraf/v1/input-plugins/nsq/_index.md | 2 +- .../v1/input-plugins/nsq_consumer/_index.md | 2 +- 
.../telegraf/v1/input-plugins/nstat/_index.md | 2 +- .../telegraf/v1/input-plugins/ntpq/_index.md | 2 +- .../v1/input-plugins/nvidia_smi/_index.md | 2 +- .../telegraf/v1/input-plugins/opcua/_index.md | 46 +- .../v1/input-plugins/opcua_listener/_index.md | 66 +- .../v1/input-plugins/openldap/_index.md | 2 +- .../v1/input-plugins/openntpd/_index.md | 2 +- .../input-plugins/opensearch_query/_index.md | 2 +- .../v1/input-plugins/opensmtpd/_index.md | 2 +- .../v1/input-plugins/openstack/_index.md | 2 +- .../v1/input-plugins/opentelemetry/_index.md | 2 +- .../v1/input-plugins/openweathermap/_index.md | 2 +- .../v1/input-plugins/p4runtime/_index.md | 2 +- .../v1/input-plugins/passenger/_index.md | 2 +- .../telegraf/v1/input-plugins/pf/_index.md | 2 +- .../v1/input-plugins/pgbouncer/_index.md | 2 +- .../v1/input-plugins/phpfpm/_index.md | 2 +- .../telegraf/v1/input-plugins/ping/_index.md | 2 +- .../v1/input-plugins/postfix/_index.md | 2 +- .../v1/input-plugins/postgresql/_index.md | 2 +- .../postgresql_extensible/_index.md | 2 +- .../v1/input-plugins/powerdns/_index.md | 2 +- .../input-plugins/powerdns_recursor/_index.md | 2 +- .../v1/input-plugins/processes/_index.md | 2 +- .../v1/input-plugins/procstat/_index.md | 2 +- .../v1/input-plugins/prometheus/_index.md | 2 +- .../v1/input-plugins/promql/_index.md | 5 +- .../v1/input-plugins/proxmox/_index.md | 2 +- .../v1/input-plugins/puppetagent/_index.md | 2 +- .../v1/input-plugins/rabbitmq/_index.md | 2 +- .../v1/input-plugins/radius/_index.md | 2 +- .../v1/input-plugins/raindrops/_index.md | 2 +- .../telegraf/v1/input-plugins/ras/_index.md | 2 +- .../v1/input-plugins/ravendb/_index.md | 2 +- .../v1/input-plugins/redfish/_index.md | 2 +- .../telegraf/v1/input-plugins/redis/_index.md | 2 +- .../v1/input-plugins/redis_sentinel/_index.md | 2 +- .../v1/input-plugins/rethinkdb/_index.md | 2 +- .../telegraf/v1/input-plugins/riak/_index.md | 2 +- .../input-plugins/riemann_listener/_index.md | 2 +- 
.../v1/input-plugins/s7comm/_index.md | 2 +- .../v1/input-plugins/salesforce/_index.md | 2 +- .../v1/input-plugins/sensors/_index.md | 2 +- .../telegraf/v1/input-plugins/sflow/_index.md | 2 +- .../telegraf/v1/input-plugins/sip/_index.md | 182 - .../telegraf/v1/input-plugins/slab/_index.md | 2 +- .../telegraf/v1/input-plugins/slurm/_index.md | 2 +- .../telegraf/v1/input-plugins/smart/_index.md | 15 +- .../v1/input-plugins/smartctl/_index.md | 2 +- .../telegraf/v1/input-plugins/snmp/_index.md | 2 +- .../v1/input-plugins/snmp_trap/_index.md | 2 +- .../input-plugins/socket_listener/_index.md | 2 +- .../v1/input-plugins/socketstat/_index.md | 2 +- .../telegraf/v1/input-plugins/solr/_index.md | 2 +- .../telegraf/v1/input-plugins/sql/_index.md | 2 +- .../v1/input-plugins/sqlserver/_index.md | 2 +- .../v1/input-plugins/stackdriver/_index.md | 2 +- .../v1/input-plugins/statsd/_index.md | 40 +- .../v1/input-plugins/supervisor/_index.md | 2 +- .../v1/input-plugins/suricata/_index.md | 2 +- .../telegraf/v1/input-plugins/swap/_index.md | 2 +- .../v1/input-plugins/synproxy/_index.md | 2 +- .../v1/input-plugins/syslog/_index.md | 2 +- .../v1/input-plugins/sysstat/_index.md | 2 +- .../v1/input-plugins/system/_index.md | 2 +- .../v1/input-plugins/systemd_units/_index.md | 2 +- .../v1/input-plugins/tacacs/_index.md | 2 +- .../telegraf/v1/input-plugins/tail/_index.md | 2 +- .../v1/input-plugins/teamspeak/_index.md | 2 +- .../telegraf/v1/input-plugins/temp/_index.md | 2 +- .../v1/input-plugins/tengine/_index.md | 2 +- .../telegraf/v1/input-plugins/timex/_index.md | 2 +- .../v1/input-plugins/tomcat/_index.md | 2 +- .../telegraf/v1/input-plugins/trig/_index.md | 2 +- .../v1/input-plugins/turbostat/_index.md | 2 +- .../v1/input-plugins/twemproxy/_index.md | 2 +- .../v1/input-plugins/unbound/_index.md | 2 +- .../telegraf/v1/input-plugins/upsd/_index.md | 2 +- .../telegraf/v1/input-plugins/uwsgi/_index.md | 2 +- .../v1/input-plugins/varnish/_index.md | 2 +- 
.../telegraf/v1/input-plugins/vault/_index.md | 2 +- .../v1/input-plugins/vsphere/_index.md | 2 +- .../v1/input-plugins/webhooks/_index.md | 2 +- .../telegraf/v1/input-plugins/whois/_index.md | 2 +- .../v1/input-plugins/win_eventlog/_index.md | 2 +- .../input-plugins/win_perf_counters/_index.md | 2 +- .../v1/input-plugins/win_services/_index.md | 2 +- .../v1/input-plugins/win_wmi/_index.md | 2 +- .../v1/input-plugins/wireguard/_index.md | 2 +- .../v1/input-plugins/wireless/_index.md | 2 +- .../v1/input-plugins/x509_cert/_index.md | 2 +- .../v1/input-plugins/xtremio/_index.md | 2 +- .../telegraf/v1/input-plugins/zfs/_index.md | 2 +- .../v1/input-plugins/zipkin/_index.md | 2 +- .../v1/input-plugins/zookeeper/_index.md | 2 +- .../telegraf/v1/output-plugins/amon/_index.md | 2 +- .../telegraf/v1/output-plugins/amqp/_index.md | 2 +- .../application_insights/_index.md | 2 +- .../telegraf/v1/output-plugins/arc/_index.md | 2 +- .../azure_data_explorer/_index.md | 2 +- .../v1/output-plugins/azure_monitor/_index.md | 2 +- .../v1/output-plugins/bigquery/_index.md | 2 +- .../v1/output-plugins/clarify/_index.md | 2 +- .../v1/output-plugins/cloud_pubsub/_index.md | 2 +- .../v1/output-plugins/cloudwatch/_index.md | 2 +- .../output-plugins/cloudwatch_logs/_index.md | 2 +- .../v1/output-plugins/cratedb/_index.md | 2 +- .../v1/output-plugins/datadog/_index.md | 2 +- .../v1/output-plugins/discard/_index.md | 2 +- .../v1/output-plugins/dynatrace/_index.md | 2 +- .../v1/output-plugins/elasticsearch/_index.md | 2 +- .../v1/output-plugins/event_hubs/_index.md | 2 +- .../telegraf/v1/output-plugins/exec/_index.md | 2 +- .../v1/output-plugins/execd/_index.md | 2 +- .../telegraf/v1/output-plugins/file/_index.md | 2 +- .../v1/output-plugins/graphite/_index.md | 2 +- .../v1/output-plugins/graylog/_index.md | 2 +- .../v1/output-plugins/groundwork/_index.md | 2 +- .../v1/output-plugins/health/_index.md | 5 +- .../v1/output-plugins/heartbeat/_index.md | 175 +- 
.../telegraf/v1/output-plugins/http/_index.md | 5 +- .../v1/output-plugins/influxdb/_index.md | 2 +- .../v1/output-plugins/influxdb_v2/_index.md | 4 +- .../v1/output-plugins/influxdb_v3/_index.md | 146 - .../v1/output-plugins/inlong/_index.md | 2 +- .../v1/output-plugins/instrumental/_index.md | 2 +- .../v1/output-plugins/iotdb/_index.md | 2 +- .../v1/output-plugins/kafka/_index.md | 2 +- .../v1/output-plugins/kinesis/_index.md | 2 +- .../v1/output-plugins/librato/_index.md | 2 +- .../v1/output-plugins/logzio/_index.md | 2 +- .../telegraf/v1/output-plugins/loki/_index.md | 2 +- .../output-plugins/microsoft_fabric/_index.md | 2 +- .../v1/output-plugins/mongodb/_index.md | 62 +- .../telegraf/v1/output-plugins/mqtt/_index.md | 2 +- .../telegraf/v1/output-plugins/nats/_index.md | 2 +- .../nebius_cloud_monitoring/_index.md | 2 +- .../v1/output-plugins/newrelic/_index.md | 2 +- .../telegraf/v1/output-plugins/nsq/_index.md | 2 +- .../v1/output-plugins/opensearch/_index.md | 2 +- .../v1/output-plugins/opentelemetry/_index.md | 10 +- .../v1/output-plugins/opentsdb/_index.md | 2 +- .../v1/output-plugins/parquet/_index.md | 2 +- .../v1/output-plugins/postgresql/_index.md | 2 +- .../prometheus_client/_index.md | 8 +- .../telegraf/v1/output-plugins/quix/_index.md | 2 +- .../output-plugins/redistimeseries/_index.md | 9 +- .../v1/output-plugins/remotefile/_index.md | 2 +- .../v1/output-plugins/riemann/_index.md | 2 +- .../v1/output-plugins/sensu/_index.md | 2 +- .../v1/output-plugins/signalfx/_index.md | 2 +- .../v1/output-plugins/socket_writer/_index.md | 2 +- .../telegraf/v1/output-plugins/sql/_index.md | 2 +- .../v1/output-plugins/stackdriver/_index.md | 13 +- .../v1/output-plugins/stomp/_index.md | 2 +- .../v1/output-plugins/sumologic/_index.md | 2 +- .../v1/output-plugins/syslog/_index.md | 2 +- .../v1/output-plugins/timestream/_index.md | 2 +- .../v1/output-plugins/warp10/_index.md | 2 +- .../v1/output-plugins/wavefront/_index.md | 2 +- 
.../v1/output-plugins/websocket/_index.md | 2 +- .../yandex_cloud_monitoring/_index.md | 2 +- .../v1/output-plugins/zabbix/_index.md | 2 +- .../v1/processor-plugins/aws_ec2/_index.md | 2 +- .../v1/processor-plugins/batch/_index.md | 2 +- .../v1/processor-plugins/clone/_index.md | 2 +- .../v1/processor-plugins/converter/_index.md | 2 +- .../cumulative_sum/_index.md | 2 +- .../v1/processor-plugins/date/_index.md | 2 +- .../v1/processor-plugins/dedup/_index.md | 2 +- .../v1/processor-plugins/defaults/_index.md | 2 +- .../v1/processor-plugins/enum/_index.md | 2 +- .../v1/processor-plugins/execd/_index.md | 2 +- .../v1/processor-plugins/filepath/_index.md | 2 +- .../v1/processor-plugins/filter/_index.md | 2 +- .../v1/processor-plugins/ifname/_index.md | 2 +- .../v1/processor-plugins/lookup/_index.md | 2 +- .../v1/processor-plugins/noise/_index.md | 2 +- .../v1/processor-plugins/override/_index.md | 2 +- .../v1/processor-plugins/parser/_index.md | 2 +- .../v1/processor-plugins/pivot/_index.md | 2 +- .../v1/processor-plugins/port_name/_index.md | 2 +- .../v1/processor-plugins/printer/_index.md | 2 +- .../v1/processor-plugins/regex/_index.md | 2 +- .../v1/processor-plugins/rename/_index.md | 2 +- .../processor-plugins/reverse_dns/_index.md | 2 +- .../v1/processor-plugins/round/_index.md | 2 +- .../v1/processor-plugins/s2geo/_index.md | 2 +- .../v1/processor-plugins/scale/_index.md | 2 +- .../processor-plugins/snmp_lookup/_index.md | 2 +- .../v1/processor-plugins/split/_index.md | 2 +- .../v1/processor-plugins/starlark/_index.md | 2 +- .../v1/processor-plugins/strings/_index.md | 2 +- .../v1/processor-plugins/tag_limit/_index.md | 2 +- .../v1/processor-plugins/template/_index.md | 2 +- .../v1/processor-plugins/timestamp/_index.md | 2 +- .../v1/processor-plugins/topk/_index.md | 2 +- .../v1/processor-plugins/unpivot/_index.md | 2 +- content/telegraf/v1/release-notes.md | 75 - cypress/e2e/content/api-reference.cy.js | 547 ++- data/labels.yml | 86 - data/notifications.yaml | 2 
+- data/products.yml | 50 +- data/telegraf_plugins.yml | 29 +- ...ed-cloud-dedicated-api-structure-design.md | 154 + ...6-01-07-api-reference-rapidoc-migration.md | 184 + docs/plans/2026-01-21-api-tag-pages-design.md | 145 + .../2026-02-04-api-link-migration-design.md | 174 + ...02-04-api-link-migration-implementation.md | 605 +++ .../2026-02-04-v1-api-deduplication-design.md | 93 + .../2026-02-13-hugo-native-api-migration.md | 344 ++ ...026-02-17-api-clean-regeneration-design.md | 160 + ...7-api-clean-regeneration-implementation.md | 519 +++ .../2026-03-07-api-code-samples-design.md | 125 + docs/plans/TESTING.md | 77 + helper-scripts/label-migration/README.md | 95 - .../label-migration/create-labels.sh | 97 - .../label-migration/delete-labels.sh | 137 - .../label-migration/migrate-labels.sh | 104 - helper-scripts/migrate-api-links.cjs | 331 ++ layouts/{_default => }/LLMS-TXT-README.md | 96 +- layouts/_default/api.html | 112 +- layouts/api/all-endpoints.html | 48 + layouts/api/list.html | 200 + layouts/api/section.html | 86 + layouts/api/single.html | 152 + layouts/partials/api/all-endpoints-list.html | 221 + layouts/partials/api/code-sample.html | 235 + layouts/partials/api/normalize-path.html | 22 + layouts/partials/api/operation.html | 69 + layouts/partials/api/parameter-row.html | 65 + layouts/partials/api/parameters.html | 85 + layouts/partials/api/renderer.html | 12 + layouts/partials/api/request-body.html | 60 + layouts/partials/api/responses.html | 79 + layouts/partials/api/schema.html | 117 + layouts/partials/api/section-children.html | 112 + layouts/partials/api/security-schemes.html | 68 + layouts/partials/api/tag-renderer.html | 68 + layouts/partials/article/related.html | 39 +- layouts/partials/sidebar.html | 6 +- layouts/partials/sidebar/api-menu-items.html | 134 + layouts/partials/sidebar/nested-menu.html | 29 +- package.json | 6 +- scripts/deploy-staging.sh | 2 + scripts/docs-cli/README.md | 2 +- .../__tests__/editor-resolver.test.js | 4 +- 
.../__tests__/process-manager.test.js | 4 +- scripts/docs-cli/lib/editor-resolver.js | 13 +- scripts/puppeteer/QUICK-REFERENCE.md | 26 +- scripts/puppeteer/README.md | 23 +- scripts/puppeteer/SETUP.md | 12 +- .../openapi/influxdb3-enterprise-openapi.yaml | 3799 ----------------- yarn.lock | 63 +- 655 files changed, 22119 insertions(+), 17580 deletions(-) delete mode 100644 .claude/agents/doc-review-agent.md delete mode 100644 .claude/agents/doc-triage-agent.md delete mode 100644 .github/DOC-REVIEW-PIPELINE-PLAN.md delete mode 100644 .github/LABEL_GUIDE.md delete mode 100644 .github/instructions/content-review.instructions.md delete mode 100644 .github/prompts/copilot-visual-review.md delete mode 100644 .github/scripts/resolve-review-urls.js delete mode 100644 .github/scripts/workflow-utils.js delete mode 100644 .github/templates/review-comment.md delete mode 100644 .github/workflows/auto-label.yml delete mode 100644 .github/workflows/doc-review.yml create mode 100644 PLAN.md create mode 100644 api-docs/enterprise_influxdb/v1/.config.yml create mode 100644 api-docs/enterprise_influxdb/v1/v1/content/info.yml create mode 100644 api-docs/enterprise_influxdb/v1/v1/content/servers.yml create mode 100644 api-docs/enterprise_influxdb/v1/v1/ref.yml delete mode 100644 api-docs/influxdb/cloud/v2/content/tag-groups.yml create mode 100644 api-docs/influxdb/v1/.config.yml create mode 100644 api-docs/influxdb/v1/v1/content/info.yml create mode 100644 api-docs/influxdb/v1/v1/content/servers.yml create mode 100644 api-docs/influxdb/v1/v1/ref.yml delete mode 100644 api-docs/influxdb/v2/v2/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/clustered/management/content/tag-groups.yml delete mode 100644 
api-docs/influxdb3/clustered/v2/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/core/v3/content/tag-groups.yml rename static/openapi/influxdb3-core-openapi.yaml => api-docs/influxdb3/core/v3/ref.yml (66%) delete mode 100644 api-docs/influxdb3/enterprise/v3/content/tag-groups.yml delete mode 100644 api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml rename api-docs/influxdb3/{core/v3/influxdb3-core-openapi.yaml => enterprise/v3/ref.yml} (74%) delete mode 100644 api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs create mode 100644 api-docs/scripts/README.md create mode 100644 api-docs/scripts/dist/generate-openapi-articles.js create mode 100644 api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js create mode 100644 api-docs/scripts/generate-openapi-articles.ts create mode 100644 api-docs/scripts/openapi-paths-to-hugo-data/index.ts create mode 100644 api-docs/scripts/openapi-paths-to-hugo-data/package.json create mode 100644 api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock create mode 100644 api-docs/scripts/tsconfig.json create mode 100644 assets/js/components/api-toc.ts create mode 100644 assets/styles/layouts/_api-code-samples.scss create mode 100644 assets/styles/layouts/_api-layout.scss create mode 100644 assets/styles/layouts/_api-operations.scss create mode 100644 assets/styles/layouts/_api-security-schemes.scss delete mode 100644 content/influxdb/cloud/reference/api/_index.md delete mode 100644 content/influxdb/v2/reference/api/_index.md delete mode 100644 content/influxdb3/cloud-dedicated/reference/api/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/api-compatibility/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/authentication/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/authorizations-api-tokens/_index.md create mode 100644 
content/influxdb3/cloud-serverless/api/bucket-schemas/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/buckets/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/common-parameters/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/dbrps/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/delete/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/headers/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/invokable-scripts/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/limits/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/organizations/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/pagination/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/quick-start/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/resources/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/response-codes/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/routes/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/secrets/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/security-and-access-endpoints/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/supported-operations/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/system-information-endpoints/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/tasks/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/telegrafs/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/templates/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/usage/_index.md create mode 100644 content/influxdb3/cloud-serverless/api/variables/_index.md delete mode 100644 content/influxdb3/cloud-serverless/reference/api/_index.md delete mode 100644 content/influxdb3/clustered/reference/api/_index.md create mode 100644 
content/influxdb3/core/get-started/migrate-from-influxdb-v1-v2.md create mode 100644 content/influxdb3/enterprise/get-started/migrate-from-influxdb-v1-v2.md create mode 100644 content/shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md delete mode 100644 content/telegraf/v1/input-plugins/sip/_index.md delete mode 100644 content/telegraf/v1/output-plugins/influxdb_v3/_index.md delete mode 100644 data/labels.yml create mode 100644 docs/plans/2025-02-06-clustered-cloud-dedicated-api-structure-design.md create mode 100644 docs/plans/2026-01-07-api-reference-rapidoc-migration.md create mode 100644 docs/plans/2026-01-21-api-tag-pages-design.md create mode 100644 docs/plans/2026-02-04-api-link-migration-design.md create mode 100644 docs/plans/2026-02-04-api-link-migration-implementation.md create mode 100644 docs/plans/2026-02-04-v1-api-deduplication-design.md create mode 100644 docs/plans/2026-02-13-hugo-native-api-migration.md create mode 100644 docs/plans/2026-02-17-api-clean-regeneration-design.md create mode 100644 docs/plans/2026-02-17-api-clean-regeneration-implementation.md create mode 100644 docs/plans/2026-03-07-api-code-samples-design.md create mode 100644 docs/plans/TESTING.md delete mode 100644 helper-scripts/label-migration/README.md delete mode 100755 helper-scripts/label-migration/create-labels.sh delete mode 100755 helper-scripts/label-migration/delete-labels.sh delete mode 100755 helper-scripts/label-migration/migrate-labels.sh create mode 100755 helper-scripts/migrate-api-links.cjs rename layouts/{_default => }/LLMS-TXT-README.md (63%) create mode 100644 layouts/api/all-endpoints.html create mode 100644 layouts/api/list.html create mode 100644 layouts/api/section.html create mode 100644 layouts/api/single.html create mode 100644 layouts/partials/api/all-endpoints-list.html create mode 100644 layouts/partials/api/code-sample.html create mode 100644 layouts/partials/api/normalize-path.html create mode 100644 layouts/partials/api/operation.html 
create mode 100644 layouts/partials/api/parameter-row.html create mode 100644 layouts/partials/api/parameters.html create mode 100644 layouts/partials/api/renderer.html create mode 100644 layouts/partials/api/request-body.html create mode 100644 layouts/partials/api/responses.html create mode 100644 layouts/partials/api/schema.html create mode 100644 layouts/partials/api/section-children.html create mode 100644 layouts/partials/api/security-schemes.html create mode 100644 layouts/partials/api/tag-renderer.html create mode 100644 layouts/partials/sidebar/api-menu-items.html delete mode 100644 static/openapi/influxdb3-enterprise-openapi.yaml diff --git a/.claude/agents/doc-review-agent.md b/.claude/agents/doc-review-agent.md deleted file mode 100644 index a16adae0da..0000000000 --- a/.claude/agents/doc-review-agent.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -name: doc-review-agent -description: | - Diff-only PR review agent for documentation changes. Reviews Markdown - changes against style guide, frontmatter rules, shortcode syntax, and - documentation standards. Available for local Claude Code review sessions. -model: sonnet ---- - -You are a documentation review agent for the InfluxData docs-v2 repository. -Your job is to review PR diffs for documentation quality issues. You review -Markdown source only — visual/rendered review is handled separately by Copilot. - -## Review Scope - -Check the PR diff for these categories. Reference the linked docs for -detailed rules — do not invent rules that aren't documented. - -### 1. Frontmatter - -Rules: [DOCS-FRONTMATTER.md](../../DOCS-FRONTMATTER.md) - -- `title` and `description` are required on every page -- `menu` structure matches the product's menu key -- `weight` is present and uses the correct range (1-99, 101-199, etc.) -- `source` paths for shared content point to valid `/shared/` paths -- No duplicate or conflicting frontmatter keys - -### 2. 
Shortcode Syntax - -Rules: [DOCS-SHORTCODES.md](../../DOCS-SHORTCODES.md) - -- Shortcodes use correct opening/closing syntax (`{{< >}}` vs `{{% %}}` - depending on whether inner content is Markdown) -- Required parameters are present -- Closing tags match opening tags -- Callouts use GitHub-style syntax: `> [!Note]`, `> [!Warning]`, etc. - -### 3. Semantic Line Feeds - -Rules: [DOCS-CONTRIBUTING.md](../../DOCS-CONTRIBUTING.md) - -- One sentence per line -- Long sentences should be on their own line, not concatenated - -### 4. Heading Hierarchy - -- No h1 headings in content (h1 comes from `title` frontmatter) -- Headings don't skip levels (h2 → h4 without h3) - -### 5. Terminology and Product Names - -- Use official product names: "InfluxDB 3 Core", "InfluxDB 3 Enterprise", - "InfluxDB Cloud Serverless", "InfluxDB Cloud Dedicated", etc. -- Don't mix v2/v3 terminology in v3 docs (e.g., "bucket" in Core docs) -- Version references match the content path - -### 6. Links - -- Internal links use relative paths or Hugo `relref` shortcodes -- No hardcoded `docs.influxdata.com` links in content files -- Anchor links match actual heading IDs - -### 7. Shared Content - -- `source:` frontmatter points to an existing shared file path -- Shared files don't contain frontmatter (only content) -- Changes to shared content are intentional (affects multiple products) - -## Output Format - -Follow the shared review comment format, severity definitions, and label -mapping in -[.github/templates/review-comment.md](../../.github/templates/review-comment.md). 
- -## What NOT to Review - -- Rendered HTML appearance (Copilot handles this) -- Code correctness inside code blocks (pytest handles this) -- Link validity (link-checker workflow handles this) -- Vale style linting (Vale handles this) -- Files outside the diff diff --git a/.claude/agents/doc-triage-agent.md b/.claude/agents/doc-triage-agent.md deleted file mode 100644 index 59a4b4effa..0000000000 --- a/.claude/agents/doc-triage-agent.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -name: doc-triage-agent -description: | - Triage agent for documentation issues and PRs. Applies product labels, - assesses priority, and determines readiness for automated workflows. - Uses data/products.yml as the single source of truth for path-to-product - mapping. -model: sonnet ---- - -You are a documentation triage agent for the InfluxData docs-v2 repository. -Your job is to label, prioritize, and route issues and PRs for the -documentation team. - -## Label Taxonomy - -Apply labels using the definitions in these source files: - -- **Product labels** (`product:*`): Read - [data/products.yml](../../data/products.yml) — match changed file paths - against each product's `content_path`, apply `product:{label_group}`. - Apply all matching labels. For shared content, apply `product:shared` plus - labels for all products that reference the shared file. -- **Non-product labels**: Read - [data/labels.yml](../../data/labels.yml) for all source, waiting, workflow, - and review label names and descriptions. -- **Review labels** (`review:*`): Defined in `data/labels.yml` but applied - only by the doc-review workflow, not during triage. - -## Priority Assessment - -Assess priority based on: - -1. **Product tier:** InfluxDB 3 Core/Enterprise > Cloud Dedicated/Serverless > v2 > v1 -2. **Issue type:** Incorrect information > missing content > style issues -3. **Scope:** Security/data-loss implications > functional docs > reference docs -4. 
**Staleness:** Issues with `waiting:*` labels older than 14 days should be - escalated or re-triaged - -## Decision Logic - -### When to apply `agent-ready` - -Apply when ALL of these are true: -- The issue has clear, actionable requirements -- No external dependencies (no `waiting:*` labels) -- The fix is within the documentation scope (not a product bug) -- Product labels are applied (agent needs to know which content to modify) - -### When to apply `waiting:*` - -Apply when the issue: -- References undocumented API behavior → `waiting:engineering` -- Requires a product decision about feature naming or scope → `waiting:product` -- Needs clarification from the reporter about expected behavior → add a comment asking, don't apply waiting - -### When to apply `review:needs-human` - -Apply during triage only if: -- The issue involves complex cross-product implications -- The content change could affect shared content used by many products -- The issue requires domain expertise the agent doesn't have - -## Triage Workflow - -1. Read the issue/PR title and body -2. Identify affected products from content paths or mentions -3. Apply product labels -4. Apply source label if applicable -5. Assess whether the issue is ready for agent work -6. Apply `agent-ready` or `waiting:*` as appropriate -7. 
Post a brief triage comment summarizing the labeling decision diff --git a/.claude/agents/influxdb3-tech-writer.md b/.claude/agents/influxdb3-tech-writer.md index 3953121419..a7ce4cf09e 100644 --- a/.claude/agents/influxdb3-tech-writer.md +++ b/.claude/agents/influxdb3-tech-writer.md @@ -9,6 +9,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' ## Core Expertise Areas **InfluxDB 3 Products & Architecture:** + - **Self-hosted products:** - InfluxDB 3 Core (`influxdata/influxdb/influxdb3*` source code) - open source - InfluxDB 3 Enterprise - licensed @@ -29,6 +30,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' - `content/shared/` - Shared content across products, versions, and editions **APIs & Interfaces:** + - InfluxDB 3 HTTP APIs: - v1 compatibility API (InfluxQL write/query) - v2 compatibility API (Flux) @@ -42,6 +44,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' - Telegraf integration patterns and plugin ecosystem **Documentation Standards:** + - Google Developer Documentation Style guidelines - InfluxData documentation structure and conventions (from CLAUDE.md context) - Hugo shortcodes and frontmatter requirements @@ -51,6 +54,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' ## Your Responsibilities **Content Creation & Review:** + - Write technically accurate documentation that reflects actual product behavior - Create comprehensive API documentation with proper OpenAPI specifications - Develop clear, testable code examples with proper annotations @@ -59,6 +63,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' - Identify when content should be shared vs. 
product-specific **Technical Accuracy:** + - Verify code examples work with current product versions - Cross-reference implementation details with source code when needed - Validate API endpoints, parameters, and response formats @@ -71,6 +76,7 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' - Cloud: Managed features, quotas, billing **Style & Standards Compliance:** + - Apply Google Developer Documentation Style consistently - Use semantic line feeds and proper Markdown formatting - Implement appropriate shortcodes for product-specific content @@ -80,30 +86,25 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' ## Content Development Process -1. **Analyze Requirements:** +1. **Analyze Requirements:** - Understand the target audience, product version(s), and documentation type - Determine if content should be shared or product-specific - -2. **Research Implementation:** +2. **Research Implementation:** - Reference source code, APIs, and existing documentation for accuracy - Identify product-specific behaviors and differences - -3. **Structure Content:** +3. **Structure Content:** - Use appropriate frontmatter, headings, and shortcodes for the content type - Apply shared content pattern when content applies to multiple products - Use product-specific conditionals when needed - -4. **Create Examples:** +4. **Create Examples:** - Develop working, testable code examples with proper annotations - Include examples for relevant products and deployment models - -5. **Apply Standards:** +5. **Apply Standards:** - Ensure compliance with style guidelines and documentation conventions - Use docs CLI tools for content creation and validation - -6. **Cross-Reference:** +6. 
**Cross-Reference:** - Verify consistency with related documentation and product variants - - Add alt_links for cross-product navigation + - Add alt\_links for cross-product navigation - Link related concepts and procedures ## Quality Assurance @@ -114,11 +115,12 @@ You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData' - Use placeholder conventions consistently (UPPERCASE for user-replaceable values) - Ensure proper cross-linking between related concepts and procedures - Verify shared content works correctly across all target products -- Test cross-product navigation (alt_links) +- Test cross-product navigation (alt\_links) ## Product-Specific Considerations **When documenting, consider:** + - **Core vs Enterprise:** Feature availability (clustering, HA, RBAC) - **Self-hosted vs Cloud:** Configuration methods, authentication, quotas - **Clustered vs Dedicated:** Deployment model, scaling, management diff --git a/.claude/agents/ts-component-dev.md b/.claude/agents/ts-component-dev.md index 2f202d96ea..1070424603 100644 --- a/.claude/agents/ts-component-dev.md +++ b/.claude/agents/ts-component-dev.md @@ -197,7 +197,7 @@ assets/js/ │ ├── api-nav.ts # API navigation behavior │ ├── api-toc.ts # Table of contents │ ├── api-tabs.ts # Tab switching -│ └── api-scalar.ts # Scalar/RapiDoc integration +│ └── api-rapidoc.ts # RapiDoc integration └── utils/ ├── dom-helpers.ts # Shared DOM utilities └── debug-helpers.js # Debugging utilities diff --git a/.claude/settings.json b/.claude/settings.json index c21b0380a6..f5838526be 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -1,51 +1,51 @@ { "permissions": { "allow": [ - "Bash(.ci/vale/vale.sh:*)", - "Bash(npm:*)", - "Bash(yarn:*)", - "Bash(pnpm:*)", - "Bash(npx:*)", - "Bash(node:*)", - "Bash(python:*)", - "Bash(python3:*)", - "Bash(pip:*)", - "Bash(poetry:*)", - "Bash(make:*)", - "Bash(cargo:*)", - "Bash(go:*)", - "Bash(curl:*)", - "Bash(gh:*)", - "Bash(hugo:*)", - "Bash(htmlq:*)", - 
"Bash(jq:*)", - "Bash(yq:*)", - "Bash(mkdir:*)", - "Bash(cat:*)", - "Bash(ls:*)", - "Bash(echo:*)", - "Bash(rg:*)", - "Bash(grep:*)", - "Bash(find:*)", - "Bash(bash:*)", - "Bash(wc:*)", - "Bash(sort:*)", - "Bash(uniq:*)", - "Bash(head:*)", - "Bash(tail:*)", - "Bash(awk:*)", - "Bash(touch:*)", - "Bash(docker:*)", - "Edit", - "Read", - "Write", - "Grep", - "Glob", - "LS", - "Skill(superpowers:brainstorming)", - "Skill(superpowers:brainstorming:*)", - "mcp__acp__Bash" - ], + "Bash(.ci/vale/vale.sh:*)", + "Bash(npm:*)", + "Bash(yarn:*)", + "Bash(pnpm:*)", + "Bash(npx:*)", + "Bash(node:*)", + "Bash(python:*)", + "Bash(python3:*)", + "Bash(pip:*)", + "Bash(poetry:*)", + "Bash(make:*)", + "Bash(cargo:*)", + "Bash(go:*)", + "Bash(curl:*)", + "Bash(gh:*)", + "Bash(hugo:*)", + "Bash(htmlq:*)", + "Bash(jq:*)", + "Bash(yq:*)", + "Bash(mkdir:*)", + "Bash(cat:*)", + "Bash(ls:*)", + "Bash(echo:*)", + "Bash(rg:*)", + "Bash(grep:*)", + "Bash(find:*)", + "Bash(bash:*)", + "Bash(wc:*)", + "Bash(sort:*)", + "Bash(uniq:*)", + "Bash(head:*)", + "Bash(tail:*)", + "Bash(awk:*)", + "Bash(touch:*)", + "Bash(docker:*)", + "Edit", + "Read", + "Write", + "Grep", + "Glob", + "LS", + "Skill(superpowers:brainstorming)", + "Skill(superpowers:brainstorming:*)", + "mcp__acp__Bash" + ], "deny": [ "Read(./.env)", "Read(./.env.*)", @@ -58,8 +58,5 @@ "Bash(rm:*)", "Read(/tmp)" ] - }, - "enabledPlugins": { - "github@claude-plugins-official": true } } diff --git a/.claude/skills/content-editing/SKILL.md b/.claude/skills/content-editing/SKILL.md index 863b5c101c..eb35f23e66 100644 --- a/.claude/skills/content-editing/SKILL.md +++ b/.claude/skills/content-editing/SKILL.md @@ -359,12 +359,33 @@ Use the Documentation MCP Server when the information here is inconclusive, when ### Setup -The documentation MCP server is hosted at `https://influxdb-docs.mcp.kapa.ai`—no local installation required. +The documentation MCP server is hosted—no local installation required. 
Add the server URL to your AI assistant's MCP configuration. -Already configured in [`.mcp.json`](/.mcp.json). Two server entries are available: +**MCP server URL:** -- **`influxdb-docs`** (API key) — Set `INFLUXDATA_DOCS_KAPA_API_KEY` env var. 60 req/min. -- **`influxdb-docs-oauth`** (OAuth) — No setup. Authenticates via Google on first use. 40 req/hr, 200 req/day. +```text +https://influxdb-docs.mcp.kapa.ai +``` + +**Claude Desktop configuration** (Settings > Developer): + +```json +{ + "mcpServers": { + "influxdb-docs": { + "url": "https://influxdb-docs.mcp.kapa.ai" + } + } +} +``` + +For other AI assistants see the [InfluxDB documentation MCP server guide](/influxdb3/core/admin/mcp-server/) +and verify the MCP configuration options and syntax for a specific AI assistant. + +**Rate limits** (per Google OAuth user): + +- 40 requests per hour +- 200 requests per day ### Available Tool @@ -531,12 +552,17 @@ touch content/influxdb3/enterprise/path/to/file.md ### MCP Server Not Responding +The hosted MCP server (`https://influxdb-docs.mcp.kapa.ai`) requires: + +1. **Google OAuth authentication** - On first use, sign in with Google +2. **Rate limits** - 40 requests/hour, 200 requests/day per user + **Troubleshooting steps:** -- **API key auth** (`influxdb-docs`): Verify `INFLUXDATA_DOCS_KAPA_API_KEY` is set. Rate limit: 60 req/min. -- **OAuth auth** (`influxdb-docs-oauth`): Sign in with Google on first use. Rate limits: 40 req/hr, 200 req/day. 
-- Verify your network allows connections to `*.kapa.ai` -- Check if you've exceeded rate limits (wait and retry) +- Verify your AI assistant has the MCP server URL configured correctly +- Check if you've exceeded rate limits (wait an hour or until the next day) +- Try re-authenticating by clearing your OAuth session +- Ensure your network allows connections to `*.kapa.ai` ### Cypress Tests Fail diff --git a/.claude/skills/hugo-template-dev/SKILL.md b/.claude/skills/hugo-template-dev/SKILL.md index 2c2d7336a4..b463ca1e83 100644 --- a/.claude/skills/hugo-template-dev/SKILL.md +++ b/.claude/skills/hugo-template-dev/SKILL.md @@ -353,6 +353,24 @@ The **only** acceptable inline scripts are minimal initialization that MUST run Everything else belongs in `assets/js/`. +### File Organization for Components + +``` +assets/ +├── js/ +│ ├── main.js # Entry point, component registry +│ ├── components/ +│ │ ├── api-nav.ts # API navigation behavior +│ │ ├── api-toc.ts # Table of contents +│ │ ├── api-tabs.ts # Tab switching (if needed beyond CSS) +│ │ └── api-rapidoc.ts # RapiDoc integration +│ └── utils/ +│ └── dom-helpers.ts # Shared DOM utilities +└── styles/ + └── layouts/ + └── _api-layout.scss # API-specific styles +``` + ### TypeScript Component Checklist When creating a new interactive feature: diff --git a/.claude/skills/vale-linting/SKILL.md b/.claude/skills/vale-linting/SKILL.md index 760487d522..aa324981cc 100644 --- a/.claude/skills/vale-linting/SKILL.md +++ b/.claude/skills/vale-linting/SKILL.md @@ -299,42 +299,15 @@ echo "systemd" >> .ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt ### Creating a Product-Specific Override -> [!Important] -> Product-specific `.vale.ini` files must include the same disabled rules as the -> root `.vale.ini`. Rules disabled in the root config are **not** inherited by -> product-specific configs. Omitting them re-enables the rules for those products. 
-> For example, omitting `Google.Units = NO` causes duration literals like `7d`, -> `24h` to be flagged as errors in product-specific linting runs. - ```bash # 1. Create product-specific .vale.ini cat > content/influxdb3/cloud-dedicated/.vale.ini << 'EOF' StylesPath = ../../../.ci/vale/styles -MinAlertLevel = warning +MinAlertLevel = error Vocab = InfluxDataDocs -Packages = Google, write-good, Hugo - [*.md] BasedOnStyles = Vale, InfluxDataDocs, Google, write-good - -# These rules must be disabled in every product .vale.ini, same as the root .vale.ini. -Google.Acronyms = NO -Google.DateFormat = NO -Google.Ellipses = NO -Google.Headings = NO -Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. -Google.Units = NO -Vale.Spelling = NO -Vale.Terms = NO -write-good.TooWordy = NO - -TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` - # Product-specific overrides InfluxDataDocs.Branding = YES EOF diff --git a/.claude/skills/vale-rule-config/SKILL.md b/.claude/skills/vale-rule-config/SKILL.md index 0ef5c04db3..332b5115cf 100644 --- a/.claude/skills/vale-rule-config/SKILL.md +++ b/.claude/skills/vale-rule-config/SKILL.md @@ -102,7 +102,6 @@ level: warning ### Critical: Vale Uses regexp2, Not RE2 - Vale uses the [regexp2](https://pkg.go.dev/github.com/dlclark/regexp2) library, **not** Go's standard `regexp` package (which uses RE2). This is a common source of confusion because Vale is written in Go. ### Supported Regex Features @@ -155,11 +154,13 @@ tokens: ### tokens vs raw **tokens:** + - Automatically wrapped in word boundaries - Converted to non-capturing groups - Good for simple patterns **raw:** + - Full control over the pattern - No automatic processing - Use for complex regex @@ -333,16 +334,19 @@ print(matches2) # Should be empty ### Common Issues **Pattern not matching:** + 1. 
Check if you need `nonword: true` for punctuation 2. Verify scope is appropriate (`sentence`, `heading`, etc.) 3. Test with `raw` instead of `tokens` for complex patterns **Too many false positives:** + 1. Add exceptions using negative lookahead/lookbehind 2. Adjust scope to be more specific 3. Consider using substitution rule with exceptions **Pattern works in Python but not Vale:** + - Unlikely if you're using PCRE features (Vale supports them) - Check for differences in whitespace handling - Try `raw` field for exact pattern control diff --git a/.github/DOC-REVIEW-PIPELINE-PLAN.md b/.github/DOC-REVIEW-PIPELINE-PLAN.md deleted file mode 100644 index 60815f485f..0000000000 --- a/.github/DOC-REVIEW-PIPELINE-PLAN.md +++ /dev/null @@ -1,704 +0,0 @@ -# Doc Review Pipeline — Implementation Plan - -**Status:** Complete — all phases implemented and tested -**Repository:** influxdata/docs-v2 -**Author:** Triage agent (Claude Code) -**Date:** 2026-02-28 - ---- - -## Table of Contents - -1. [Goal](#goal) -2. [What Already Exists](#what-already-exists) -3. [Architecture Overview](#architecture-overview) -4. [Phase 1: Label System Overhaul](#phase-1-label-system-overhaul) -5. [Phase 2: Doc Review Workflow](#phase-2-doc-review-workflow) -6. [Phase 3: Documentation and Agent Instructions](#phase-3-documentation-and-agent-instructions) -7. [Future Phases (Not In Scope)](#future-phases-not-in-scope) -8. [Decisions (Resolved)](#decisions-resolved) -9. [Risk Assessment](#risk-assessment) - ---- - -## Goal - -Build two interconnected systems: - -1. **Label system** — An automation-driven label taxonomy that supports - cross-repo automation, agentic workflows, and human-in-the-loop review. -2. 
**Doc review pipeline** — A GitHub Actions workflow that automates - documentation PR review using Copilot for both code review (diff-based, - using auto-loaded instruction files) and visual review (rendered HTML - at preview URLs), with rendered-page verification that catches issues - invisible in the Markdown source. - -The pipeline catches issues only visible in rendered output — expanded -shortcodes, broken layouts, incorrect product names — by having Copilot -analyze the rendered HTML of deployed preview pages. - ---- - -## What Already Exists - -### Infrastructure - -| Component | Location | Notes | -|-----------|----------|-------| -| PR preview deployment | `.github/workflows/pr-preview.yml` | Builds Hugo site, deploys to `gh-pages` branch at `influxdata.github.io/docs-v2/pr-preview/pr-{N}/` | -| Changed file detection | `.github/scripts/detect-preview-pages.js` | Detects changed files, maps content to public URLs, handles shared content | -| Content-to-URL mapping | `scripts/lib/content-utils.js` | `getChangedContentFiles()`, `mapContentToPublic()`, `expandSharedContentChanges()` | -| Screenshot tooling | `scripts/puppeteer/screenshot.js` | Puppeteer-based screenshot utility (already a dependency) | -| Playwright | `package.json` | Already a dependency (`^1.58.1`) | -| Claude agent instructions | `CLAUDE.md`, `AGENTS.md`, `.claude/` | Review criteria, style guide, skills, commands | -| Copilot instructions | `.github/copilot-instructions.md` | Style guide, repo structure, patterns | -| Copilot pattern instructions | `.github/instructions/` | Auto-loaded by Copilot based on changed file patterns | -| Auto-labeling (path-based) | Not yet implemented | Needed for Phase 1 | -| Link checker workflow | `.github/workflows/pr-link-check.yml` | Validates links on PR changes | -| Sync plugins workflow | `.github/workflows/sync-plugins.yml` | Issue-triggered workflow pattern to follow | -| Audit documentation workflow | `.github/workflows/audit-documentation.yml` | 
Creates issues from audit results | - -### Labels (Current State) - -The repo has 30+ labels with inconsistent naming patterns and significant -overlap. Product labels use long names (`InfluxDB 3 Core and Enterprise`), -workflow states are minimal (`release:pending` is the only actively used one), -and there is no agent-readiness or blocking-state taxonomy. - ---- - -## Architecture Overview - -``` -PR opened/updated (content paths) - │ - ├──────────────────────────┐ - ▼ ▼ -┌─ Job 1: Resolve URLs ────┐ ┌─ Job 2: Copilot Code Review ───┐ -│ resolve-review-urls.js │ │ gh pr edit --add-reviewer │ -│ changed files → URLs │ │ copilot-reviews │ -│ Output: url list │ │ Uses .github/instructions/ │ -└──────────┬───────────────┘ │ for auto-loaded review rules │ - │ └──────────────┬─────────────────┘ - ▼ │ -┌─ Job 3: Copilot Visual Review ────────┐ │ -│ Wait for preview deployment │ │ -│ Post preview URLs + review prompt │ │ -│ @copilot analyzes rendered HTML │ │ -│ Checks: layout, shortcodes, 404s │ │ -└──────────────┬───────────────────────┘ │ - │ │ - ▼ ▼ - Human reviews what remains -``` - -**Job 2 (Copilot code review) runs in parallel with Jobs 1→3** — it uses -GitHub's native Copilot reviewer, which analyzes the PR diff using -auto-loaded instruction files from `.github/instructions/`. - ---- - -## Phase 1: Label System Overhaul - -### Rationale - -The label system is a prerequisite for agentic workflows. Agents need clear -signals about issue readiness (`agent-ready`), blocking states -(`waiting:engineering`, `waiting:product`), and product scope -(`product:v3-monolith`, `product:v3-distributed`). -Consistent label patterns also enable GitHub API queries for dashboards and -automation. - -### 1.1 — Label taxonomy - -> **Note:** The tables below are a planning snapshot. The authoritative -> definitions live in `data/labels.yml` (non-product labels) and -> `data/products.yml` (product labels). See `.github/LABEL_GUIDE.md` for -> the current index. 
- -**24 labels organized into 6 categories:** - -#### Product labels (11) — Color: `#FFA500` (yellow) - -| Label | Description | -|-------|-------------| -| `product:v3-monolith` | InfluxDB 3 Core and Enterprise (single-node / clusterable) | -| `product:v3-distributed` | InfluxDB 3 Cloud Serverless, Cloud Dedicated, Clustered | -| `product:v2` | InfluxDB v2 (Cloud, OSS) | -| `product:v1` | InfluxDB v1 OSS | -| `product:v1-enterprise` | InfluxDB Enterprise v1 | -| `product:telegraf` | Telegraf documentation | -| `product:chronograf` | Chronograf documentation | -| `product:kapacitor` | Kapacitor documentation | -| `product:flux` | Flux language documentation | -| `product:explorer` | InfluxDB 3 Explorer | -| `product:shared` | Shared content across products | - -#### Source tracking labels (4) — Color: `#9370DB` (purple) - -| Label | Description | -|-------|-------------| -| `source:auto-detected` | Created by change detection within this repo | -| `source:dar` | Generated by DAR pipeline (issue analysis → draft) | -| `source:sync` | Synced from an external repository | -| `source:manual` | Human-created issue | - -#### Waiting states (2) — Color: `#FF8C00` (orange) - -| Label | Description | -|-------|-------------| -| `waiting:engineering` | Waiting for engineer confirmation | -| `waiting:product` | Waiting for product/PM decision | - -#### Workflow states (2) — Color: `#00FF00` / `#1E90FF` - -| Label | Description | -|-------|-------------| -| `agent-ready` | Agent can work on this autonomously | -| `skip-review` | Skip automated doc review pipeline | - -> [!Note] -> Human codeowner approval uses GitHub's native PR review mechanism (CODEOWNERS file), not a label. The `review:*` labels below are applied **manually** after reviewing Copilot feedback. 
- -#### Review outcome labels (3) — Color: `#28A745` / `#DC3545` / `#FFC107` - -| Label | Description | -|-------|-------------| -| `review:approved` | Review passed — no blocking issues found | -| `review:changes-requested` | Review found blocking issues | -| `review:needs-human` | Review inconclusive, needs human | - -> [!Note] -> All labels use colons (`:`) as separators for consistency. The `review:*` labels -> are mutually exclusive. They are applied manually after review — the CI workflow -> does not manage labels. Copilot code review uses GitHub's native "Comment" -> review type. - -#### Existing labels to keep (renamed) (2) - -| Old Name | New Name | Description | -|----------|----------|-------------| -| `AI assistant tooling` | `ai:tooling` | Related to AI assistant infrastructure | -| `ci:testing-and-validation` | `ci:testing` | CI/testing infrastructure | - -### 1.2 — Migration scripts - -Create migration scripts in `helper-scripts/label-migration/`: - -- **`create-labels.sh`** — Creates all new labels using `gh label create --force` (idempotent) -- **`migrate-labels.sh`** — Migrates existing issues from old labels to new labels using `gh issue edit` -- **`delete-labels.sh`** — Deletes old labels (requires interactive confirmation) -- **`README.md`** — Execution order, prerequisites, rollback instructions - -**Migration mapping:** - -| Old Label | New Label | -|-----------|-----------| -| `InfluxDB 3 Core and Enterprise` | `product:v3-monolith` | -| `InfluxDB v3` | `product:v3-monolith` (review individually — some may be distributed) | -| `Processing engine` | `product:v3-monolith` | -| `InfluxDB v2` | `product:v2` | -| `InfluxDB v1` | `product:v1` | -| `Enterprise 1.x` | `product:v1-enterprise` | -| `Chronograf 1.x` | `product:chronograf` | -| `Kapacitor` | `product:kapacitor` | -| `Flux` | `product:flux` | -| `InfluxDB 3 Explorer` | `product:explorer` | -| `Pending Release` | `release:pending` | -| `release/influxdb3` | `release:pending` | -| 
`sync-plugin-docs` | `source:sync` | - -> [!Important] -> **Workflow Updates Required:** -> The `sync-plugin-docs` label is used in GitHub Actions workflows. After migrating this label to `source:sync`, the following files must be updated: -> - `.github/workflows/sync-plugins.yml` (lines 28, 173, 421) -> - `.github/ISSUE_TEMPLATE/sync-plugin-docs.yml` (line 4) -> -> Update all references from `sync-plugin-docs` to `source:sync` to ensure the plugin sync automation continues to work after the label migration. - -> [!Note] -> `release:pending` is an existing workflow state label that we are keeping as-is. -> The migration scripts **must ensure** this label exists (create it if missing) and **must not** delete it in the cleanup step. - -**Labels to delete after migration:** -`bug`, `priority`, `documentation`, `Proposal`, `Research Phase`, -`ready-for-collaboration`, `ui`, `javascript`, `dependencies`, -`integration-demo-blog`, `API`, `Docker`, `Grafana`, `Ask AI`, -plus all old product labels listed above. - -**Execution:** -1. Run `create-labels.sh` (safe, idempotent) -2. Run `migrate-labels.sh` -3. Human verifies a sample of issues -4. 
Run `delete-labels.sh` (destructive, requires confirmation) - -### 1.3 — Auto-labeling workflow - -**File:** `.github/workflows/auto-label.yml` - -**Trigger:** `pull_request: [opened, synchronize]` - -**Logic:** -- List changed files via `github.rest.pulls.listFiles()` -- Read `data/products.yml` for path-to-label mappings (single source of truth): - - Each product entry has `content_path` and `label_group` fields - - Match file paths against `content/{content_path}/` → `product:{label_group}` - - Example: `content/influxdb3/core/` matches `content_path: influxdb3/core`, - `label_group: v3-monolith` → applies `product:v3-monolith` -- Shared content handling: - - `content/shared/` changes apply `product:shared` label - - Additionally expand shared content to affected products using - `expandSharedContentChanges()` from `scripts/lib/content-utils.js` - - Apply all affected product labels (additive) -- Multi-product PRs: apply all matching `product:*` labels (additive) -- Only add labels that are not already present (idempotent) -- Runs as `actions/github-script@v7` - ---- - -## Phase 2: Doc Review Workflow - -### 2.1 — Workflow file - -**File:** `.github/workflows/doc-review.yml` - -**Trigger:** - -```yaml -on: - pull_request: - types: [opened, synchronize, ready_for_review] - paths: - - 'content/**' - - 'layouts/**' - - 'assets/**' - - 'data/**' -``` - -**Permissions:** `contents: read`, `pull-requests: write` - -**Concurrency:** `group: doc-review-${{ github.event.number }}`, `cancel-in-progress: true` - -**Skip conditions:** Draft PRs, fork PRs, PRs with a `skip-review` label (new label to be added in Phase 1 via the label migration scripts). - -### 2.2 — Job 1: Resolve URLs - -**Purpose:** Map changed files to preview URLs. 
- -**Implementation:** -- Reuse the existing `detect-preview-pages.js` script and `content-utils.js` library -- Same logic as `pr-preview.yml` Job 1, but output a JSON artifact instead of deploying -- Output format: `[{"file": "content/influxdb3/core/write-data/_index.md", "url": "/influxdb3/core/write-data/"}]` -- Upload as `urls.json` workflow artifact - -**Key detail:** This job runs `getChangedContentFiles()` and `mapContentToPublic()` -from `scripts/lib/content-utils.js`, which already handles shared content -expansion (if `content/shared/foo.md` changes, all pages with -`source: /shared/foo.md` are included). - -### 2.3 — Job 2: Copilot Code Review - -**Purpose:** Review Markdown changes against the style guide and documentation -standards using GitHub's native Copilot code review. Visual review of rendered -pages is handled separately in Job 3. - -**Dependencies:** None beyond the PR itself. This job runs in parallel with -Jobs 1→3. - -**Implementation:** -- Adds `copilot-reviews` as a PR reviewer via `gh pr edit --add-reviewer` -- Copilot automatically reviews the PR diff using instruction files from - `.github/instructions/` that are auto-loaded based on changed file patterns -- No custom prompt or API key required - -**Review criteria file:** `.github/instructions/content-review.instructions.md` - -This file is auto-loaded by Copilot for PRs that change `content/**/*.md` -files. It checks for: - -1. **Frontmatter correctness** — Required fields, menu structure, weights -2. **Shortcode syntax** — Correct usage, closing tags, parameters -3. **Semantic line feeds** — One sentence per line -4. **Heading hierarchy** — No h1 in content (title comes from frontmatter) -5. **Product-specific terminology** — Correct product names, versions -6. **Link format** — Relative links, proper shortcode links -7. **Shared content** — `source:` frontmatter correctness -8. 
**Code blocks** — Language identifiers, line length, long CLI options - -**Severity classification:** -- `BLOCKING` — Wrong product names, invalid frontmatter, broken shortcode syntax -- `WARNING` — Style inconsistencies, missing semantic line feeds -- `INFO` — Suggestions, not problems - -**Output:** -- Copilot posts inline review comments using GitHub's native "Comment" - review type -- `review:*` labels are applied manually by humans after reviewing the - Copilot feedback — the workflow does not manage labels - -### 2.4 — Job 3: Copilot Visual Review (rendered HTML) - -**Purpose:** Have Copilot analyze the rendered preview pages to catch visual -and structural issues invisible in the Markdown source. - -**Dependencies:** Depends on Job 1 (needs URL list). Must wait for the -`pr-preview.yml` deployment to be live. - -**Why Copilot for visual review:** -- Copilot can analyze rendered HTML content at public preview URLs — no - screenshot capture or image upload required. -- Visual review is a good fit for Copilot because the rendered pages are - self-contained artifacts (no need to cross-reference repo files). -- Copilot code review (Job 2) handles the diff; visual review catches what - the diff review cannot. - -**Implementation:** - -1. **Wait for preview deployment:** - - Poll `https://influxdata.github.io/docs-v2/pr-preview/pr-{N}/` with - `curl --head` until it returns 200 - - Timeout: 10 minutes (preview build takes ~75s + deploy time) - - Poll interval: 15 seconds - - If timeout, skip visual review; Copilot code review (Job 2) still runs - -2. **Post preview URLs and trigger Copilot review:** - - Use `actions/github-script@v7` to post a PR comment listing the preview - URLs from Job 1, formatted as clickable links - - Post a follow-up comment tagging `@copilot` with instructions to review - the rendered pages at the preview URLs. 
The comment should instruct - Copilot to check each page for: - - Raw shortcode syntax visible on the page (`{{<` or `{{%`) - - Placeholder text that should have been replaced - - Broken layouts: overlapping text, missing images, collapsed sections - - Code blocks rendered incorrectly (raw HTML/Markdown fences visible) - - Navigation/sidebar entries correct - - Visible 404 or error state - - Product name inconsistencies in the rendered page header/breadcrumbs - - The review instruction template is stored in - `.github/prompts/copilot-visual-review.md` for maintainability - - Preview URL count capped at 50 pages (matching `MAX_PAGES` in - `detect-preview-pages.js`) - -3. **Comment upsert pattern:** - - Visual review comments use a marker-based upsert pattern — the workflow - updates an existing comment if one with the marker exists, otherwise - creates a new one. This prevents duplicate comments on `synchronize` - events. - -### 2.6 — Workflow failure handling - -- If preview deployment times out: skip Copilot visual review (Job 3), - Copilot code review (Job 2) still runs independently. Post a comment - explaining visual review was skipped. -- If Copilot does not respond to the `@copilot` mention: the preview URLs - remain in the comment for human review. -- Never block PR merge on workflow failures — the workflow adds comments - but does not set required status checks or manage labels. - ---- - -## Phase 3: Documentation and Agent Instructions - -### 3.1 — Instruction file architecture - -**Principle:** One `CLAUDE.md` that references role-specific files. No per-role -CLAUDE files — Claude Code only reads one `CLAUDE.md` per directory level. The -role context comes from the task prompt (GitHub Actions workflow), not the config -file. 
- -``` -CLAUDE.md ← lightweight pointer (already exists) - ├── references .github/LABEL_GUIDE.md ← label taxonomy + usage - ├── references .claude/agents/ ← role-specific agent instructions - │ ├── doc-triage-agent.md ← triage + auto-label logic - │ └── doc-review-agent.md ← local review sessions (Claude Code) - └── references .github/instructions/ ← Copilot auto-loaded instructions - └── content-review.instructions.md ← review criteria for content/**/*.md -``` - -**How review roles are assigned at runtime:** -- **Copilot code review (CI):** GitHub's native reviewer. Auto-loads - instruction files from `.github/instructions/` based on changed file - patterns. No custom prompt or API key needed. -- **Copilot visual review (CI):** Triggered by `@copilot` mention in a PR - comment with preview URLs and a review template. -- **Claude local review:** Uses `.claude/agents/doc-review-agent.md` for - local Claude Code sessions. Not used in CI. -- Shared rules (style guide, frontmatter, shortcodes) stay in the existing - referenced files (`DOCS-CONTRIBUTING.md`, `DOCS-SHORTCODES.md`, etc.) -- No duplication — each instruction file says what's unique to that context - -### 3.2 — Agent instruction files - -#### `.claude/agents/doc-triage-agent.md` - -Role-specific instructions for issue/PR triage. Contents: - -- **Label taxonomy** — Full label list with categories, colors, descriptions -- **Path-to-product mapping** — Which content paths map to which `product:*` labels -- **Priority rules** — How to assess priority based on product, scope, and issue type -- **Decision logic** — When to apply `agent-ready`, `waiting:*`, `review:needs-human` -- **Migration context** — Old label → new label mapping (useful during transition) - -This file does NOT duplicate style guide rules. It references -`DOCS-CONTRIBUTING.md` for those. - -#### `.claude/agents/doc-review-agent.md` - -Role-specific instructions for **local** Claude Code review sessions. 
This -file is NOT used in CI — the CI review is handled by Copilot using -`.github/instructions/content-review.instructions.md`. - -Contents: - -- **Review scope** — Markdown diff review only (frontmatter, shortcodes, - semantic line feeds, heading hierarchy, terminology, links, shared content). -- **Severity classification** — BLOCKING / WARNING / INFO definitions with examples -- **Output format** — Structured review comment template - -This file references `DOCS-CONTRIBUTING.md` for style rules and -`DOCS-SHORTCODES.md` for shortcode syntax — it does NOT restate them. - -### 3.3 — Label usage guide - -**File:** `.github/LABEL_GUIDE.md` - -Contents: -- Label categories with descriptions and colors -- Common workflows (issue triage, DAR pipeline, manual work) -- GitHub filter queries for agents and humans -- Auto-labeling behavior reference - -### 3.4 — Update existing pointer files - -**`CLAUDE.md`** — Add one line to the "Full instruction resources" list: -```markdown -- [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) - Label taxonomy and pipeline usage -``` - -**`AGENTS.md`** — Add a section referencing the label guide and agent roles: -```markdown -## Doc Review Pipeline -- Label guide: `.github/LABEL_GUIDE.md` -- Triage agent: `.claude/agents/doc-triage-agent.md` -- Review agent: `.claude/agents/doc-review-agent.md` -``` - -**`.github/copilot-instructions.md`** — Add the label guide to the -"Specialized Resources" table. - -These are small additions — no restructuring of existing files. - -### 3.5 — Review instruction files - -#### `.github/instructions/content-review.instructions.md` (Copilot code review) - -Auto-loaded by Copilot for PRs that change `content/**/*.md` files. Contains -the review criteria (frontmatter, shortcodes, heading hierarchy, terminology, -links, code blocks) with severity classification. - -This file replaces the original `.github/prompts/doc-review.md` Claude prompt. 
-The review criteria are the same but delivered through Copilot's native -instruction file mechanism instead of a custom action. - -#### `.github/templates/review-comment.md` (shared format) - -Shared definitions for severity levels, comment structure, and result → label -mapping. Used by `doc-review-agent.md` (local review sessions) and the -Copilot visual review template. - -#### Copilot visual review template - -The `@copilot` visual review comment is constructed inline in the -`doc-review.yml` workflow using the review template from -`.github/templates/review-comment.md`. Contains: - -- The visual review checklist (raw shortcodes, broken layouts, 404s, etc.) -- Instructions for analyzing the rendered pages at the preview URLs -- Output format guidance (what to flag, severity levels) - ---- - -## Future Phases (Not In Scope) - -These are explicitly **not** part of this plan. Documented here for context. - -### v2 — Screenshot-based visual review -- Add Playwright screenshot capture script (`.github/scripts/capture-screenshots.js`) - for design/layout PRs where HTML analysis isn't sufficient. -- Capture full-page PNGs of preview pages, upload as workflow artifacts. -- Useful for PRs touching `layouts/`, `assets/css/`, or template changes - where visual regression matters. -- The existing `scripts/puppeteer/screenshot.js` remains for local debugging; - the CI script should use Playwright for reliability. - -### v3 — Stale PR management -- Cron job that scans for stale PRs (draft >3 days with no review activity) - and pings the author. -- Metrics tracking: % of PRs that pass Copilot review on first attempt. - -### v4 — Agent-driven issue resolution -- Auto-assign doc issues to agents based on `agent-ready` label. -- Claude or Copilot drafts the fix, then the other agent reviews. -- Closes the loop: issue → draft → review → human approval. - ---- - -## Decisions (Resolved) - -### Q1: How should Copilot review rendered pages? 
— RESOLVED - -**Decision:** Copilot reviews rendered HTML at public preview URLs — no -screenshots needed. Job 3 posts preview URLs in a PR comment, then tags -`@copilot` with a review prompt. See section 2.5 for implementation details. - -This approach works because: -- Preview pages are publicly accessible at `influxdata.github.io/docs-v2/pr-preview/pr-{N}/` -- Copilot can analyze HTML content at public URLs -- No screenshot capture, image upload, or artifact management required - -Screenshot capture is deferred to Future Phases (v2) for design/layout PRs -where visual regression testing matters. - -### Q2: Should the review workflow be a required status check? — RESOLVED - -**Decision:** No. Start as advisory (comments only). The workflow posts review -comments but does not set required status checks or manage labels. `review:*` -labels are applied manually after review. Make it required only after the team -confirms the false-positive rate is acceptable (see Future Phases). - -### Q3: Should screenshots use Playwright or Puppeteer? — DEFERRED - -**Decision:** Deferred to Future Phases (v2). The current implementation -reviews rendered HTML at preview URLs, not screenshots. When screenshot -capture is added later, use Playwright for CI and keep Puppeteer for local -debugging. - -### Q4: How to handle the `pr-preview.yml` dependency? — RESOLVED - -**Decision:** Option A — poll the preview URL with timeout. Job 3 polls -`https://influxdata.github.io/docs-v2/pr-preview/pr-{N}/` with `curl --head` -every 15 seconds until it returns 200, with a 10-minute timeout. If timeout is -reached, skip Copilot visual review; Copilot code review (Job 2) still runs -independently. - -Rationale: Polling is simple, self-contained, and resilient. The URL pattern is -deterministic. Option B (`workflow_run`) adds complexity and doesn't handle -cases where preview doesn't deploy. Option C (combined workflow) makes the -workflow too large and eliminates the parallelism benefit. 
- -### Q5: Cost and rate limiting — RESOLVED - -**Decision:** Acceptable. Both code review and visual review use the repo's -Copilot allocation. No external API keys or per-call costs. - -Mitigations already designed into the workflow: -- `paths` filter ensures only doc-content PRs trigger the workflow. -- `skip-review` label allows trivial PRs to opt out. -- Concurrency group cancels in-progress reviews when the PR is updated. -- Preview URL count is capped at 50 pages (matching `MAX_PAGES` in - `resolve-review-urls.js`). -- Draft and fork PRs are skipped entirely. - -### Q6: Label separator convention — RESOLVED - -**Decision:** Use colons (`:`) everywhere. No slashes. One separator for -consistency — expecting humans or agents to infer different semantics from -separator choice is unrealistic. Mutually exclusive behavior (e.g., `review:*` -labels) is enforced in workflow code, not punctuation. - -### Q7: Human approval mechanism — RESOLVED - -**Decision:** Use GitHub's native PR review system (CODEOWNERS file) for human -approval. No `approval:codeowner` label. The `review:*` labels are exclusively -for automated pipeline outcomes. - -### Q8: Product path mapping — RESOLVED - -**Decision:** Extend `data/products.yml` with `content_path` and `label_group` -fields. This file becomes the single source of truth for path-to-product -resolution, used by the auto-label workflow, matrix-generator, and documentation -(AGENTS.md). Eliminates duplicated mappings across multiple files. - -### Q9: `sync-plugin-docs` label migration — RESOLVED - -**Decision:** Migrate to `source:sync` (not `source:auto-detected`). Plugin -sync is a distinct operation from change detection. `source:sync` is general -enough to cover future external repo syncs without being hyper-specific. - -### Q10: Multi-product and shared content labeling — RESOLVED - -**Decision:** Auto-labeling is additive — apply all matching `product:*` labels. 
-Changes to `content/shared/` get the `product:shared` label plus all expanded -product labels (resolved via `expandSharedContentChanges()`). - ---- - -## Risk Assessment - -| Risk | Impact | Mitigation | -|------|--------|------------| -| Preview not deployed in time | Low | 10-minute polling timeout, fall back to code-only review | -| False positives in review | Medium | Start as advisory (not required check), iterate instruction files | -| Label migration data loss | Low | Migrate before deleting; human verification gate | -| Copilot visual review misses issues | Medium | Preview URLs remain in comment for human review; start advisory | -| Copilot code review quality | Medium | Review criteria in `.github/instructions/` can be iterated; local Claude review available as backup | -| Product mapping drift | Low | Single source of truth in `data/products.yml`; auto-label and matrix-generator both derive from it | - ---- - -## File Summary - -Files to create or modify: - -| Action | File | Phase | Status | -|--------|------|-------|--------| -| Modify | `data/products.yml` | 1.0 | Done | -| Modify | `data/labels.yml` | 1.1 | Done | -| Create | `helper-scripts/label-migration/create-labels.sh` | 1.2 | Done | -| Create | `helper-scripts/label-migration/migrate-labels.sh` | 1.2 | Done | -| Create | `helper-scripts/label-migration/delete-labels.sh` | 1.2 | Done | -| Create | `helper-scripts/label-migration/README.md` | 1.2 | Done | -| Create | `.github/workflows/auto-label.yml` | 1.3 | Done | -| Create | `.github/workflows/doc-review.yml` | 2.1 | Done | -| Create | `.claude/agents/doc-triage-agent.md` | 3.2 | Done | -| Create | `.claude/agents/doc-review-agent.md` | 3.2 | Done | -| Create | `.github/LABEL_GUIDE.md` | 3.3 | Done | -| Create | `.github/instructions/content-review.instructions.md` | 3.5 | Done | -| Create | `.github/templates/review-comment.md` | 2.5/3.5 | Done | -| Modify | `CLAUDE.md` | 3.4 | Done | -| Modify | `AGENTS.md` | 3.4 | Done | -| Modify | 
`.github/copilot-instructions.md` | 3.4 | Done | - ---- - -## Implementation Order - -1. ~~**Phase 1.0** — Extend `data/products.yml` with `content_path` and `label_group`~~ ✅ -2. ~~**Phase 1.1–1.2** — Create label migration scripts~~ ✅ -3. ~~**Phase 1.3** — Create auto-label workflow~~ ✅ -4. ~~**Execute label migration** — Run scripts, then manual cleanup~~ ✅ -5. ~~**Phase 3.2** — Create agent instruction files~~ ✅ -6. ~~**Phase 2.1–2.3** — Workflow skeleton + URL resolution + Copilot code review~~ ✅ -7. ~~**Phase 2.5** — Copilot visual review job~~ ✅ -8. ~~**Phase 3.3–3.5** — Label guide, instruction files, pointer updates~~ ✅ -9. ~~**Test end-to-end** — Triggered workflows via `workflow_dispatch` against PR #6890~~ ✅ - -### End-to-end test results (2026-03-09) - -Triggered via `workflow_dispatch` with `pr_number=6890` on branch -`claude/triage-agent-plan-EOY0u`. - -| Workflow | Job | Result | Notes | -|----------|-----|--------|-------| -| Auto-label PRs | auto-label | Pass | Loaded 14 path mappings, 0 product labels (correct — no content changes) | -| Doc Review | resolve-urls | Pass | 0 preview URLs (correct — no content changes) | -| Doc Review | copilot-review | Pass | `copilot-reviews` added as reviewer | -| Doc Review | copilot-visual-review | Skipped | Correct — 0 URLs to review | - -**Fixes applied during testing:** -- `npm ci` replaced with targeted `js-yaml` install (sparse checkout lacks lock file) -- Added `workflow_dispatch` with `pr_number` input for on-demand re-runs - -**Remaining:** Visual review (Job 3) needs a content-changing PR to fully exercise -the preview URL polling and Copilot `@copilot` mention flow. 
diff --git a/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml b/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml index 0331a501c5..382af5ce3f 100644 --- a/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml +++ b/.github/ISSUE_TEMPLATE/sync-plugin-docs.yml @@ -1,7 +1,7 @@ name: Sync Plugin Documentation description: Request synchronization of plugin documentation from influxdb3_plugins repository title: "Sync plugin docs: [PLUGIN_NAMES]" -labels: ["source:sync", "documentation", "automation"] +labels: ["sync-plugin-docs", "documentation", "automation"] assignees: [] body: - type: markdown diff --git a/.github/LABEL_GUIDE.md b/.github/LABEL_GUIDE.md deleted file mode 100644 index 14de676703..0000000000 --- a/.github/LABEL_GUIDE.md +++ /dev/null @@ -1,100 +0,0 @@ -# Label Guide - -Label taxonomy for the docs-v2 repository. Used by automation workflows, -triage agents, and human contributors. - -## Label Definitions - -- **Product labels** (`product:*`): Derived from - [data/products.yml](../data/products.yml) — each product's `label_group` - field determines the label name, `content_path` determines which files - trigger it. Applied by the [auto-label workflow](workflows/auto-label.yml). - Multi-product PRs get all matching labels. Shared content changes get - `product:shared` plus labels for all products that reference the shared file. - -- **Source, waiting, workflow, and review labels**: Defined in - [data/labels.yml](../data/labels.yml) — names, colors, and descriptions. - -- **Review label behavior** (severity levels, result rules, result → label - mapping): Defined in - [templates/review-comment.md](templates/review-comment.md). - -Human approval uses GitHub's native PR review system (CODEOWNERS), not labels. 
- -## Renamed Labels - -| Old Name | New Name | -|----------|----------| -| `AI assistant tooling` | `ai:tooling` | -| `ci:testing-and-validation` | `ci:testing` | -| `design` | `area:site-ui` | -| `InfluxDB Cloud` | `product:v2-cloud` | -| `user feedback` | `source:feedback` | -| `ai:tooling` | `area:agents` | - -## Deleted Labels - -| Label | Replacement | Reason | -|-------|-------------|--------| -| `Pending PR` | `waiting:pr` | Consolidated into `waiting:` namespace | -| `broke-link` | `area:links` | Consolidated into `area:` namespace | - -## Common Workflows - -### Issue triage - -1. Read issue → identify product(s) → apply `product:*` labels -2. Apply `source:*` label if applicable -3. Determine readiness → apply `agent-ready` or `waiting:*` - -### PR review pipeline - -1. PR opened → auto-label applies `product:*` labels -2. Doc review workflow triggers (unless `skip-review` is present) -3. Copilot code review runs on the diff (uses - [`.github/instructions/`](instructions/) files from the base branch) -4. Copilot visual review checks rendered preview pages -5. Human reviewer uses GitHub's PR review for final approval - -Review labels (`review:*`) are applied manually after review, not by CI. 
- -### GitHub Filter Queries - -``` -# PRs needing human review -label:review:needs-human is:pr is:open - -# Agent-ready issues -label:agent-ready is:issue is:open -label:waiting:engineering -label:waiting:product - -# All InfluxDB 3 issues -label:product:v3-monolith,product:v3-distributed is:issue is:open - -# Blocked issues -label:waiting:engineering,waiting:product is:issue is:open - -# PRs that skipped review -label:skip-review is:pr -``` - -## Auto-labeling Behavior - -The [auto-label workflow](workflows/auto-label.yml) runs on -`pull_request: [opened, synchronize]` and: - -- Reads path-to-product mappings from `data/products.yml` -- Matches changed files to product labels -- Expands shared content changes to affected product labels -- Adds labels idempotently (skips labels already present) -- Skips draft and fork PRs - -## References - -- Label definitions: `data/labels.yml` -- Product definitions: `data/products.yml` -- Review comment format: `.github/templates/review-comment.md` -- Auto-label workflow: `.github/workflows/auto-label.yml` -- Doc review workflow: `.github/workflows/doc-review.yml` -- Triage agent: `.claude/agents/doc-triage-agent.md` -- Review agent: `.claude/agents/doc-review-agent.md` -- Migration scripts: `helper-scripts/label-migration/` diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 975b1a0f02..b31410cbf3 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -2,61 +2,261 @@ > **For GitHub Copilot and other AI coding agents** > +> This is the primary instruction file for GitHub Copilot working with the InfluxData documentation site. 
+> > **Instruction resources**: > +> - [.github/agents/copilot-instructions-agent.md](agents/copilot-instructions-agent.md) - **Creating/improving Copilot instructions** +> - [.claude/skills/](../.claude/skills/) - **Detailed workflows** (content editing, testing, InfluxDB setup, templates) > - [.github/instructions/](instructions/) - **Pattern-specific** (auto-loaded by file type) -> - [AGENTS.md](../AGENTS.md) - Shared project guidelines (style, constraints, content structure) -> - [.github/LABEL_GUIDE.md](LABEL_GUIDE.md) - Label taxonomy and review pipeline +> - [.github/agents/](agents/) - **Specialist agents** (TypeScript/Hugo, Copilot management) +> - [AGENTS.md](../AGENTS.md), [CLAUDE.md](../CLAUDE.md) - General AI assistant guides ## Quick Reference -| Task | Command | Time | -| ---------------- | ----------------------------------------------------- | ------- | -| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | \~4s | -| Build | `npx hugo --quiet` | \~75s | -| Dev Server | `npx hugo server` | \~92s | -| Create Docs | `docs create --products ` | varies | -| Edit Docs | `docs edit ` | instant | -| Add Placeholders | `docs placeholders ` | instant | -| Audit Docs | `docs audit --products ` | varies | -| Test All | `yarn test:codeblocks:all` | 15-45m | -| Lint | `yarn lint` | \~1m | - -**NEVER CANCEL** Hugo builds (\~75s) or test runs (15-45m). 
+| Task | Command | Time | Details | +| ---------------- | ----------------------------------------------------- | ------- | ------------------------------------- | +| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | \~4s | Skip Cypress for CI | +| Build | `npx hugo --quiet` | \~75s | NEVER CANCEL | +| Dev Server | `npx hugo server` | \~92s | Port 1313 | +| Create Docs | `docs create --products ` | varies | AI-assisted scaffolding | +| Create & Open | `docs create --products --open` | instant | Non-blocking (background) | +| Create & Wait | `docs create --products --open --wait` | varies | Blocking (interactive) | +| Edit Docs | `docs edit ` | instant | Non-blocking (background) | +| Edit Docs (wait) | `docs edit --wait` | varies | Blocking (interactive) | +| List Files | `docs edit --list` | instant | Show files without opening | +| Add Placeholders | `docs placeholders ` | instant | Add placeholder syntax to code blocks | +| Audit Docs | `docs audit --products ` | varies | Audit documentation coverage | +| Release Notes | `docs release-notes --products ` | varies | Generate release notes from commits | +| Test All | `yarn test:codeblocks:all` | 15-45m | NEVER CANCEL | +| Lint | `yarn lint` | \~1m | Pre-commit checks | ## CLI Tools +**For when to use CLI vs direct editing**, see [docs-cli-workflow skill](../.claude/skills/docs-cli-workflow/SKILL.md). 
+ ```bash -docs --help # Full reference +# Create new documentation (AI-assisted scaffolding) +docs create --products +docs create --products influxdb3_core --open # Non-blocking +docs create --products influxdb3_core --open --wait # Blocking + +# Find and edit documentation by URL +docs edit # Non-blocking (agent-friendly) +docs edit --list # List files only +docs edit --wait # Wait for editor + +# Other tools +docs placeholders # Add placeholder syntax to code blocks +docs audit --products # Audit documentation coverage +docs release-notes --products + +# Get help +docs --help +docs create --help ``` -Non-blocking by default. Use `--wait` for interactive editing. +**Key points**: + +- Accepts both product keys (`influxdb3_core`) and paths (`/influxdb3/core`) +- Non-blocking by default (agent-friendly) +- Use `--wait` for interactive editing +- `--products` and `--repos` are mutually exclusive for audit/release-notes ## Workflows -- **Content editing**: See [content-editing skill](../.claude/skills/content-editing/SKILL.md) -- **Testing**: See [DOCS-TESTING.md](../DOCS-TESTING.md) -- **Hugo templates**: See [hugo-template-dev skill](../.claude/skills/hugo-template-dev/SKILL.md) +### Content Editing + +See [content-editing skill](../.claude/skills/content-editing/SKILL.md) for complete workflow: + +- Creating/editing content with CLI +- Shared content management +- Testing and validation + +### Testing + +See [DOCS-TESTING.md](../DOCS-TESTING.md) and [cypress-e2e-testing skill](../.claude/skills/cypress-e2e-testing/SKILL.md). + +Quick tests (NEVER CANCEL long-running): + +```bash +yarn test:codeblocks:all # 15-45m +yarn test:links # 1-5m +yarn lint # 1m +``` + +### InfluxDB 3 Setup + +See [influxdb3-test-setup skill](../.claude/skills/influxdb3-test-setup/SKILL.md). 
+ +Quick setup: + +```bash +./test/scripts/init-influxdb3.sh core # Per-worktree, port 8282 +./test/scripts/init-influxdb3.sh enterprise # Shared, port 8181 +./test/scripts/init-influxdb3.sh all # Both +``` + +### Hugo Template Development + +See [hugo-template-dev skill](../.claude/skills/hugo-template-dev/SKILL.md) for template syntax, data access, and testing strategies. + +## Repository Structure + +### Content Organization + +- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer) +- **InfluxDB v2**: `/content/influxdb/` (v2, cloud) +- **InfluxDB v1**: `/content/influxdb/v1` +- **InfluxDB Enterprise (v1)**: `/content/enterprise_influxdb/v1/` +- **Telegraf**: `/content/telegraf/v1/` +- **Kapacitor**: `/content/kapacitor/` +- **Chronograf**: `/content/chronograf/` +- **Flux**: `/content/flux/` +- **Examples**: `/content/example.md` (comprehensive shortcode reference) +- **Shared content**: `/content/shared/` + +### Key Files + +- **Config**: `/config/_default/`, `package.json`, `compose.yaml`, `lefthook.yml` +- **Testing**: `cypress.config.js`, `pytest.ini`, `.vale.ini` +- **Assets**: `/assets/` (JS, CSS), `/layouts/` (templates), `/data/` (YAML/JSON) +- **Build output**: `/public/` (\~529MB, gitignored) + +## Technology Stack + +- **Hugo** - Static site generator +- **Node.js/Yarn** - Package management +- **Testing**: Pytest, Cypress, link-checker, Vale +- **Tools**: Docker, ESLint, Prettier, Lefthook + +## Common Issues + +### Network Restrictions -## Product and Content Paths +Commands that may fail in restricted environments: -Defined in [data/products.yml](../data/products.yml). 
+- Docker builds (external repos) +- `docker compose up local-dev` (Alpine packages) +- Cypress installation (use `CYPRESS_INSTALL_BINARY=0`) + +### Pre-commit Validation + +```bash +# Quick validation before commits +yarn prettier --write "**/*.{css,js,ts,jsx,tsx}" +yarn eslint assets/js/**/*.js +npx hugo --quiet +``` + +## Documentation Coverage + +- **InfluxDB 3**: Core, Enterprise, Cloud (Dedicated/Serverless), Clustered, Explorer, plugins +- **InfluxDB v2/v1**: OSS, Cloud, Enterprise +- **Tools**: Telegraf, Kapacitor, Chronograf, Flux +- **API Reference**: All InfluxDB editions ## Content Guidelines -- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Style, workflow, commit format -- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Shortcode reference -- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Frontmatter reference -- [content/example.md](../content/example.md) - Working shortcode examples +**Style guide**: Google Developer Documentation Style Guide\ +**Voice**: Active, present tense, second person\ +**Line breaks**: Semantic line feeds (one sentence per line)\ +**Files**: lowercase-with-hyphens.md + +### Quick Shortcodes + +````markdown +# Callouts (GitHub-style alerts) +> [!Note] / [!Warning] / [!Tip] / [!Important] / [!Caution] + +# Required elements +{{< req >}} +{{< req type="key" >}} + +# Code placeholders +```sh { placeholders="DATABASE_NAME|API_TOKEN" } +curl https://example.com/api?db=DATABASE_NAME +```` + +```` + +**Complete reference**: [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) + +### Required Frontmatter + +```yaml +title: # Required +description: # Required +menu: + product_menu_key: + name: # Optional + parent: # Optional +weight: # Required: 1-99, 101-199, 201-299... 
+```` + +**Shared content**: Add `source: /shared/path/to/file.md` -## File Pattern-Specific Instructions +**Complete reference**: [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) -Auto-loaded by GitHub Copilot based on changed files: +### Resources + +- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Workflow & guidelines +- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Complete shortcodes +- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Complete metadata +- [DOCS-TESTING.md](../DOCS-TESTING.md) - Testing procedures +- [content/example.md](../content/example.md) - Working examples + +## Troubleshooting + +| Issue | Solution | +| ------------------------ | ---------------------------------------------------------------- | +| Pytest collected 0 items | Use `python` not `py` for language identifier | +| Hugo build errors | Check `/config/_default/` | +| Docker build fails | Expected in restricted networks - use local Hugo | +| Cypress install fails | Use `CYPRESS_INSTALL_BINARY=0 yarn install` | +| Link validation slow | Test specific files: `yarn test:links content/file.md` | +| Vale "0 errors in stdin" | File is outside repo - Vale Docker can only access repo files | +| Vale false positives | Add terms to `.ci/vale/styles/InfluxDataDocs/Terms/ignore.txt` | +| Vale duration warnings | Duration literals (`30d`) are valid - check InfluxDataDocs.Units | + +## Specialized Instructions + +### File Pattern-Specific Instructions + +These instructions are automatically loaded by GitHub Copilot based on the files you're working with: | Pattern | File | Description | | ------------------------ | ----------------------------------------------------------------- | ------------------------------------------------ | | `content/**/*.md` | [content.instructions.md](instructions/content.instructions.md) | Content file guidelines, frontmatter, shortcodes | -| `content/**/*.md` | [content-review.instructions.md](instructions/content-review.instructions.md) | Review criteria for 
content changes | | `layouts/**/*.html` | [layouts.instructions.md](instructions/layouts.instructions.md) | Shortcode implementation patterns and testing | | `api-docs/**/*.yml` | [api-docs.instructions.md](instructions/api-docs.instructions.md) | OpenAPI spec workflow | | `assets/js/**/*.{js,ts}` | [assets.instructions.md](instructions/assets.instructions.md) | TypeScript/JavaScript and CSS development | + +### Specialized Resources + +**Custom Agents** (`.github/agents/`): + +- [typescript-hugo-agent.md](agents/typescript-hugo-agent.md) - TypeScript/Hugo development +- [copilot-instructions-agent.md](agents/copilot-instructions-agent.md) - Managing Copilot instructions + +**Claude Skills** (`.claude/skills/` - detailed workflows): + +- [content-editing](../.claude/skills/content-editing/SKILL.md) - Complete content workflow +- [docs-cli-workflow](../.claude/skills/docs-cli-workflow/SKILL.md) - CLI decision guidance +- [cypress-e2e-testing](../.claude/skills/cypress-e2e-testing/SKILL.md) - E2E testing +- [hugo-template-dev](../.claude/skills/hugo-template-dev/SKILL.md) - Hugo templates +- [influxdb3-test-setup](../.claude/skills/influxdb3-test-setup/SKILL.md) - InfluxDB 3 setup +- [vale-linting](../.claude/skills/vale-linting/SKILL.md) - Vale configuration and debugging + +**Documentation**: + +- [DOCS-TESTING.md](../DOCS-TESTING.md) - Testing procedures +- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Contribution guidelines +- [DOCS-FRONTMATTER.md](../DOCS-FRONTMATTER.md) - Frontmatter reference +- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Shortcodes reference + +## Important Notes + +- This is a large site (5,359+ pages) with complex build processes +- **NEVER CANCEL** long-running operations (Hugo builds, tests) +- Set appropriate timeouts: Hugo build (180s+), tests (30+ minutes) diff --git a/.github/instructions/content-review.instructions.md b/.github/instructions/content-review.instructions.md deleted file mode 100644 index 185786556d..0000000000 
--- a/.github/instructions/content-review.instructions.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -applyTo: "content/**/*.md" ---- - -# Content Review Criteria - -Review documentation changes against these rules. Only flag issues you are -confident about. Reference the linked docs for detailed rules. - -## Frontmatter - -Rules: [DOCS-FRONTMATTER.md](../../DOCS-FRONTMATTER.md) - -- `title` and `description` are required on every page -- `menu` structure matches the product's menu key -- `weight` is present for pages in navigation -- `source` paths point to valid `/shared/` paths -- No duplicate or conflicting frontmatter keys - -## Shortcode Syntax - -Rules: [DOCS-SHORTCODES.md](../../DOCS-SHORTCODES.md) - -- `{{< >}}` for HTML output, `{{% %}}` for Markdown-processed content -- Closing tags match opening tags -- Required parameters are present -- Callouts use GitHub-style syntax: `> [!Note]`, `> [!Warning]`, etc. - -## Heading Hierarchy - -- No h1 headings in content (h1 comes from `title` frontmatter) -- Headings don't skip levels (h2 -> h4 without h3) - -## Semantic Line Feeds - -Rules: [DOCS-CONTRIBUTING.md](../../DOCS-CONTRIBUTING.md) - -- One sentence per line (better diffs) -- Long sentences on their own line, not concatenated - -## Terminology and Product Names - -Products defined in [data/products.yml](../../data/products.yml): - -- Use official names: "InfluxDB 3 Core", "InfluxDB 3 Enterprise", - "InfluxDB Cloud Serverless", "InfluxDB Cloud Dedicated" -- Don't mix v2/v3 terminology (e.g., "bucket" in v3 Core docs) -- Version references match the content path - -## Links - -- Internal links use relative paths or Hugo `relref` shortcodes -- No hardcoded `docs.influxdata.com` links in content files -- Anchor links match actual heading IDs - -## Code Blocks - -- Use `python` not `py` for language identifiers (pytest requirement) -- Long options in CLI examples (`--output` not `-o`) -- Keep lines within 80 characters -- Include language identifier on fenced code 
blocks - -## Shared Content - -- `source:` frontmatter points to an existing shared file -- Shared files don't contain frontmatter (only content) -- Changes to shared content affect multiple products — flag if unintentional - -## Severity - -- **BLOCKING**: Broken rendering, wrong product names, missing required - frontmatter, malformed shortcodes, h1 in content body -- **WARNING**: Missing semantic line feeds, skipped heading levels, missing - `weight`, long CLI options not used -- **INFO**: Suggestions, code block missing language identifier, opportunities - to use shared content diff --git a/.github/prompts/copilot-visual-review.md b/.github/prompts/copilot-visual-review.md deleted file mode 100644 index 2ce9de1545..0000000000 --- a/.github/prompts/copilot-visual-review.md +++ /dev/null @@ -1,34 +0,0 @@ -# Visual Review Prompt - -Review the rendered documentation pages at the preview URLs listed below. -Check each page for visual and structural issues that are invisible in the -Markdown source. - -## Checklist - -For each preview URL, verify: - -- [ ] **No raw shortcodes** — No `{{<` or `{{%` syntax visible on the page -- [ ] **No placeholder text** — No `PLACEHOLDER`, `TODO`, `FIXME`, or - template variables visible in rendered content -- [ ] **Layout intact** — No overlapping text, missing images, or collapsed - sections -- [ ] **Code blocks render correctly** — No raw HTML fences or Markdown - syntax visible inside code blocks -- [ ] **Product names correct** — Page header, breadcrumbs, and sidebar show - the correct product name -- [ ] **No 404s or errors** — Page loads without error states -- [ ] **Navigation correct** — Sidebar entries link to the right pages and - the page appears in the expected location - -## Output - -Follow the shared review comment format, severity definitions, and label -mapping in -[templates/review-comment.md](../templates/review-comment.md). - -Adapt the "Files Reviewed" section to list preview URLs instead of file -paths. 
- -## Preview URLs - diff --git a/.github/scripts/resolve-review-urls.js b/.github/scripts/resolve-review-urls.js deleted file mode 100644 index 8869555d75..0000000000 --- a/.github/scripts/resolve-review-urls.js +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Resolve Review URLs - * - * Maps changed content files to URL paths for the doc-review workflow. - * Reuses the same content-utils functions as detect-preview-pages.js. - * - * Outputs (for GitHub Actions): - * - urls: JSON array of URL paths - * - url-count: Number of URLs - */ - -import { appendFileSync } from 'fs'; -import { - getChangedContentFiles, - mapContentToPublic, -} from '../../scripts/lib/content-utils.js'; - -const GITHUB_OUTPUT = process.env.GITHUB_OUTPUT || '/dev/stdout'; -const BASE_REF = process.env.BASE_REF || 'origin/master'; -const MAX_PAGES = 50; - -if (!/^origin\/[a-zA-Z0-9._/-]+$/.test(BASE_REF)) { - console.error(`Invalid BASE_REF: ${BASE_REF}`); - process.exit(1); -} - -const changed = getChangedContentFiles(BASE_REF); -const htmlPaths = mapContentToPublic(changed, 'public'); - -const urls = Array.from(htmlPaths) - .sort() - .map((p) => '/' + p.replace(/^public\//, '').replace(/\/index\.html$/, '/')) - .slice(0, MAX_PAGES); - -appendFileSync(GITHUB_OUTPUT, `urls=${JSON.stringify(urls)}\n`); -appendFileSync(GITHUB_OUTPUT, `url-count=${urls.length}\n`); - -console.log(`Detected ${urls.length} preview URLs`); diff --git a/.github/scripts/workflow-utils.js b/.github/scripts/workflow-utils.js deleted file mode 100644 index 38f49695a3..0000000000 --- a/.github/scripts/workflow-utils.js +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Workflow Utilities - * - * Canonical import for GitHub Actions workflow scripts. Re-exports shared - * utilities from scripts/lib/ and adds workflow-specific helpers. 
- * - * Usage from github-script inline steps: - * - * const utils = await import(`${process.cwd()}/.github/scripts/workflow-utils.js`); - * const pathToLabel = await utils.getProductLabelMap(); - * const labels = utils.matchFilesToLabels(changedFiles, pathToLabel); - * - * Usage from .github/scripts/ ESM modules: - * - * import { getProductLabelMap, findPagesReferencingSharedContent } from './workflow-utils.js'; - */ - -import { readFileSync } from 'fs'; -import { findPagesReferencingSharedContent } from '../../scripts/lib/content-utils.js'; - -// --- Re-export content utilities --- -export { - findPagesReferencingSharedContent, - expandSharedContentChanges, - getChangedContentFiles, - mapContentToPublic, - categorizeContentFiles, - getSourceFromFrontmatter, -} from '../../scripts/lib/content-utils.js'; - -/** - * Build a Map of content path prefixes to product label names - * by reading data/products.yml. - * - * Requires `js-yaml` to be installed (e.g., `npm install js-yaml`). - * - * @param {string} [productsPath='data/products.yml'] - Path to products.yml - * @returns {Promise>} Map of "content/{path}/" → "product:{label_group}" - */ -export async function getProductLabelMap(productsPath = 'data/products.yml') { - const { load } = await import('js-yaml'); - const products = load(readFileSync(productsPath, 'utf8')); - const pathToLabel = new Map(); - - for (const product of Object.values(products)) { - const cp = product.content_path; - const lg = product.label_group; - if (!cp || !lg) continue; - - if (typeof cp === 'string' && typeof lg === 'string') { - pathToLabel.set(`content/${cp}/`, `product:${lg}`); - } else if (typeof cp === 'object' && typeof lg === 'object') { - for (const version of Object.keys(cp)) { - if (lg[version]) { - pathToLabel.set(`content/${cp[version]}/`, `product:${lg[version]}`); - } - } - } - } - - return pathToLabel; -} - -/** - * Match a list of file paths against the product label map. 
- * For shared content files, expands to find affected products. - * - * @param {string[]} files - Changed file paths - * @param {Map} pathToLabel - From getProductLabelMap() - * @returns {Set} Set of label names to apply - */ -export function matchFilesToLabels(files, pathToLabel) { - const labels = new Set(); - - for (const file of files) { - if (file.startsWith('content/shared/')) { - labels.add('product:shared'); - - try { - const referencingPages = findPagesReferencingSharedContent(file); - for (const page of referencingPages) { - for (const [prefix, label] of pathToLabel) { - if (page.startsWith(prefix)) { - labels.add(label); - break; - } - } - } - } catch { - // Shared content expansion failed — product:shared still applied - } - continue; - } - - for (const [prefix, label] of pathToLabel) { - if (file.startsWith(prefix)) { - labels.add(label); - break; - } - } - } - - return labels; -} diff --git a/.github/templates/review-comment.md b/.github/templates/review-comment.md deleted file mode 100644 index 790ff29128..0000000000 --- a/.github/templates/review-comment.md +++ /dev/null @@ -1,98 +0,0 @@ -# Review Comment Format - -Shared definitions for severity levels, comment structure, and result → label -mapping. Used by doc-review-agent.md (local review sessions) and -copilot-visual-review.md (rendered page review). - -## Severity Levels - -### BLOCKING - -Issues that will cause incorrect rendering, broken pages, or misleading -content. These must be fixed before merge. 
- -Examples: -- Missing required frontmatter (`title`, `description`) -- Unclosed or malformed shortcode tags -- Wrong product name in content (e.g., "InfluxDB 3" in v2 docs) -- Broken `source:` path for shared content -- h1 heading in content body -- Raw shortcode syntax visible on rendered page (`{{<` or `{{%`) -- 404 errors on preview pages -- Wrong product name in header or breadcrumbs - -### WARNING - -Style issues or minor visual problems that should be fixed but don't break -functionality or correctness. - -Examples: -- Missing semantic line feeds (multiple sentences on one line) -- Heading level skipped (h2 → h4) -- Long option not used in CLI examples (`-o` instead of `--output`) -- Missing `weight` in frontmatter -- Minor layout issues (overlapping text, collapsed sections) -- Missing images -- Placeholder text visible (`TODO`, `FIXME`) - -### INFO - -Suggestions and observations. Not problems. - -Examples: -- Opportunity to use a shared content file -- Unusually long page that could be split -- Code block missing language identifier -- Cosmetic improvements - -## Comment Structure - -Post a single review comment on the PR with this structure: - -```markdown -## Doc Review Summary - -**Result:** APPROVED | CHANGES REQUESTED | NEEDS HUMAN REVIEW - -### Issues Found - -#### BLOCKING - -- **file:line** — Description of the issue - - Suggested fix: ... - -#### WARNING - -- **file:line** — Description of the issue - -#### INFO - -- **file:line** — Observation - -### Files Reviewed - -- `path/to/file.md` — Brief summary of changes -``` - -Adapt the "Files Reviewed" section to the review context: -- **Source review:** list file paths from the diff -- **Visual review (Copilot):** list preview URLs instead of file paths - -## Result Rules - -- Zero BLOCKING issues → **APPROVED** -- Any BLOCKING issues → **CHANGES REQUESTED** -- Cannot determine severity or diff is ambiguous → **NEEDS HUMAN REVIEW** -- Only report issues you are confident about. Do not guess. 
-- Group issues by file when multiple issues exist in the same file. - -## Result → Label Mapping - -| Result | Label | -|--------|-------| -| APPROVED | `review:approved` | -| CHANGES REQUESTED | `review:changes-requested` | -| NEEDS HUMAN REVIEW | `review:needs-human` | - -Labels are mutually exclusive. Apply manually after review — Copilot code -review uses GitHub's native "Comment" review type and does not manage labels. diff --git a/.github/workflows/auto-label.yml b/.github/workflows/auto-label.yml deleted file mode 100644 index 7dfccb43c2..0000000000 --- a/.github/workflows/auto-label.yml +++ /dev/null @@ -1,122 +0,0 @@ -name: Auto-label PRs - -on: - pull_request: - types: [opened, synchronize] - workflow_dispatch: - inputs: - pr_number: - description: 'PR number to label' - required: true - type: number - -permissions: {} - -concurrency: - group: auto-label-${{ github.event.number || inputs.pr_number }} - cancel-in-progress: true - -jobs: - auto-label: - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - # Skip draft PRs and fork PRs (workflow_dispatch always runs) - if: | - github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - github.event.pull_request.head.repo.full_name == github.repository) - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 - with: - persist-credentials: false - sparse-checkout: | - content - data/products.yml - scripts/lib/content-utils.js - .github/scripts/workflow-utils.js - package.json - sparse-checkout-cone-mode: false - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 - with: - node-version: 22 - - - name: Install js-yaml - run: npm install --no-save --ignore-scripts --no-package-lock --legacy-peer-deps js-yaml - - - name: Apply product labels - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 - with: - script: | - const { - getProductLabelMap, - matchFilesToLabels, - } = await 
import( - `${process.cwd()}/.github/scripts/workflow-utils.js` - ); - - const prNumber = - context.issue.number || - Number('${{ inputs.pr_number }}'); - - if (!prNumber) { - core.setFailed('No PR number available'); - return; - } - - // --- Build path-to-label mapping from products.yml --- - const pathToLabel = await getProductLabelMap(); - core.info( - `Loaded ${pathToLabel.size} path-to-label mappings from products.yml` - ); - - // --- Get changed files from the PR (paginated) --- - const files = await github.paginate( - github.rest.pulls.listFiles, - { - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: prNumber, - per_page: 100, - } - ); - - const changedFiles = files.map(f => f.filename); - core.info(`PR has ${changedFiles.length} changed files`); - - // --- Match files to product labels --- - const labelsToAdd = matchFilesToLabels(changedFiles, pathToLabel); - - if (labelsToAdd.size === 0) { - core.info('No product labels to add'); - return; - } - - // --- Get existing PR labels to avoid duplicates --- - const { data: prData } = await github.rest.issues.get({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - }); - - const existingLabels = new Set(prData.labels.map(l => l.name)); - const newLabels = [...labelsToAdd].filter( - l => !existingLabels.has(l) - ); - - if (newLabels.length === 0) { - core.info('All matching labels already present'); - return; - } - - // --- Apply labels --- - await github.rest.issues.addLabels({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - labels: newLabels, - }); - - core.info(`Added labels: ${newLabels.join(', ')}`); diff --git a/.github/workflows/doc-review.yml b/.github/workflows/doc-review.yml deleted file mode 100644 index b6f79cffaa..0000000000 --- a/.github/workflows/doc-review.yml +++ /dev/null @@ -1,280 +0,0 @@ -name: Doc Review - -on: - pull_request: - types: [opened, synchronize, ready_for_review] - paths: - - 'content/**' - - 
'layouts/**' - - 'assets/**' - - 'data/**' - workflow_dispatch: - inputs: - pr_number: - description: 'PR number to review' - required: true - type: number - -permissions: {} - -concurrency: - group: doc-review-${{ github.event.number || inputs.pr_number }} - cancel-in-progress: true - -jobs: - # ----------------------------------------------------------------- - # Job 1: Resolve preview URLs from changed content files - # ----------------------------------------------------------------- - resolve-urls: - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: read - if: | - github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - github.event.pull_request.head.repo.full_name == github.repository && - !contains(github.event.pull_request.labels.*.name, 'skip-review')) - outputs: - urls: ${{ steps.detect.outputs.urls }} - url-count: ${{ steps.detect.outputs.url-count }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 - with: - persist-credentials: false - fetch-depth: 0 - sparse-checkout: | - content - data/products.yml - scripts/lib/content-utils.js - .github/scripts/resolve-review-urls.js - package.json - sparse-checkout-cone-mode: false - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 - with: - node-version: 22 - - - name: Resolve base ref - id: base - env: - GH_TOKEN: ${{ github.token }} - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - run: | - if [ -n "${{ github.base_ref }}" ]; then - echo "ref=origin/${{ github.base_ref }}" >> "$GITHUB_OUTPUT" - else - BASE=$(gh pr view "$PR_NUMBER" --repo "${{ github.repository }}" --json baseRefName -q .baseRefName) - git fetch origin "$BASE" - echo "ref=origin/$BASE" >> "$GITHUB_OUTPUT" - fi - - - name: Detect changed pages - id: detect - env: - BASE_REF: ${{ steps.base.outputs.ref }} - run: node .github/scripts/resolve-review-urls.js - - # 
----------------------------------------------------------------- - # Job 2: Copilot code review (runs in parallel with Job 1) - # ----------------------------------------------------------------- - copilot-review: - runs-on: ubuntu-latest - permissions: - pull-requests: write - if: | - github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - github.event.pull_request.head.repo.full_name == github.repository && - !contains(github.event.pull_request.labels.*.name, 'skip-review')) - steps: - - name: Request Copilot review - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 - env: - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - with: - script: | - const prNumber = context.issue.number || Number(process.env.PR_NUMBER); - try { - await github.rest.pulls.requestReviewers({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: prNumber, - reviewers: ['copilot-pull-request-reviewer'], - }); - core.info('Copilot code review requested successfully'); - } catch (error) { - core.warning(`Could not request Copilot review: ${error.message}`); - core.warning( - 'To enable automatic Copilot reviews, configure a repository ruleset: ' + - 'Settings → Rules → Rulesets → "Automatically request Copilot code review"' - ); - } - - # ----------------------------------------------------------------- - # Job 3: Copilot visual review (depends on Job 1 for URLs) - # ----------------------------------------------------------------- - copilot-visual-review: - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - needs: resolve-urls - if: needs.resolve-urls.result == 'success' && fromJson(needs.resolve-urls.outputs.url-count) > 0 - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 - with: - persist-credentials: false - sparse-checkout: .github/prompts/copilot-visual-review.md - sparse-checkout-cone-mode: false - - - name: Wait for preview 
deployment - id: wait - env: - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - run: | - PREVIEW_URL="https://influxdata.github.io/docs-v2/pr-preview/pr-${PR_NUMBER}/" - TIMEOUT=600 # 10 minutes - INTERVAL=15 - ELAPSED=0 - - echo "Waiting for preview at ${PREVIEW_URL}" - - while [ "$ELAPSED" -lt "$TIMEOUT" ]; do - STATUS=$(curl -s -o /dev/null -L -w "%{http_code}" "$PREVIEW_URL" || echo "000") - if [ "$STATUS" = "200" ]; then - echo "Preview is live" - echo "available=true" >> "$GITHUB_OUTPUT" - exit 0 - fi - echo "Status: ${STATUS} (${ELAPSED}s / ${TIMEOUT}s)" - sleep "$INTERVAL" - ELAPSED=$((ELAPSED + INTERVAL)) - done - - echo "Preview deployment timed out after ${TIMEOUT}s" - echo "available=false" >> "$GITHUB_OUTPUT" - - - name: Post visual review request - if: steps.wait.outputs.available == 'true' - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 - env: - PREVIEW_URLS: ${{ needs.resolve-urls.outputs.urls }} - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - with: - script: | - const fs = require('fs'); - - let urls; - try { - urls = JSON.parse(process.env.PREVIEW_URLS); - } catch (e) { - core.warning(`Failed to parse PREVIEW_URLS: ${e.message}`); - return; - } - - const prNumber = context.issue.number || Number(process.env.PR_NUMBER); - const previewBase = `https://influxdata.github.io/docs-v2/pr-preview/pr-${prNumber}`; - - // Build preview URL list - const urlList = urls - .map(u => `- [${u}](${previewBase}${u})`) - .join('\n'); - - // Read the Copilot visual review template - const template = fs.readFileSync( - '.github/prompts/copilot-visual-review.md', - 'utf8' - ); - - const marker = ''; - const body = [ - marker, - '## Preview Pages for Review', - '', - `${urls.length} page(s) changed in this PR:`, - '', - '
', - 'Preview URLs', - '', - urlList, - '', - '
', - '', - '---', - '', - `@github-copilot please review the preview pages listed above using the template below:`, - '', - template.trim(), - '', - urlList, - ].join('\n'); - - // Update existing comment or create new one - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - }); - const existing = comments.find(c => c.body.includes(marker)); - - if (existing) { - await github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: existing.id, - body, - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - body, - }); - } - - core.info(`Posted visual review request with ${urls.length} URLs`); - - - name: Post timeout notice - if: steps.wait.outputs.available == 'false' - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 - env: - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - with: - script: | - const prNumber = context.issue.number || Number(process.env.PR_NUMBER); - const marker = ''; - const body = [ - marker, - '## Visual Review Skipped', - '', - 'The PR preview deployment did not become available within 10 minutes.', - 'Visual review was skipped. 
The Copilot code review (Job 2) still ran.', - '', - 'To trigger visual review manually, re-run this workflow after the', - 'preview is deployed.', - ].join('\n'); - - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - }); - const existing = comments.find(c => c.body.includes(marker)); - - if (existing) { - await github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: existing.id, - body, - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - body, - }); - } diff --git a/.github/workflows/pr-preview.yml b/.github/workflows/pr-preview.yml index 7b4c1c090b..7a0dc8469f 100644 --- a/.github/workflows/pr-preview.yml +++ b/.github/workflows/pr-preview.yml @@ -143,8 +143,6 @@ jobs: - name: Deploy preview if: steps.detect.outputs.pages-to-deploy != '[]' - id: deploy-preview - continue-on-error: true uses: rossjrw/pr-preview-action@v1.4.8 with: source-dir: ./preview-staging @@ -152,27 +150,8 @@ jobs: umbrella-dir: pr-preview action: deploy - - name: Validate preview deployment - if: steps.detect.outputs.pages-to-deploy != '[]' - id: validate-deploy - run: | - DEPLOY_OUTCOME="${{ steps.deploy-preview.outcome }}" - DEPLOY_URL="${{ steps.deploy-preview.outputs.deployment-url }}" - - if [ -z "$DEPLOY_URL" ]; then - echo "Deployment step did not produce a preview URL. Failing preview job." - exit 1 - fi - - if [ "$DEPLOY_OUTCOME" != "success" ]; then - echo "Deployment reported outcome: $DEPLOY_OUTCOME" - echo "Preview URL exists; treating as transient post-deploy comment error." 
- fi - - echo "status=ok" >> "$GITHUB_OUTPUT" - - name: Post success comment - if: steps.detect.outputs.pages-to-deploy != '[]' && steps.validate-deploy.outputs.status == 'ok' + if: steps.detect.outputs.pages-to-deploy != '[]' uses: actions/github-script@v7 with: script: | diff --git a/.github/workflows/sync-plugins.yml b/.github/workflows/sync-plugins.yml index d840f42475..50c9ecb4ec 100644 --- a/.github/workflows/sync-plugins.yml +++ b/.github/workflows/sync-plugins.yml @@ -25,7 +25,7 @@ jobs: # Only run on issues with sync-plugin-docs label or manual dispatch if: | github.event_name == 'workflow_dispatch' || - (github.event_name == 'issues' && contains(github.event.issue.labels.*.name, 'source:sync')) + (github.event_name == 'issues' && contains(github.event.issue.labels.*.name, 'sync-plugin-docs')) steps: - name: Parse issue inputs @@ -170,7 +170,7 @@ jobs: repo: context.repo.repo, issue_number: parseInt(issueNumber), state: 'closed', - labels: ['source:sync', 'validation-failed'] + labels: ['sync-plugin-docs', 'validation-failed'] }); } @@ -418,7 +418,7 @@ jobs: repo: context.repo.repo, issue_number: ${{ steps.inputs.outputs.issue_number }}, state: 'closed', - labels: ['source:sync', 'completed'] + labels: ['sync-plugin-docs', 'completed'] }); - name: Report failure diff --git a/.gitignore b/.gitignore index b134cbfb1f..f4b8e0f042 100644 --- a/.gitignore +++ b/.gitignore @@ -12,8 +12,22 @@ package-lock.json # Content generation /content/influxdb*/**/api/**/*.html +/content/influxdb*/**/api/**/*.md !api-docs/**/.config.yml /api-docs/redoc-static.html* + +# API documentation generation (generated by api-docs/scripts/) +/content/influxdb/*/api/** +/content/influxdb3/*/api/** +/content/influxdb3/*/reference/api/** +/content/enterprise_influxdb/*/api/** +/static/openapi +/data/article_data + +# Exception: hand-crafted API conceptual pages (not generated) +!/content/influxdb3/*/api/administration/ +!/content/influxdb3/*/api/administration/_index.md + 
/helper-scripts/output/* /telegraf-build !telegraf-build/templates @@ -38,6 +52,8 @@ tmp # TypeScript build output **/dist/ +# Exception: include compiled API doc scripts for easier use +!api-docs/scripts/dist/ **/dist-lambda/ # User context files for AI assistant tools diff --git a/.mcp.json b/.mcp.json index f538540906..f600dfa672 100644 --- a/.mcp.json +++ b/.mcp.json @@ -1,33 +1,20 @@ { "$schema": "https://raw.githubusercontent.com/modelcontextprotocol/modelcontextprotocol/refs/heads/main/schema/2025-06-18/schema.json", - "description": "InfluxData documentation assistance via MCP servers", + "description": "InfluxData documentation assistance via MCP server - Node.js execution", "mcpServers": { - "influxdb-docs": { - "comment": "Hosted InfluxDB documentation search. Uses API key auth (set INFLUXDATA_DOCS_KAPA_API_KEY env var). Get your key from the Kapa dashboard. Rate limits: 60 req/min.", - "type": "sse", - "url": "https://influxdb-docs.mcp.kapa.ai", - "headers": { - "Authorization": "Bearer ${INFLUXDATA_DOCS_KAPA_API_KEY}" - } - }, - "influxdb-docs-oauth": { - "comment": "Hosted InfluxDB documentation search (OAuth). No API key needed--authenticates via Google OAuth on first use. Rate limits: 40 req/hr, 200 req/day.", - "type": "sse", - "url": "https://influxdb-docs.mcp.kapa.ai" - }, "influxdata": { - "comment": "Local Docs MCP server (optional). To install and setup, see https://github.com/influxdata/docs-mcp-server. NOTE: uses deprecated endpoints--pending update.", + "comment": "Use Node to run Docs MCP. 
To install and setup, see https://github.com/influxdata/docs-mcp-server", "type": "stdio", "command": "node", "args": [ "${DOCS_MCP_SERVER_PATH}/dist/index.js" ], "env": { - "INFLUXDATA_DOCS_API_KEY_FILE": "${INFLUXDATA_DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}", - "INFLUXDATA_DOCS_MODE": "external-only", - "INFLUXDATA_DOCS_LOG_LEVEL": "${INFLUXDATA_DOCS_LOG_LEVEL:-info}", + "DOCS_API_KEY_FILE": "${DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}", + "DOCS_MODE": "external-only", + "MCP_LOG_LEVEL": "${MCP_LOG_LEVEL:-info}", "NODE_ENV": "${NODE_ENV:-production}" } } } -} +} \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md index bd27e9efaf..9fc79161bb 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,22 +1,36 @@ # InfluxData Documentation (docs-v2) -> **Shared project guidelines for all AI assistants** +> **For general AI assistants (Claude, ChatGPT, Gemini, etc.)** +> +> This guide provides comprehensive instructions for AI assistants helping with the InfluxData documentation repository. It focuses on content creation, writing workflows, and style guidelines. > > **Other instruction resources**: -> - [.github/copilot-instructions.md](.github/copilot-instructions.md) - GitHub Copilot (CLI tools, workflows, repo structure) -> - [CLAUDE.md](CLAUDE.md) - Claude with MCP (pointer file) +> +> - [.github/copilot-instructions.md](.github/copilot-instructions.md) - For GitHub Copilot (focused on coding and automation) +> - [CLAUDE.md](CLAUDE.md) - For Claude with MCP (minimal pointer) > - [.claude/](.claude/) - Claude MCP configuration (commands, agents, skills) > - [.github/instructions/](.github/instructions/) - File pattern-specific instructions -## Commands +## Project Overview + +This repository powers [docs.influxdata.com](https://docs.influxdata.com), a Hugo-based static documentation site covering InfluxDB 3, InfluxDB v2/v1, Telegraf, and related products. 
+ +**Key Characteristics:** -| Task | Command | Notes | -|------|---------|-------| -| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | ~4s | -| Build | `npx hugo --quiet` | ~75s — **NEVER CANCEL** | -| Dev server | `npx hugo server` | ~92s, port 1313 | -| Test code blocks | `yarn test:codeblocks:all` | 15-45m — **NEVER CANCEL** | -| Lint | `yarn lint` | ~1m | +- **Scale**: 5,359+ pages +- **Build time**: \~75 seconds (NEVER cancel Hugo builds) +- **Tech stack**: Hugo, Node.js, Docker, Vale, Pytest, Cypress +- **Test time**: 15-45 minutes for full code block tests + +## Quick Commands + +| Task | Command | Time | +| -------------------- | --------------------------------------- | ------ | +| Install dependencies | `CYPRESS_INSTALL_BINARY=0 yarn install` | \~4s | +| Build site | `npx hugo --quiet` | \~75s | +| Dev server | `npx hugo server` | \~92s | +| Test code blocks | `yarn test:codeblocks:all` | 15-45m | +| Lint | `yarn lint` | \~1m | ## Repository Structure @@ -31,7 +45,7 @@ docs-v2/ │ └── example.md # Shortcode testing playground ├── layouts/ # Hugo templates and shortcodes ├── assets/ # JS, CSS, TypeScript -├── api-docs/ # InfluxDB OpenAPI specifications, API reference documentation generation scripts +├── api-docs/ # OpenAPI specifications ├── data/ # YAML/JSON data files ├── public/ # Build output (gitignored, ~529MB) └── .github/ @@ -40,18 +54,19 @@ docs-v2/ **Content Paths**: See [copilot-instructions.md](.github/copilot-instructions.md#content-organization) -## Documentation MCP Server +## Common Workflows -A hosted MCP server provides semantic search over all InfluxDB documentation. -Use it to verify technical accuracy, check API syntax, and find related docs. +### Editing a page in your browser -See the [InfluxDB documentation MCP server guide](https://docs.influxdata.com/influxdb3/core/admin/mcp-server/) for setup instructions. +1. Navigate to the desired page on [docs.influxdata.com](https://docs.influxdata.com) +2. 
Click the "Edit this page" link at the bottom +3. Make changes in the GitHub web editor +4. Commit changes via a pull request -## Common Workflows - -### Creating/Editing Content +### Creating/Editing Content Manually **Frontmatter** (page metadata): + ```yaml title: Page Title # Required - becomes h1 description: Brief desc # Required - for SEO @@ -63,15 +78,18 @@ weight: 1 # Required - sort order ``` **Shared Content** (avoid duplication): + ```yaml source: /shared/path/to/content.md ``` Shared content files (`/shared/path/to/content.md`): + - Don't store frontmatter - Can use `{{% show-in %}}`, `{{% hide-in %}}`, and the `version` keyword (`/influxdb3/version/content.md`) **Common Shortcodes**: + - Callouts: `> [!Note]`, `> [!Warning]`, `> [!Important]`, `> [!Tip]` - Tabs: `{{< tabs-wrapper >}}` + `{{% tabs %}}` + `{{% tab-content %}}` - Required: `{{< req >}}` or `{{< req type="key" >}}` @@ -82,6 +100,7 @@ Shared content files (`/shared/path/to/content.md`): ### Testing Changes **Always test before committing**: + ```bash # Verify server renders (check 200 status) curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ @@ -95,83 +114,140 @@ yarn test:links content/influxdb3/core/**/*.md **📖 Complete Reference**: [DOCS-TESTING.md](DOCS-TESTING.md) +### Committing Changes + +**Commit Message Format**: + +``` +type(scope): description + +Examples: +- fix(enterprise): correct Docker environment variable +- feat(influxdb3): add new plugin documentation +- docs(core): update configuration examples +``` + +**Types**: `fix`, `feat`, `style`, `refactor`, `test`, `chore` + +**Scopes**: `enterprise`, `influxdb3`, `core`, `cloud`, `telegraf`, etc. + +**Pre-commit hooks** run automatically (Vale, Prettier, tests). 
Skip with: + +```bash +git commit -m "message" --no-verify +``` + +**📖 Complete Reference**: [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#commit-guidelines) + +## Key Patterns + +### Content Organization + +- **Product versions**: Managed in `/data/products.yml` +- **Semantic line feeds**: One sentence per line for better diffs +- **Heading hierarchy**: Use h2-h6 only (h1 auto-generated from frontmatter) +- **Image naming**: `project/version-context-description.png` + +### Code Examples + +**Testable code blocks** (pytest): + +```python +print("Hello, world!") +``` + + + +``` +Hello, world! +``` + +**Language identifiers**: Use `python` not `py`, `bash` not `sh` (for pytest collection) + +### API Documentation + +- **Location**: `/api-docs/` directory +- **Format**: OpenAPI 3.0 YAML +- **Generation**: Uses Redoc + custom processing +- **📖 Workflow**: [api-docs/README.md](api-docs/README.md) + +### JavaScript/TypeScript + +- **Entry point**: `assets/js/main.js` +- **Pattern**: Component-based with `data-component` attributes +- **Debugging**: Source maps or debug helpers available +- **📖 Details**: [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#javascript-in-the-documentation-ui) + +## Important Constraints + +### Performance + +- **NEVER cancel Hugo builds** - they take \~75s normally +- **NEVER cancel test runs** - code block tests take 15-45 minutes +- **Set timeouts**: Hugo (180s+), tests (30+ minutes) + +### Style Guidelines + +- Use Google Developer Documentation style +- Active voice, present tense, second person for instructions +- No emojis unless explicitly requested +- Use long options in CLI examples (`--option` vs `-o`) +- Format code blocks within 80 characters + +### Network Restrictions + +Some operations may fail in restricted environments: + +- Docker builds requiring external repos +- `docker compose up local-dev` (Alpine packages) +- Cypress installation (use `CYPRESS_INSTALL_BINARY=0`) + +## Documentation References + +| Document | Purpose | +| 
------------------------------------------------------------------ | ------------------------------------------------ | +| [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md) | Contribution workflow, style guidelines | +| [DOCS-TESTING.md](DOCS-TESTING.md) | Testing procedures (code blocks, links, linting) | +| [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) | Complete shortcode reference | +| [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) | Complete frontmatter field reference | +| [.github/copilot-instructions.md](.github/copilot-instructions.md) | Primary AI assistant instructions | +| [api-docs/README.md](api-docs/README.md) | API documentation workflow | +| [content/example.md](content/example.md) | Live shortcode examples for testing | + +## Specialized Topics + +### Working with Specific Products + +| Product | Content Path | Special Notes | +| ------------------------ | ----------------------------------------------------------------------------- | --------------------------------------- | +| InfluxDB 3 Core | `/content/influxdb3/core/` | Latest architecture | +| InfluxDB 3 Enterprise | `/content/influxdb3/enterprise/` | Core + licensed features, clustered | +| InfluxDB Cloud Dedicated | `/content/influxdb3/cloud-dedicated/`, `/content/influxdb3/cloud-serverless/` | Managed and distributed | +| InfluxDB Clustered | `/content/influxdb3/clustered/` | Self-managed and distributed | +| InfluxDB Cloud | `/content/influxdb/cloud/` | Legacy but active | +| InfluxDB v2 | `/content/influxdb/v2/` | Legacy but active | +| InfluxDB Enterprise v1 | `/content/enterprise_influxdb/v1/` | Legacy but active enterprise, clustered | + +### Advanced Tasks + +- **Vale configuration**: `.ci/vale/styles/` for custom rules +- **Link checking**: Uses custom `link-checker` binary +- **Docker testing**: `compose.yaml` defines test services +- **Lefthook**: Git hooks configuration in `lefthook.yml` + +## Troubleshooting + +| Issue | Solution | +| ------------------------ | 
------------------------------------------------------ | +| Pytest collected 0 items | Use `python` not `py` for code block language | +| Hugo build errors | Check `/config/_default/` configuration | +| Link validation slow | Test specific files: `yarn test:links content/file.md` | +| Vale errors | Check `.ci/vale/styles/config/vocabularies` | + +## Critical Reminders -## Constraints - -- **NEVER cancel** Hugo builds (~75s) or test runs (15-45m) — the site has 5,359+ pages -- Set timeouts: Hugo 180s+, tests 30m+ -- Use `python` not `py` for code block language identifiers (pytest won't collect `py` blocks) -- Shared content files (`content/shared/`) have no frontmatter — the consuming page provides it -- Product names and versions come from `data/products.yml` (single source of truth) -- Commit format: `type(scope): description` — see [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md#commit-guidelines) -- Network-restricted environments: Cypress (`CYPRESS_INSTALL_BINARY=0`), Docker builds, and Alpine packages may fail - -## Style Rules - -Follows [Google Developer Documentation Style Guide](https://developers.google.com/style) with these project-specific additions: - -- **Semantic line feeds** — one sentence per line (better diffs) -- **No h1 in content** — `title` frontmatter auto-generates h1 -- Active voice, present tense, second person -- Long options in CLI examples (`--output` not `-o`) -- Code blocks within 80 characters - -## Content Structure - -**Required frontmatter**: `title`, `description`, `menu`, `weight` -— see [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) - -**Shared content**: `source: /shared/path/to/content.md` -— shared files use `{{% show-in %}}` / `{{% hide-in %}}` for product-specific content - -**Shortcodes**: Callouts use `> [!Note]` / `> [!Warning]` syntax -— see [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) and [content/example.md](content/example.md) - -## Product Content Paths - -Canonical paths from `data/products.yml`: - -| Product | Content Path 
| -|---------|-------------| -| InfluxDB 3 Core | `content/influxdb3/core/` | -| InfluxDB 3 Enterprise | `content/influxdb3/enterprise/` | -| InfluxDB 3 Explorer | `content/influxdb3/explorer/` | -| InfluxDB Cloud Serverless | `content/influxdb3/cloud-serverless/` | -| InfluxDB Cloud Dedicated | `content/influxdb3/cloud-dedicated/` | -| InfluxDB Clustered | `content/influxdb3/clustered/` | -| InfluxDB OSS v2 | `content/influxdb/v2/` | -| InfluxDB OSS v1 | `content/influxdb/v1/` | -| InfluxDB Cloud (TSM) | `content/influxdb/cloud/` | -| InfluxDB Enterprise v1 | `content/enterprise_influxdb/` | -| Telegraf | `content/telegraf/` | -| Chronograf | `content/chronograf/` | -| Kapacitor | `content/kapacitor/` | -| Flux | `content/flux/` | -| Shared content | `content/shared/` | - -## Doc Review Pipeline - -Automated PR review for documentation changes. -See [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) for the label taxonomy. - -| Resource | Path | -|----------|------| -| Label guide | [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) | -| Triage agent | [.claude/agents/doc-triage-agent.md](.claude/agents/doc-triage-agent.md) | -| Content review instructions | [.github/instructions/content-review.instructions.md](.github/instructions/content-review.instructions.md) | -| Review agent (local) | [.claude/agents/doc-review-agent.md](.claude/agents/doc-review-agent.md) | -| Auto-label workflow | [.github/workflows/auto-label.yml](.github/workflows/auto-label.yml) | -| Doc review workflow | [.github/workflows/doc-review.yml](.github/workflows/doc-review.yml) | - -## Reference - -| Document | Purpose | -|----------|---------| -| [DOCS-CONTRIBUTING.md](DOCS-CONTRIBUTING.md) | Style guidelines, commit format, contribution workflow | -| [DOCS-TESTING.md](DOCS-TESTING.md) | Code block testing, link validation, Vale linting | -| [DOCS-SHORTCODES.md](DOCS-SHORTCODES.md) | Complete shortcode reference | -| [DOCS-FRONTMATTER.md](DOCS-FRONTMATTER.md) | Complete frontmatter field 
reference | -| [api-docs/README.md](api-docs/README.md) | API documentation workflow | -| [content/example.md](content/example.md) | Live shortcode examples | -| [.github/copilot-instructions.md](.github/copilot-instructions.md) | CLI tools, repo structure, workflows | -| [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) | Label taxonomy and review pipeline | +1. **Be a critical thinking partner** - Challenge assumptions, identify issues +2. **Test before committing** - Run relevant tests locally +3. **Reference, don't duplicate** - Link to detailed docs instead of copying +4. **Respect build times** - Don't cancel long-running operations +5. **Follow conventions** - Use established patterns for consistency diff --git a/CLAUDE.md b/CLAUDE.md index dc8350d2b0..fe99fa453a 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,23 +6,12 @@ > > **Full instruction resources**: > - [.github/copilot-instructions.md](.github/copilot-instructions.md) - For GitHub Copilot (technical setup, automation) -> - [AGENTS.md](AGENTS.md) - Shared project guidelines (style, constraints, content structure) -> - [.github/LABEL_GUIDE.md](.github/LABEL_GUIDE.md) - Label taxonomy and pipeline usage +> - [AGENTS.md](AGENTS.md) - For general AI assistants (content creation, workflows, style guidelines) > - [.claude/](.claude/) - Claude MCP configuration directory with: > - Custom commands in `.claude/commands/` > - Specialized agents in `.claude/agents/` > - Custom skills in `.claude/skills/` -## Documentation MCP server - -This repo includes [`.mcp.json`](.mcp.json) with a hosted InfluxDB documentation search server. -Use it to verify technical accuracy, check API syntax, and find related docs. - -- **`influxdb-docs`** — API key auth. Set `INFLUXDATA_DOCS_KAPA_API_KEY` env var before launching Claude Code. -- **`influxdb-docs-oauth`** — OAuth fallback. No setup needed. 
- -See [content-editing skill](.claude/skills/content-editing/SKILL.md#part-4-fact-checking-with-the-documentation-mcp-server) for usage details. - ## Purpose and scope Claude should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. diff --git a/DOCS-CONTRIBUTING.md b/DOCS-CONTRIBUTING.md index 7f09715533..5fcd6db1af 100644 --- a/DOCS-CONTRIBUTING.md +++ b/DOCS-CONTRIBUTING.md @@ -1,6 +1,7 @@ # Contributing to InfluxData Documentation + ## Quick Start Ready to contribute? @@ -14,7 +15,7 @@ Ready to contribute? For detailed setup and reference information, see the sections below. ---- +*** ## Legal & Getting Started @@ -27,18 +28,19 @@ What constitutes a "substantial" change is at the discretion of InfluxData docum [Sign the InfluxData CLA](https://www.influxdata.com/legal/cla/) -_**Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA._ +***Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA.* -_If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)._ +*If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue).* ### Fork and clone InfluxData Documentation Repository [Fork this repository](https://help.github.com/articles/fork-a-repo/) and [clone it](https://help.github.com/articles/cloning-a-repository/) to your local machine. 
---- +*** + ## Development Environment Setup ### Prerequisites @@ -76,9 +78,9 @@ dev dependencies used in pre-commit hooks for linting, syntax-checking, and test Dev dependencies include: - [Lefthook](https://github.com/evilmartians/lefthook): configures and -manages git pre-commit and pre-push hooks for linting and testing Markdown content. + manages git pre-commit and pre-push hooks for linting and testing Markdown content. - [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency -- [Cypress]: e2e testing for UI elements and URLs in content +- \[Cypress]: e2e testing for UI elements and URLs in content ### Install Vale (style linting) @@ -114,11 +116,11 @@ docs-v2 contains a `./.vscode/settings.json` that configures the following exten - Vale: shows linter errors and suggestions in the editor. - YAML Schemas: validates frontmatter attributes. ---- +*** -## Making Changes +## Making Changes ### Style Guidelines @@ -130,7 +132,7 @@ Content follows Google Developer Documentation Style Guide and YouTube API docum Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown). -_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._ +*Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling.* #### Semantic line feeds @@ -270,6 +272,7 @@ Documentation audit tools should: 4. Support both single-line and multi-line exclusion lists + ### Common Shortcodes Reference #### Callouts (notes and warnings) @@ -327,7 +330,7 @@ For the complete shortcodes reference with all available shortcodes and usage ex Test shortcodes with working examples in **[content/example.md](content/example.md)**. ---- +*** ### InfluxDB API documentation @@ -338,11 +341,10 @@ InfluxDB API documentation when documentation is deployed. 
For more information about editing and generating InfluxDB API documentation, see the [API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). ---- +*** ## Testing & Quality Assurance - Pre-commit hooks run automatically when you commit changes, testing your staged files with Vale, Prettier, Cypress, and Pytest. To skip hooks if needed: ```sh @@ -364,13 +366,14 @@ yarn test:links content/influxdb3/core/**/*.md For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[DOCS-TESTING.md](DOCS-TESTING.md)**. - ---- +*** + ## Submission Process + ### Commit Guidelines When creating commits, follow these guidelines: @@ -383,6 +386,7 @@ When creating commits, follow these guidelines: - For multiple issues, use comma separation: `closes influxdata/DAR#517, closes influxdata/DAR#518` **Examples:** + ``` fix(enterprise): correct Docker environment variable name for license email fix(influxdb3): correct Docker environment variable and compose examples for monolith @@ -394,7 +398,7 @@ chore(ci): update Vale configuration Push your changes up to your forked repository, then [create a new pull request](https://help.github.com/articles/creating-a-pull-request/). ---- +*** ## Reference Documentation @@ -404,6 +408,7 @@ For detailed reference documentation, see: - **[DOCS-SHORTCODES.md](DOCS-SHORTCODES.md)** - Complete shortcodes reference with usage examples for all available shortcodes + ### Advanced Configuration #### Vale style linting configuration @@ -434,6 +439,7 @@ To add accepted/rejected terms for specific products, configure a style for the To learn more about configuration and rules, see [Vale configuration](https://vale.sh/docs/topics/config). 
+ #### JavaScript in the documentation UI The InfluxData documentation UI uses TypeScript and JavaScript with ES6+ syntax and @@ -450,13 +456,14 @@ If you're adding UI functionality that requires JavaScript, follow these steps: ```html
- ``` + ``` 2. Following the component pattern, create a single-purpose JavaScript module (`assets/js/components/my-component.js`) that exports a single function that receives the component element and initializes it. + 3. In `assets/js/main.js`, import the module and register the component to ensure - the component is initialized on page load. + the component is initialized on page load. ##### Debugging JavaScript @@ -470,7 +477,7 @@ To debug JavaScript code used in the InfluxData documentation UI, choose one of 1. In VS Code, select Run > Start Debugging. 2. Select the "Debug Docs (source maps)" configuration. 3. Click the play button to start the debugger. -5. Set breakpoints in the JavaScript source files--files in the +4. Set breakpoints in the JavaScript source files--files in the `assets/js/ns-hugo-imp:` namespace-- in the VS Code editor or in the Chrome Developer Tools Sources panel: @@ -484,8 +491,9 @@ To debug JavaScript code used in the InfluxData documentation UI, choose one of 1. In your JavaScript module, import debug helpers from `assets/js/utils/debug-helpers.js`. These helpers provide breakpoints and console logging as a workaround or alternative for using source maps and the Chrome DevTools debugger. + 2. Insert debug statements by calling the helper functions in your code--for example: - + ```js import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; @@ -512,4 +520,4 @@ Your system uses the configuration in `launch.json` to launch the site in Chrome and attach the debugger to the Developer Tools console. Make sure to remove the debug statements before merging your changes. -The debug helpers are designed to be used in development and should not be used in production. \ No newline at end of file +The debug helpers are designed to be used in development and should not be used in production. 
diff --git a/DOCS-TESTING.md b/DOCS-TESTING.md index 17506fc045..95bf9ba94f 100644 --- a/DOCS-TESTING.md +++ b/DOCS-TESTING.md @@ -11,13 +11,13 @@ This guide covers all testing procedures for the InfluxData documentation, inclu ## Test Types Overview -| Test Type | Purpose | Command | -| ----------------------- | ----------------------------------- | ---------------------------- | -| **Code blocks** | Validate shell/Python code examples | `yarn test:codeblocks:all` | -| **Link validation** | Check internal/external links | `yarn test:links` | -| **Style linting** | Enforce writing standards | `.ci/vale/vale.sh` | -| **Markdown generation** | Generate LLM-friendly Markdown | `yarn build:md` | -| **E2E tests** | UI and functionality testing | `yarn test:e2e` | +| Test Type | Purpose | Command | +| ----------------------- | ----------------------------------- | -------------------------- | +| **Code blocks** | Validate shell/Python code examples | `yarn test:codeblocks:all` | +| **Link validation** | Check internal/external links | `yarn test:links` | +| **Style linting** | Enforce writing standards | `.ci/vale/vale.sh` | +| **Markdown generation** | Generate LLM-friendly Markdown | `yarn build:md` | +| **E2E tests** | UI and functionality testing | `yarn test:e2e` | ## Code Block Testing diff --git a/PLAN.md b/PLAN.md new file mode 100644 index 0000000000..fca5b5330d --- /dev/null +++ b/PLAN.md @@ -0,0 +1,58 @@ +--- +branch: feat-api-uplift +repo: docs-v2 +created: 2025-12-02T15:28:32Z +status: in-progress +--- + +# feat-api-uplift + +## Overview + +Replace the current API reference documentation implementation (RapiDoc web components) with Hugo-native templates. 
+ +## Phase 1: Core Infrastructure (completed) + +### Build process + +- `yarn build:api` parses OpenAPI specs into Hugo data +- Generates Hugo pages with frontmatter for Algolia search integration +- Static JSON chunks for faster page loads + +### OpenAPI tag cleanup + +- Removed unused tags from OpenAPI specs +- Updated tags to be consistent and descriptive + +### Hugo-native POC + +- Implemented Hugo-native templates in `layouts/partials/api/hugo-native/` +- Tested with InfluxDB 3 Core product + +## Phase 2: Migration to Hugo-Native (in progress) + +**Plan**: @docs/plans/2026-02-13-hugo-native-api-migration.md + +### Task Order + +1. ✅ **Promote Hugo-native templates** - Move from POC to production +2. ✅ **Remove RapiDoc templates** - Delete templates and partials +3. ✅ **Remove RapiDoc JavaScript** - Delete components +4. ✅ **Remove operation pages** - Delete individual operation page generation +5. ✅ **Update Cypress tests** - Simplify tests for static HTML +6. ✅ **Clean up styles** - Remove RapiDoc CSS and dead auth modal code +7. ✅ **Fix generation script cleanup** - Added `--clean` (default) and `--dry-run` flags +8. ✅ **Add inline code samples** - curl examples and Ask AI links per operation +9. ✅ **Refine API styling** - Theme-aware code blocks, font normalization, layout width, TOC border +10. **Apply Cache Data tag split** - Enterprise spec update (planned) +11. 
**Migrate remaining products** - Apply to all InfluxDB products (planned)
+
+## Related Files
+
+- Branch: `feat-api-uplift`
+- Plan: `plans/2026-02-13-hugo-native-api-migration.md`
+
+## Notes
+
+- Use Chrome devtools and Cypress to debug
+- No individual operation pages - operations accessed only via tag pages
diff --git a/PLATFORM_REFERENCE.md b/PLATFORM_REFERENCE.md
index c4e5f5d676..374eb6be94 100644
--- a/PLATFORM_REFERENCE.md
+++ b/PLATFORM_REFERENCE.md
@@ -1,65 +1,79 @@
+
Use the following information to help determine which InfluxDB version and product the user is asking about:

InfluxDB OSS v2:
- - Documentation: https://docs.influxdata.com/influxdb/v2/
- - Query languages: InfluxQL and Flux
- - Clients: Telegraf, influx CLI, v1/v2 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb/v2/>
+- Query languages: InfluxQL and Flux
+- Clients: Telegraf, influx CLI, v1/v2 client libraries

InfluxDB OSS v1:
- - Documentation: https://docs.influxdata.com/influxdb/v1/
- - Query languages: InfluxQL and Flux
- - Clients: Telegraf, influx CLI, v1/v2 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb/v1/>
+- Query languages: InfluxQL and Flux
+- Clients: Telegraf, influx CLI, v1/v2 client libraries

InfluxDB Enterprise v1:
- - Documentation: https://docs.influxdata.com/enterprise_influxdb/v1.12/
- - Query languages: InfluxQL and Flux
- - Clients: Telegraf, influx CLI, v1/v2 client libraries
+
+- Documentation: <https://docs.influxdata.com/enterprise_influxdb/v1.12/>
+- Query languages: InfluxQL and Flux
+- Clients: Telegraf, influx CLI, v1/v2 client libraries

InfluxDB Cloud (TSM):
- - Documentation: https://docs.influxdata.com/influxdb/cloud/
- - Query languages: InfluxQL and Flux
- - Clients: Telegraf, influx CLI, v2 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb/cloud/>
+- Query languages: InfluxQL and Flux
+- Clients: Telegraf, influx CLI, v2 client libraries

InfluxDB Cloud Serverless:
- - Documentation: https://docs.influxdata.com/influxdb3/cloud-serverless/
- - Query languages: SQL and InfluxQL and Flux
- - Clients: Telegraf, influxctl CLI, v3 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb3/cloud-serverless/>
+- Query languages: SQL and InfluxQL and Flux
+- Clients: Telegraf, influxctl CLI, v3 client libraries

InfluxDB Cloud Dedicated:
- - Documentation: https://docs.influxdata.com/influxdb3/cloud-dedicated/
- - Query languages: SQL and InfluxQL
- - Clients: Telegraf, influxctl CLI, v3 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb3/cloud-dedicated/>
+- Query languages: SQL and InfluxQL
+- Clients: Telegraf, influxctl CLI, v3 client libraries

InfluxDB Clustered:
- - Documentation: https://docs.influxdata.com/influxdb3/clustered/
- - Query languages: SQL and InfluxQL
- - Clients: Telegraf, influxctl CLI, v3 client libraries
+
+- Documentation: <https://docs.influxdata.com/influxdb3/clustered/>
+- Query languages: SQL and InfluxQL
+- Clients: Telegraf, influxctl CLI, v3 client libraries

InfluxDB 3 Core:
- - Documentation: https://docs.influxdata.com/influxdb3/core/
- - Query languages: SQL and InfluxQL
- - Clients: Telegraf, influxdb3 CLI, v3 client libraries, InfluxDB 3 Explorer
+
+- Documentation: <https://docs.influxdata.com/influxdb3/core/>
+- Query languages: SQL and InfluxQL
+- Clients: Telegraf, influxdb3 CLI, v3 client libraries, InfluxDB 3 Explorer

InfluxDB 3 Enterprise:
- - Documentation: https://docs.influxdata.com/influxdb3/enterprise/
- - Query languages: SQL and InfluxQL
- - Clients: Telegraf, influxdb3 CLI, v3 client libraries, InfluxDB 3 Explorer
+
+- Documentation: <https://docs.influxdata.com/influxdb3/enterprise/>
+- Query languages: SQL and InfluxQL
+- Clients: Telegraf, influxdb3 CLI, v3 client libraries, InfluxDB 3 Explorer

InfluxDB 3 Explorer:
- - Documentation: https://docs.influxdata.com/influxdb3/explorer/
+
+- Documentation: <https://docs.influxdata.com/influxdb3/explorer/>

Telegraf:
- - Documentation: https://docs.influxdata.com/telegraf/v1.37/
+
+- Documentation: <https://docs.influxdata.com/telegraf/v1.37/>

Chronograf:
- - Documentation: https://docs.influxdata.com/chronograf/v1.11/
+
+- Documentation: <https://docs.influxdata.com/chronograf/v1.11/>

Kapacitor:
- - Documentation: https://docs.influxdata.com/kapacitor/v1.8/
+
+- Documentation: <https://docs.influxdata.com/kapacitor/v1.8/>

Flux:
- - Documentation: https://docs.influxdata.com/flux/v0.x/
+- Documentation: <https://docs.influxdata.com/flux/v0.x/>
diff --git a/SPELL-CHECK.md b/SPELL-CHECK.md
index 4ef0eeb6dd..f66336fce1 100644
--- a/SPELL-CHECK.md
+++ b/SPELL-CHECK.md
@@ -11,14
+11,14 @@ The docs-v2 repository uses **two complementary spell-checking tools**: ## Tool Comparison -| Feature | Vale | Codespell | -|---------|------|-----------| -| **Purpose** | Document spell checking | Code comment spell checking | -| **Integration** | Pre-commit hooks (Docker) | CI/CD pipeline | -| **False Positives** | Low (comprehensive filters) | Low (clear dictionary only) | -| **Customization** | YAML rules | INI config + dictionary lists | -| **Performance** | Moderate | Fast | -| **True Positive Detection** | Document-level | Code-level | +| Feature | Vale | Codespell | +| --------------------------- | --------------------------- | ----------------------------- | +| **Purpose** | Document spell checking | Code comment spell checking | +| **Integration** | Pre-commit hooks (Docker) | CI/CD pipeline | +| **False Positives** | Low (comprehensive filters) | Low (clear dictionary only) | +| **Customization** | YAML rules | INI config + dictionary lists | +| **Performance** | Moderate | Fast | +| **True Positive Detection** | Document-level | Code-level | ## Vale Configuration @@ -51,17 +51,20 @@ Unlike other documentation style checkers, this configuration **intentionally in ### Filter Patterns Explained -#### 1. camelCase and snake_case Identifiers +#### 1. 
camelCase and snake\_case Identifiers + ```regex (?:_*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)*|[a-z_][a-z0-9]*_[a-z0-9_]*) ``` + **Why**: Prevents false positives on variable/method names while NOT matching normal prose **Breakdown**: + - **camelCase**: `_*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)*` - Requires at least one uppercase letter (distinguishes `myVariable` from `provide`) - Allows leading underscores for private variables (`_privateVar`, `__dunder__`) -- **snake_case**: `[a-z_][a-z0-9]*_[a-z0-9_]*` +- **snake\_case**: `[a-z_][a-z0-9]*_[a-z0-9_]*` - Requires at least one underscore - Distinguishes `my_variable` from normal words @@ -69,49 +72,61 @@ Unlike other documentation style checkers, this configuration **intentionally in **Examples NOT Ignored** (caught by spell-checker): `provide`, `database`, `variable` (normal prose) -#### 2. UPPER_CASE Constants +#### 2. UPPER\_CASE Constants + ```regex [A-Z_][A-Z0-9_]+ ``` + **Why**: Prevents false positives on environment variables and constants **Examples Ignored**: `API_KEY`, `AWS_REGION`, `INFLUXDB_TOKEN` **Note**: Matches AWS, API (even single uppercase acronyms) - acceptable in docs #### 3. Version Numbers + ```regex \d+\.\d+(?:\.\d+)* ``` + **Why**: Version numbers aren't words **Examples Ignored**: `1.0`, `2.3.1`, `0.101.0`, `1.2.3.4`, `v1.2.3` **Note**: Handles any number of version parts (2-part, 3-part, 4-part, etc.) #### 4. Hexadecimal Values + ```regex 0[xX][0-9a-fA-F]+ ``` + **Why**: Hex values appear in code and aren't dictionary words **Examples Ignored**: `0xFF`, `0xDEADBEEF`, `0x1A` #### 5. URLs and Paths + ```regex /[a-zA-Z0-9/_\-\.\{\}]+ # Paths: /api/v2/write https?://[^\s\)\]>"]+ # Full URLs: https://docs.example.com ``` + **Why**: URLs contain hyphens, slashes, and special chars **Examples Ignored**: `/api/v2/write`, `/kapacitor/v1/`, `https://docs.influxdata.com` #### 6. 
Shortcode Attributes + ```regex (?:endpoint|method|url|href|src|path)="[^"]+" ``` + **Why**: Hugo shortcode attribute values often contain hyphens and special chars **Examples Ignored**: `endpoint="https://..."`, `method="POST"` **Future Enhancement**: Add more attributes as needed (name, value, data, etc.) #### 7. Code Punctuation + ```regex [@#$%^&*()_+=\[\]{};:,.<>?/\\|-]+ ``` + **Why**: Symbols and special characters aren't words **Examples Ignored**: `()`, `{}`, `[]`, `->`, `=>`, `|`, etc. @@ -134,15 +149,15 @@ To add a word that should be ignored, edit the appropriate file. - `clear` - Unambiguous spelling errors only - Examples: "recieve" → "receive", "occured" → "occurred" - - False positive rate: ~1% + - False positive rate: \~1% - `rare` - Includes uncommon but valid English words - Would flag legitimate technical terms - - False positive rate: ~15-20% + - False positive rate: \~15-20% - `code` - Includes code-specific words - Too aggressive for documentation - - False positive rate: ~25-30% + - False positive rate: \~25-30% #### Skip Directories @@ -167,6 +182,7 @@ ignore-words-list = aks,invokable - **`invokable`** - InfluxData product branding term (scriptable tasks/queries) **To add more**: + 1. Edit `.codespellrc` 2. Add word to `ignore-words-list` (comma-separated) 3. Add inline comment explaining why @@ -178,6 +194,7 @@ ignore-words-list = aks,invokable Vale automatically runs on files you commit via Lefthook. **Manual check**: + ```bash # Check all content docker compose run -T vale content/**/*.md @@ -233,7 +250,8 @@ echo "recieve the data" | codespell **Problem**: Vale flags a word that should be valid **Solutions**: -1. Check if it's a code identifier (camelCase, UPPER_CASE, hex, version) + +1. Check if it's a code identifier (camelCase, UPPER\_CASE, hex, version) 2. Add to `InfluxDataDocs/Terms/ignore.txt` if it's a technical term 3. 
Add filter pattern to `.ci/vale/styles/InfluxDataDocs/Spelling.yml` if it's a pattern @@ -242,6 +260,7 @@ echo "recieve the data" | codespell **Problem**: Codespell flags a legitimate term **Solutions**: + 1. Add to `ignore-words-list` in `.codespellrc` 2. Add skip directory if entire directory should be excluded 3. Use `-i 3` (interactive mode) to review before accepting @@ -251,6 +270,7 @@ echo "recieve the data" | codespell **Problem**: A real typo isn't caught **Solutions**: + 1. Verify it's actually a typo (not a branding term or intentional) 2. Check if it's in excluded scope (tables, URLs, code identifiers) 3. Report as GitHub issue for tool improvement diff --git a/api-docs/README.md b/api-docs/README.md index 3e59d120cf..3c35387d15 100755 --- a/api-docs/README.md +++ b/api-docs/README.md @@ -48,6 +48,7 @@ ``` 3. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 4. To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. ## Update API docs for an InfluxDB OSS release @@ -106,8 +107,8 @@ # Copy the old version directory to a directory for the new version: cp -r v2.2 v2.3 ``` - -8. In your editor, update custom content files in NEW_VERSION/content. + +8. In your editor, update custom content files in NEW\_VERSION/content. 9. Enter the following commands into your terminal to fetch and process the contracts: @@ -117,6 +118,7 @@ ``` 10. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 11. To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. 
## Update API docs for OSS spec changes between releases @@ -142,6 +144,8 @@ Follow these steps to update OSS API docs between version releases--for example, git cherry-pick [COMMIT_SHAs] git push -f origin docs-release/influxdb-oss + ``` + 4. Go into your `docs-v2` directory and create a branch for your changes--for example: ```sh @@ -165,6 +169,7 @@ Follow these steps to update OSS API docs between version releases--for example, ``` 7. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 8. To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. ## Generate InfluxDB API docs @@ -197,7 +202,7 @@ The script uses `npx` to download and execute the Redocly CLI. If `npx` returns errors, [download](https://nodejs.org/en/) and run a recent version of the Node.js installer for your OS. -2. To generate API docs for _all_ InfluxDB versions in `./openapi`, enter the following command into your terminal: +2. To generate API docs for *all* InfluxDB versions in `./openapi`, enter the following command into your terminal: ```sh sh generate-api-docs.sh @@ -239,9 +244,9 @@ We regenerate API reference docs from `influxdata/openapi` ### InfluxDB OSS v2 version - Given that - `influxdata/openapi` **master** may contain OSS spec changes not implemented - in the current OSS release, we (Docs team) maintain a release branch, `influxdata/openapi` +Given that +`influxdata/openapi` **master** may contain OSS spec changes not implemented +in the current OSS release, we (Docs team) maintain a release branch, `influxdata/openapi` **docs-release/influxdb-oss**, used to generate OSS reference docs. ### How to find the API spec used by an InfluxDB OSS version @@ -249,7 +254,7 @@ We regenerate API reference docs from `influxdata/openapi` `influxdata/openapi` does not version the InfluxData API. 
To find the `influxdata/openapi` commit SHA used in a specific version of InfluxDB OSS, see `/scripts/fetch-swagger.sh` in `influxdata/influxdb`--for example, -for the `influxdata/openapi` commit used in OSS v2.2.0, see https://github.com/influxdata/influxdb/blob/v2.2.0/scripts/fetch-swagger.sh#L13=. +for the `influxdata/openapi` commit used in OSS v2.2.0, see . For convenience, we tag `influxdata/influxdb` (OSS) release points in `influxdata/openapi` as `influxdb-oss-v[OSS_VERSION]`. See . @@ -281,16 +286,17 @@ To add new YAML files for other nodes in the contracts, follow these steps: `@redocly/cli` also provides some [built-in decorators](https://redocly.com/docs/cli/decorators/) that you can configure in `.redocly` without having to write JavaScript. + ### How to add tag content or describe a group of paths In API reference docs, we use OpenAPI `tags` elements for navigation, the `x-traitTag` vendor extension for providing custom content, and the `x-tagGroups` vendor extension for grouping tags in navigation. 
-| Example | OpenAPI field | | -|:-------------------------------------------------------------------------------------------------------|-------------------------------------------------------|--------------------------------------------| -| [Add supplementary documentation](https://docs.influxdata.com/influxdb/cloud/api/#tag/Quick-start) | `tags: [ { name: 'Quick start', x-traitTag: true } ]` | [Source](https://github.com/influxdata/openapi/master/src/cloud/tags.yml) | -| Group tags in navigation | `x-tagGroups: [ { name: 'All endpoints', tags: [...], ...} ]` | [Source](https://github.com/influxdata/docs-v2/blob/da6c2e467de7212fc2197dfe0b87f0f0296688ee/api-docs/cloud-iox/content/tag-groups.yml)) | +| Example | OpenAPI field | | +| :------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| [Add supplementary documentation](https://docs.influxdata.com/influxdb/cloud/api/#tag/Quick-start) | `tags: [ { name: 'Quick start', x-traitTag: true } ]` | [Source](https://github.com/influxdata/openapi/master/src/cloud/tags.yml) | +| Group tags in navigation | `x-tagGroups: [ { name: 'All endpoints', tags: [...], ...} ]` | [Source](https://github.com/influxdata/docs-v2/blob/da6c2e467de7212fc2197dfe0b87f0f0296688ee/api-docs/cloud-iox/content/tag-groups.yml)) | #### Add and update x-tagGroups @@ -302,6 +308,47 @@ those tags. If you assign an empty array(`[]`) to the `All endpoints` x-tagGroup in `PLATFORM/content/tag-groups.yml`, the decorator replaces the empty array with the list of tags from all Operations in the spec. +## Documentation links in OpenAPI specs + +Use the `/influxdb/version/` placeholder when including InfluxDB links in OpenAPI spec description and summary fields. 
+The build process automatically transforms these placeholders to product-specific paths based on the spec file location. + +### Writing links + +```yaml +# In api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml +info: + description: | + See [authentication](/influxdb/version/api/authentication/) for details. + Related: [tokens](/influxdb/version/admin/tokens/) +``` + +After build, these become: + +- `/influxdb3/core/api/authentication/` +- `/influxdb3/core/admin/tokens/` + +### How it works + +The product path is derived from the spec file location: + +- `api-docs/influxdb3/core/...` → `/influxdb3/core` +- `api-docs/influxdb3/enterprise/...` → `/influxdb3/enterprise` +- `api-docs/influxdb/v2/...` → `/influxdb/v2` + +Only `description` and `summary` fields are transformed. +Explicit cross-product links (e.g., `/telegraf/v1/plugins/`) remain unchanged. + +### Link validation + +Run with the `--validate-links` flag to check for broken links: + +```bash +yarn build:api-docs --validate-links +``` + +This validates that transformed links point to existing Hugo content files and warns about any broken links. + ## How to test your spec or API reference changes You can use `getswagger.sh` to fetch contracts from any URL. 
diff --git a/api-docs/enterprise_influxdb/v1/.config.yml b/api-docs/enterprise_influxdb/v1/.config.yml new file mode 100644 index 0000000000..021084937a --- /dev/null +++ b/api-docs/enterprise_influxdb/v1/.config.yml @@ -0,0 +1,10 @@ +plugins: + - '../../openapi/plugins/docs-plugin.cjs' +extends: + - recommended + - docs/all +x-influxdata-product-name: InfluxDB Enterprise v1 + +apis: + v1@1: + root: v1/ref.yml diff --git a/api-docs/enterprise_influxdb/v1/v1/content/info.yml b/api-docs/enterprise_influxdb/v1/v1/content/info.yml new file mode 100644 index 0000000000..470121c386 --- /dev/null +++ b/api-docs/enterprise_influxdb/v1/v1/content/info.yml @@ -0,0 +1,31 @@ +title: InfluxDB Enterprise v1 HTTP API +x-influxdata-short-title: InfluxDB Enterprise v1 API +x-influxdata-short-description: >- + The InfluxDB Enterprise v1 HTTP API provides a programmatic interface for writing, + querying, and managing InfluxDB Enterprise v1 clusters. +version: 1.11.6 +description: | + The InfluxDB Enterprise v1 HTTP API provides a simple way to interact with the database. + It uses HTTP response codes, authentication with username and password credentials + or API tokens, and JSON-formatted response data. + + ## Cluster Features + + InfluxDB Enterprise includes additional parameters for cluster operations: + - **Write Consistency**: Control write consistency across cluster nodes + + ## Authentication + + InfluxDB Enterprise v1 supports multiple authentication methods: + + - **Basic Authentication**: Use HTTP Basic Auth with username and password + - **Query String Authentication**: Pass `u` (username) and `p` (password) as query parameters + - **Token Authentication** (v2-compatible): Use `Authorization: Token username:password` header + + Authentication is optional unless [enabled in the configuration](/enterprise_influxdb/v1/administration/authentication_and_authorization/). 
+license: + name: Proprietary + url: https://www.influxdata.com/legal/slsa/ +contact: + name: InfluxData + url: https://www.influxdata.com diff --git a/api-docs/enterprise_influxdb/v1/v1/content/servers.yml b/api-docs/enterprise_influxdb/v1/v1/content/servers.yml new file mode 100644 index 0000000000..56066093ba --- /dev/null +++ b/api-docs/enterprise_influxdb/v1/v1/content/servers.yml @@ -0,0 +1,2 @@ +- url: http://localhost:8086 + description: Local InfluxDB Enterprise data node diff --git a/api-docs/enterprise_influxdb/v1/v1/ref.yml b/api-docs/enterprise_influxdb/v1/v1/ref.yml new file mode 100644 index 0000000000..becac77bee --- /dev/null +++ b/api-docs/enterprise_influxdb/v1/v1/ref.yml @@ -0,0 +1,1108 @@ +openapi: 3.0.0 +info: + title: InfluxDB Enterprise v1 HTTP API + version: 1.11.6 + description: | + The InfluxDB Enterprise v1 HTTP API provides a simple way to interact with the database. + It uses HTTP response codes, authentication with username and password credentials + or API tokens, and JSON-formatted response data. + + ## Cluster Features + + InfluxDB Enterprise includes additional parameters for cluster operations: + - **Write Consistency**: Control write consistency across cluster nodes + + ## Authentication + + InfluxDB Enterprise v1 supports multiple authentication methods: + + - **Basic Authentication**: Use HTTP Basic Auth with username and password + - **Query String Authentication**: Pass `u` (username) and `p` (password) as query parameters + - **Token Authentication** (v2-compatible): Use `Authorization: Token username:password` header + + Authentication is optional unless [enabled in the configuration](/enterprise_influxdb/v1/administration/authentication_and_authorization/). 
+ contact: + name: InfluxData + url: https://www.influxdata.com + license: + name: Proprietary + url: https://www.influxdata.com/legal/slsa/ +servers: + - url: http://localhost:8086 + description: Local InfluxDB Enterprise data node +security: + - BasicAuth: [] + - QueryAuth: [] +tags: + - name: System Information + description: | + Endpoints for checking server status, health, and version information. + - name: Query + description: | + Query data using InfluxQL. The `/query` endpoint supports both read queries + (SELECT, SHOW) and write queries (CREATE, DROP, ALTER, etc.). + - name: Write + description: | + Write time series data using InfluxDB line protocol. + + **Enterprise Feature**: Use the `consistency` parameter to control write consistency + across cluster nodes. + - name: Debug + description: | + Debugging and profiling endpoints for troubleshooting and performance analysis. + - name: v2 Compatibility + description: | + InfluxDB 2.x API compatibility endpoints. These endpoints allow you to use + InfluxDB 2.x client libraries with InfluxDB Enterprise 1.8+. + + Use the `Token` scheme with v1.x credentials: + ``` + Authorization: Token username:password + ``` + - name: Authentication + x-traitTag: true + description: | + InfluxDB Enterprise v1 supports multiple authentication methods: + + ### Basic Authentication + ```bash + curl -u username:password http://localhost:8086/query?q=SHOW+DATABASES + ``` + + ### Query String Authentication + ```bash + curl "http://localhost:8086/query?u=username&p=password&q=SHOW+DATABASES" + ``` + + ### Token Authentication (v2-compatible) + For v2-compatible endpoints, use the Token scheme: + ```bash + curl -H "Authorization: Token username:password" http://localhost:8086/api/v2/query + ``` +paths: + /ping: + get: + operationId: GetPing + summary: Check server status + description: | + Check the status of your InfluxDB Enterprise instance and retrieve version information. 
+ The `/ping` endpoint returns a `204 No Content` response by default. + + Use the `verbose=true` query parameter to return a `200 OK` response, + which is required for [Google Cloud Load Balancing](https://cloud.google.com/load-balancing/docs/health-check-concepts) health checks. + tags: + - System Information + parameters: + - name: verbose + in: query + description: | + When `true`, returns HTTP 200 instead of 204. Required for Google Cloud Load Balancing health checks. + schema: + type: boolean + default: false + responses: + '200': + description: Server is running (verbose mode) + headers: + X-Influxdb-Build: + description: InfluxDB build type (`ENT` for Enterprise) + schema: + type: string + enum: + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + '204': + description: Server is running + headers: + X-Influxdb-Build: + description: InfluxDB build type (`ENT` for Enterprise) + schema: + type: string + enum: + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + head: + operationId: HeadPing + summary: Check server status (HEAD) + description: | + Check the status of your InfluxDB Enterprise instance using a HEAD request. + Returns the same headers as GET but without a response body. + tags: + - System Information + responses: + '204': + description: Server is running + headers: + X-Influxdb-Build: + description: InfluxDB build type (`ENT` for Enterprise) + schema: + type: string + enum: + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + /health: + get: + operationId: GetHealth + summary: Check server health + description: | + Check the health of your InfluxDB Enterprise instance. + Returns health status as JSON with a `pass` or `fail` status. 
+ tags: + - System Information + - v2 Compatibility + responses: + '200': + description: Server is healthy + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + example: + name: influxdb + message: ready for queries and writes + status: pass + version: 1.11.6 + '503': + description: Server is unhealthy + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + example: + name: influxdb + message: service unavailable + status: fail + version: 1.11.6 + /query: + get: + operationId: GetQuery + summary: Query data (GET) + description: | + Query data using InfluxQL. Use GET for read-only queries that start with: + - `SELECT` (except queries with `INTO` clause) + - `SHOW` + + For write operations (CREATE, DROP, ALTER, etc.), use POST. + tags: + - Query + parameters: + - $ref: '#/components/parameters/QueryDB' + - $ref: '#/components/parameters/QueryQ' + - $ref: '#/components/parameters/QueryEpoch' + - $ref: '#/components/parameters/QueryPretty' + - $ref: '#/components/parameters/QueryChunked' + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + responses: + '200': + description: Query executed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + example: + results: + - statement_id: 0 + series: + - name: mymeas + columns: + - time + - myfield + - mytag1 + - mytag2 + values: + - - '2017-03-01T00:16:18Z' + - 33.1 + - null + - null + - - '2017-03-01T00:17:18Z' + - 12.4 + - '12' + - '14' + application/csv: + schema: + type: string + example: | + name,tags,time,myfield,mytag1,mytag2 + mymeas,,1488327378000000000,33.1,, + mymeas,,1488327438000000000,12.4,12,14 + '400': + description: Bad request (syntax error in query) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'error parsing query: found EOF, expected FROM at line 1, char 9' + '401': + description: Unauthorized + 
content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: authorization failed + post: + operationId: PostQuery + summary: Query data (POST) + description: | + Query data or execute database management commands using InfluxQL. + Use POST for queries that start with: + - `SELECT` with `INTO` clause + - `ALTER` + - `CREATE` + - `DELETE` + - `DROP` + - `GRANT` + - `KILL` + - `REVOKE` + tags: + - Query + parameters: + - $ref: '#/components/parameters/QueryDB' + - $ref: '#/components/parameters/QueryEpoch' + - $ref: '#/components/parameters/QueryPretty' + - $ref: '#/components/parameters/QueryChunked' + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + requestBody: + required: true + content: + application/x-www-form-urlencoded: + schema: + type: object + required: + - q + properties: + q: + type: string + description: InfluxQL query string + example: CREATE DATABASE mydb + params: + type: string + description: | + JSON object containing bind parameter values. + Use `$` syntax in the query to reference parameters. + example: '{"tag_value":"12","field_value":30}' + multipart/form-data: + schema: + type: object + properties: + q: + type: string + format: binary + description: File containing InfluxQL queries (separated by semicolons) + async: + type: boolean + description: Execute queries asynchronously + default: false + responses: + '200': + description: Query executed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /write: + post: + operationId: PostWrite + summary: Write data + description: | + Write time series data to InfluxDB Enterprise using line protocol format. 
+ + Data must be sent as binary encoded line protocol in the request body. + Use the `--data-binary` flag with curl to preserve newlines. + + **Enterprise Feature**: Use the `consistency` parameter to control write + consistency across cluster nodes. + + **Best Practices:** + - Write points in batches of 5,000 to 10,000 for optimal performance + - Use the least precise timestamp precision possible for better compression + tags: + - Write + parameters: + - name: db + in: query + required: true + description: Target database for the write + schema: + type: string + example: mydb + - name: rp + in: query + description: | + Target retention policy. If not specified, writes to the default retention policy. + schema: + type: string + - name: precision + in: query + description: | + Timestamp precision. InfluxDB assumes nanoseconds if not specified. + schema: + type: string + enum: + - 'n' + - u + - ms + - s + - m + - h + default: 'n' + - name: consistency + in: query + description: | + **Enterprise only.** Sets the write consistency for the point. + See [write consistency documentation](/enterprise_influxdb/v1/concepts/clustering#write-consistency) + for detailed descriptions of each option. + schema: + type: string + enum: + - any + - one + - quorum + - all + default: one + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + requestBody: + required: true + description: | + Line protocol data. Multiple points should be separated by newlines. + Use `@filename` to write from a file. 
+ content: + text/plain: + schema: + type: string + example: | + mymeas,mytag=1 myfield=90 1463683075000000000 + mymeas,mytag=2 myfield=34 1463683076000000000 + responses: + '204': + description: Write successful + '400': + description: Bad request (line protocol syntax error or type conflict) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + badTimestamp: + summary: Invalid timestamp + value: + error: 'unable to parse ''mymeas,mytag=1 myfield=91 abc123'': bad timestamp' + typeConflict: + summary: Field type conflict + value: + error: 'field type conflict: input field "myfield" on measurement "mymeas" is type int64, already exists as type float' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '404': + description: Database not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'database not found: "mydb1"' + '413': + description: Request entity too large + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: Request Entity Too Large + '500': + description: Internal server error (e.g., retention policy not found) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'retention policy not found: myrp' + /debug/pprof: + get: + operationId: GetDebugPprof + summary: Get profiling index + description: | + Returns an HTML page listing available Go pprof profiles. + Use the individual profile endpoints to retrieve specific profile data. + tags: + - Debug + responses: + '200': + description: HTML page with profile links + content: + text/html: + schema: + type: string + /debug/pprof/{profile}: + get: + operationId: GetDebugPprofProfile + summary: Get profile data + description: | + Retrieve a specific Go pprof profile. 
Available profiles: + - `block`: Stack traces that led to blocking on synchronization primitives + - `goroutine`: Stack traces of all current goroutines + - `heap`: Sampling of stack traces for heap allocations + - `mutex`: Stack traces of holders of contended mutexes + - `threadcreate`: Stack traces that led to creation of new OS threads + - `profile`: CPU profile (use `seconds` parameter to specify duration) + - `trace`: Execution trace (use `seconds` parameter to specify duration) + tags: + - Debug + parameters: + - name: profile + in: path + required: true + description: Profile name + schema: + type: string + enum: + - block + - goroutine + - heap + - mutex + - threadcreate + - profile + - trace + - allocs + - cmdline + - name: seconds + in: query + description: Duration in seconds for CPU profile or trace + schema: + type: integer + default: 30 + - name: debug + in: query + description: Return human-readable text output instead of binary + schema: + type: integer + enum: + - 0 + - 1 + default: 0 + responses: + '200': + description: Profile data + content: + application/octet-stream: + schema: + type: string + format: binary + text/plain: + schema: + type: string + /debug/pprof/all: + get: + operationId: GetDebugPprofAll + summary: Get all profiles archive + description: | + Generate a `profiles.tar.gz` archive containing all standard Go profiling + information and additional debugging data. Intended primarily for use by + InfluxData support. + + Use the `cpu` parameter to include a CPU profile of the specified duration. + tags: + - Debug + parameters: + - name: cpu + in: query + description: | + Duration for CPU profile. Specify as a duration string (e.g., `30s`). 
+ schema: + type: string + example: 30s + responses: + '200': + description: Compressed archive containing all profiles + content: + application/gzip: + schema: + type: string + format: binary + /debug/requests: + get: + operationId: GetDebugRequests + summary: Track HTTP requests + description: | + Track HTTP client requests to the `/write` and `/query` endpoints. + Returns the number of writes and queries per username and IP address + over the specified time interval. + tags: + - Debug + parameters: + - name: seconds + in: query + description: Duration in seconds to collect request data + schema: + type: integer + default: 10 + responses: + '200': + description: Request statistics by user and IP + content: + application/json: + schema: + type: object + additionalProperties: + type: object + properties: + writes: + type: integer + queries: + type: integer + example: + user1:123.45.678.91: + writes: 1 + queries: 0 + user1:000.0.0.0: + writes: 0 + queries: 16 + /debug/vars: + get: + operationId: GetDebugVars + summary: Get server statistics + description: | + Retrieve runtime statistics and information about the InfluxDB Enterprise instance. + Returns detailed metrics in JSON format including memory usage, + goroutine counts, and database statistics. + + The [InfluxDB Telegraf input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) + can collect these metrics automatically. + tags: + - Debug + responses: + '200': + description: Server statistics in JSON format + content: + application/json: + schema: + type: object + description: Server runtime statistics + /api/v2/query: + post: + operationId: PostApiV2Query + summary: Query with Flux (v2 compatible) + description: | + Query data using [Flux](/enterprise_influxdb/v1/flux/) language. + This endpoint provides forward compatibility with InfluxDB 2.x client libraries. 
+ + **Required Headers:** + - `Accept: application/csv` + - `Content-type: application/vnd.flux` + - `Authorization: Token username:password` (if authentication is enabled) + tags: + - v2 Compatibility + - Query + security: + - TokenAuth: [] + requestBody: + required: true + content: + application/vnd.flux: + schema: + type: string + example: | + from(bucket:"telegraf") + |> range(start:-5m) + |> filter(fn:(r) => r._measurement == "cpu") + responses: + '200': + description: Query results in CSV format + content: + application/csv: + schema: + type: string + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/write: + post: + operationId: PostApiV2Write + summary: Write data (v2 compatible) + description: | + Write data using the InfluxDB 2.x API format. + This endpoint provides forward compatibility with InfluxDB 2.x client libraries. + + **Bucket Mapping:** + The `bucket` parameter maps to InfluxDB 1.x database and retention policy: + - `database/retention-policy` - specific retention policy + - `database/` or `database` - default retention policy + + The `org` parameter is ignored in InfluxDB Enterprise 1.x. + tags: + - v2 Compatibility + - Write + security: + - TokenAuth: [] + parameters: + - name: bucket + in: query + required: true + description: | + Database and retention policy in format `database/retention-policy`. + Use `database/` or `database` for the default retention policy. 
+ schema: + type: string + example: mydb/autogen + - name: org + in: query + description: Organization (ignored in InfluxDB Enterprise 1.x) + schema: + type: string + - name: precision + in: query + description: Timestamp precision + schema: + type: string + enum: + - ns + - us + - ms + - s + default: ns + requestBody: + required: true + content: + text/plain: + schema: + type: string + example: mem,host=host1 used_percent=23.43234543 1556896326 + responses: + '204': + description: Write successful + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/buckets: + get: + operationId: GetApiV2Buckets + summary: List buckets (v2 compatible) + description: | + List all databases as buckets. Provides forward compatibility with + InfluxDB 2.x client libraries. + tags: + - v2 Compatibility + security: + - TokenAuth: [] + responses: + '200': + description: List of buckets + content: + application/json: + schema: + $ref: '#/components/schemas/BucketList' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + post: + operationId: PostApiV2Buckets + summary: Create bucket (v2 compatible) + description: | + Create a new database as a bucket. Provides forward compatibility with + InfluxDB 2.x client libraries. 
+ tags: + - v2 Compatibility + security: + - TokenAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BucketCreate' + responses: + '201': + description: Bucket created + content: + application/json: + schema: + $ref: '#/components/schemas/Bucket' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/buckets/{bucketID}: + delete: + operationId: DeleteApiV2BucketsBucketID + summary: Delete bucket (v2 compatible) + description: | + Delete a database/retention policy combination. + The bucketID format is `database/retention-policy`. + tags: + - v2 Compatibility + security: + - TokenAuth: [] + parameters: + - name: bucketID + in: path + required: true + description: Bucket ID in format `database/retention-policy` + schema: + type: string + example: test/autogen + responses: + '204': + description: Bucket deleted + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '404': + description: Bucket not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/delete: + post: + operationId: PostApiV2Delete + summary: Delete data (v2 compatible) + description: | + Delete data from InfluxDB Enterprise using predicate expressions. + Supports deletion by tag value, timestamp, and measurement. + + **Predicate Syntax:** + ``` + _measurement="example" AND tagKey="tagValue" + ``` + + See [delete predicate syntax](/influxdb/v2/reference/syntax/delete-predicate/) + for detailed syntax and [limitations](/influxdb/v2/reference/syntax/delete-predicate/#limitations). 
+ tags: + - v2 Compatibility + security: + - TokenAuth: [] + parameters: + - name: bucket + in: query + required: true + description: Database and retention policy in format `database/retention-policy` + schema: + type: string + example: exampleDB/autogen + - name: precision + in: query + description: Timestamp precision + schema: + type: string + enum: + - ns + - us + - ms + - s + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteRequest' + examples: + timeRange: + summary: Delete by time range + value: + start: '2020-03-01T00:00:00Z' + stop: '2020-11-14T00:00:00Z' + withPredicate: + summary: Delete with predicate + value: + start: '2020-03-01T00:00:00Z' + stop: '2020-11-14T00:00:00Z' + predicate: _measurement="example-measurement" AND exampleTag="exampleTagValue" + responses: + '204': + description: Delete successful + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' +components: + securitySchemes: + BasicAuth: + type: http + scheme: basic + description: | + Use HTTP Basic Authentication by including your username and password in the request. + + ```bash + curl -u username:password "http://localhost:8086/query?q=SHOW+DATABASES" + ``` + + Or encode credentials in the URL (not recommended for production): + + ```bash + curl "http://username:password@localhost:8086/query?q=SHOW+DATABASES" + ``` + QueryAuth: + type: apiKey + in: query + name: u + description: | + Pass your credentials as query parameters. Use `u` for username and `p` for password. + + ```bash + curl "http://localhost:8086/query?u=username&p=password&q=SHOW+DATABASES" + ``` + + > [!Note] + > Query string authentication exposes credentials in URLs and server logs. + > Use Basic Authentication or Token Authentication for production environments. 
+ TokenAuth: + type: http + scheme: bearer + bearerFormat: Token + description: | + For v2-compatible endpoints (`/api/v2/*`), use the `Authorization` header with the `Token` scheme. + + Include your InfluxDB 1.x username and password separated by a colon: + + ```bash + curl -H "Authorization: Token username:password" \ + "http://localhost:8086/api/v2/query" + ``` + + This format is compatible with InfluxDB 2.x client libraries, allowing you to + use the same code with both InfluxDB 1.8+ and InfluxDB 2.x. + parameters: + QueryDB: + name: db + in: query + description: Target database for the query + schema: + type: string + example: mydb + QueryQ: + name: q + in: query + required: true + description: InfluxQL query string + schema: + type: string + example: SELECT * FROM "mymeas" + QueryEpoch: + name: epoch + in: query + description: | + Return timestamps as Unix epoch values with specified precision. + By default, timestamps are returned in RFC3339 format. + schema: + type: string + enum: + - ns + - u + - µ + - ms + - s + - m + - h + QueryPretty: + name: pretty + in: query + description: Enable pretty-printed JSON output (not recommended for production) + schema: + type: boolean + default: false + QueryChunked: + name: chunked + in: query + description: | + Return results in streamed batches. Set to `true` to chunk by series + or every 10,000 points. Set to a number to chunk by that many points. 
+ schema: + oneOf: + - type: boolean + - type: integer + AuthUsername: + name: u + in: query + description: Username for authentication + schema: + type: string + AuthPassword: + name: p + in: query + description: Password for authentication + schema: + type: string + schemas: + HealthCheck: + type: object + properties: + name: + type: string + example: influxdb + message: + type: string + example: ready for queries and writes + status: + type: string + enum: + - pass + - fail + version: + type: string + example: 1.11.6 + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + properties: + statement_id: + type: integer + series: + type: array + items: + $ref: '#/components/schemas/Series' + error: + type: string + Series: + type: object + properties: + name: + type: string + description: Measurement name + tags: + type: object + additionalProperties: + type: string + columns: + type: array + items: + type: string + values: + type: array + items: + type: array + items: {} + Error: + type: object + required: + - error + properties: + error: + type: string + description: Error message + DeleteRequest: + type: object + required: + - start + - stop + properties: + start: + type: string + format: date-time + description: Start time (inclusive) + stop: + type: string + format: date-time + description: Stop time (exclusive) + predicate: + type: string + description: | + InfluxQL-like predicate expression to filter data to delete. 
+ Example: `_measurement="example" AND tagKey="tagValue"` + BucketList: + type: object + properties: + buckets: + type: array + items: + $ref: '#/components/schemas/Bucket' + Bucket: + type: object + properties: + id: + type: string + description: Bucket ID (database/retention-policy) + name: + type: string + description: Bucket name + retentionRules: + type: array + items: + type: object + properties: + type: + type: string + enum: + - expire + everySeconds: + type: integer + BucketCreate: + type: object + required: + - name + properties: + name: + type: string + description: Database name + retentionRules: + type: array + items: + type: object + properties: + type: + type: string + enum: + - expire + everySeconds: + type: integer + description: Retention period in seconds diff --git a/api-docs/generate-api-docs.sh b/api-docs/generate-api-docs.sh index f603bb2af1..62648c1117 100755 --- a/api-docs/generate-api-docs.sh +++ b/api-docs/generate-api-docs.sh @@ -70,7 +70,7 @@ function generateHtml { local specbundle=redoc-static_index.html # Define the temporary file for the Hugo template and Redoc HTML. local tmpfile="${productVersion}-${api}_index.tmp" - + echo "Bundling $specPath" # Use npx to install and run the specified version of redoc-cli. @@ -83,9 +83,9 @@ function generateHtml { --title="$title" \ --options.sortPropsAlphabetically \ --options.menuToggle \ + --options.hideDownloadButton \ --options.hideHostname \ --options.noAutoAuth \ - --options.hideDownloadButton \ --output=$specbundle \ --templateOptions.description="$shortDescription" \ --templateOptions.product="$productVersion" \ @@ -187,3 +187,9 @@ function build { } build + +# Generate tag-based article data and content pages +echo "Generating OpenAPI article data..." +cd .. 
+node api-docs/scripts/dist/generate-openapi-articles.js +cd api-docs diff --git a/api-docs/getswagger.sh b/api-docs/getswagger.sh index 1ff077a45f..c0d835484f 100755 --- a/api-docs/getswagger.sh +++ b/api-docs/getswagger.sh @@ -62,7 +62,7 @@ function showHelp { subcommand=$1 case "$subcommand" in - cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) + cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|oss-v1|enterprise-v1|core-v3|enterprise-v3|all) product=$1 shift @@ -273,6 +273,18 @@ function updateV1Compat { postProcess $outFile 'influxdb3/clustered/.config.yml' 'v1-compatibility' } +function updateOSSV1 { + outFile="influxdb/v1/v1/ref.yml" + echo "Processing $outFile with decorators" + postProcess $outFile 'influxdb/v1/.config.yml' 'v1@1' +} + +function updateEnterpriseV1 { + outFile="enterprise_influxdb/v1/v1/ref.yml" + echo "Processing $outFile with decorators" + postProcess $outFile 'enterprise_influxdb/v1/.config.yml' 'v1@1' +} + UPDATE_OPTIONS="--fail" if [ ! -z ${verbose} ]; @@ -312,6 +324,12 @@ then elif [ "$product" = "v1-compat" ]; then updateV1Compat +elif [ "$product" = "oss-v1" ]; +then + updateOSSV1 +elif [ "$product" = "enterprise-v1" ]; +then + updateEnterpriseV1 elif [ "$product" = "all" ]; then updateCloudV2 @@ -322,8 +340,10 @@ then updateCoreV3 updateEnterpriseV3 updateOSSV2 + updateOSSV1 + updateEnterpriseV1 updateV1Compat else - echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all." + echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-management, clustered-v2, core-v3, enterprise-v3, v2, oss-v1, enterprise-v1, v1-compat, or all." 
showHelp fi diff --git a/api-docs/influxdb/cloud/v2/content/tag-groups.yml b/api-docs/influxdb/cloud/v2/content/tag-groups.yml deleted file mode 100644 index 7fcd8cc8dc..0000000000 --- a/api-docs/influxdb/cloud/v2/content/tag-groups.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Supported operations - - Headers - - Pagination - - Response codes -- name: All endpoints - tags: [] diff --git a/api-docs/influxdb/cloud/v2/ref.yml b/api-docs/influxdb/cloud/v2/ref.yml index 365e6d7db0..cd7413777d 100644 --- a/api-docs/influxdb/cloud/v2/ref.yml +++ b/api-docs/influxdb/cloud/v2/ref.yml @@ -99,7 +99,6 @@ tags: x-traitTag: true - name: Config - name: Dashboards - - name: Data I/O endpoints - description: | The InfluxDB 1.x data model includes [databases](/influxdb/cloud/reference/glossary/#database) and [retention policies](/influxdb/cloud/reference/glossary/#retention-policy-rp). @@ -126,7 +125,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. | name: Headers @@ -208,13 +207,33 @@ tags: - name: Ping - description: | Retrieve data, analyze queries, and get query suggestions. - name: Query + name: Query data - description: | - See the [**API Quick Start**](/influxdb/cloud/api-guide/api_intro/) - to get up and running authenticating with tokens, writing to buckets, and querying data. 
+ Authenticate, write, and query with the API: - [**InfluxDB API client libraries**](/influxdb/cloud/api-guide/client-libraries/) - are available for popular languages and ready to import into your application. + 1. Create an API token to authorize API requests. + Use the InfluxDB Cloud UI or `POST /api/v2/authorizations`. + + 2. Write data to InfluxDB Cloud. + + ```bash + curl -X POST "https://cloud2.influxdata.com/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME&precision=ns" \ + --header "Authorization: Token API_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ Room temp=71.5" + ``` + + 3. Query data using Flux. + + ```bash + curl -X POST "https://cloud2.influxdata.com/api/v2/query?org=ORG_NAME" \ + --header "Authorization: Token API_TOKEN" \ + --header "Content-Type: application/vnd.flux" \ + --data 'from(bucket: "BUCKET_NAME") |> range(start: -1h)' + ``` + + For more information, see the + [Get started](/influxdb/cloud/get-started/) guide. name: Quick start x-traitTag: true - name: Resources @@ -326,7 +345,7 @@ tags: - name: Views - description: | Write time series data to [buckets](/influxdb/cloud/reference/glossary/#bucket). - name: Write + name: Write data paths: /api/v2: get: @@ -4008,7 +4027,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Delete data tags: - - Data I/O endpoints - Delete x-codeSamples: - label: cURL @@ -6186,8 +6204,7 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Query data tags: - - Data I/O endpoints - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -6332,7 +6349,7 @@ paths: type: string summary: Analyze a Flux query tags: - - Query + - Query data x-codeSamples: - label: 'cURL: Analyze a Flux query' lang: Shell @@ -6777,7 +6794,7 @@ paths: description: Internal server error. 
summary: Generate a query Abstract Syntax Tree (AST) tags: - - Query + - Query data x-codeSamples: - label: 'cURL: Analyze and generate AST for the query' lang: Shell @@ -7441,7 +7458,7 @@ paths: description: Internal server error. summary: List Flux query suggestions tags: - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -7513,7 +7530,7 @@ paths: The value passed for _`name`_ may have been misspelled. summary: Retrieve a query suggestion for a branching suggestion tags: - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -7639,7 +7656,6 @@ paths: description: Unexpected error. summary: List scripts tags: - - Data I/O endpoints - Invokable Scripts x-codeSamples: - label: 'cURL: retrieves the first 100 scripts.' @@ -7835,7 +7851,6 @@ paths: description: Internal server error. summary: Retrieve a script tags: - - Data I/O endpoints - Invokable Scripts patch: description: | @@ -8048,7 +8063,6 @@ paths: description: Unexpected error. summary: Invoke a script tags: - - Data I/O endpoints - Invokable Scripts x-codeSamples: - label: cURL @@ -8935,7 +8949,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: List all tasks tags: - - Data I/O endpoints - Tasks x-codeSamples: - label: 'cURL: all tasks, basic output' @@ -9054,7 +9067,6 @@ paths: description: Unexpected error summary: Create a task tags: - - Data I/O endpoints - Tasks x-codeSamples: - label: 'cURL: create a Flux script task' @@ -9161,7 +9173,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Retrieve a task tags: - - Data I/O endpoints - Tasks patch: description: | @@ -9803,7 +9814,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Start a task run, overriding the schedule tags: - - Data I/O endpoints - Tasks /api/v2/tasks/{taskID}/runs/{runID}: delete: @@ -11832,8 +11842,7 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Write data tags: - - Data I/O endpoints - - Write + - Write data 
/legacy/authorizations: get: operationId: GetLegacyAuthorizations @@ -19069,6 +19078,36 @@ components: in: header name: Authorization type: apiKey + QuerystringAuthentication: + description: | + Use the query string authentication scheme with InfluxDB v1-compatible API operations. + Pass your InfluxDB Cloud email address and API token as query parameters using `u` for username and `p` for password (API token). + + ### Syntax + + `?u=EMAIL_ADDRESS&p=INFLUX_API_TOKEN` + + ### Example + + ```sh + curl --get "INFLUX_URL/query" \ + --data-urlencode "u=EMAIL_ADDRESS" \ + --data-urlencode "p=INFLUX_API_TOKEN" \ + --data-urlencode "q=SHOW DATABASES" + ``` + + Replace the following: + + - *`INFLUX_URL`*: your InfluxDB Cloud URL + - *`EMAIL_ADDRESS`*: your InfluxDB Cloud email address + - *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud/reference/glossary/#token) + + > [!Warning] + > Query string authentication exposes your credentials in the URL and server logs. + > Use [Token authentication](#section/Authentication/TokenAuthentication) for production environments. 
+ in: query + name: u + type: apiKey x-tagGroups: - name: Using the InfluxDB HTTP API tags: @@ -19099,7 +19138,7 @@ x-tagGroups: - NotificationRules - Organizations - Ping - - Query + - Query data - Resources - Routes - Rules @@ -19115,4 +19154,4 @@ x-tagGroups: - Users - Variables - Views - - Write + - Write data diff --git a/api-docs/influxdb/v1/.config.yml b/api-docs/influxdb/v1/.config.yml new file mode 100644 index 0000000000..0d8af024fc --- /dev/null +++ b/api-docs/influxdb/v1/.config.yml @@ -0,0 +1,10 @@ +plugins: + - '../../openapi/plugins/docs-plugin.cjs' +extends: + - recommended + - docs/all +x-influxdata-product-name: InfluxDB OSS v1 + +apis: + v1@1: + root: v1/ref.yml diff --git a/api-docs/influxdb/v1/v1/content/info.yml b/api-docs/influxdb/v1/v1/content/info.yml new file mode 100644 index 0000000000..f8ed3b9152 --- /dev/null +++ b/api-docs/influxdb/v1/v1/content/info.yml @@ -0,0 +1,32 @@ +title: InfluxDB v1 HTTP API +x-influxdata-short-title: InfluxDB v1 API +x-influxdata-short-description: >- + The InfluxDB v1 HTTP API provides a programmatic interface for writing, + querying, and managing InfluxDB v1 databases. +version: 1.8.10 +description: | + The InfluxDB v1 HTTP API provides a simple way to interact with the database. + It uses HTTP response codes, authentication with username and password credentials + or API tokens, and JSON-formatted response data. + + ## InfluxDB 3 Compatibility + + InfluxDB 3 supports the v1 `/write` and `/query` HTTP API endpoints. + If you're getting started with InfluxDB v1, we recommend using the + InfluxDB v1 client libraries and InfluxQL for future compatibility. 
+ + ## Authentication + + InfluxDB v1 supports two authentication methods: + + - **Basic Authentication**: Use HTTP Basic Auth with username and password + - **Query String Authentication**: Pass `u` (username) and `p` (password) as query parameters + - **Token Authentication** (v2-compatible): Use `Authorization: Token username:password` header + + Authentication is optional unless [enabled in the configuration](/influxdb/v1/administration/authentication_and_authorization/). +license: + name: MIT + url: https://opensource.org/licenses/MIT +contact: + name: InfluxData + url: https://www.influxdata.com diff --git a/api-docs/influxdb/v1/v1/content/servers.yml b/api-docs/influxdb/v1/v1/content/servers.yml new file mode 100644 index 0000000000..076333dd90 --- /dev/null +++ b/api-docs/influxdb/v1/v1/content/servers.yml @@ -0,0 +1,2 @@ +- url: http://localhost:8086 + description: Local InfluxDB instance diff --git a/api-docs/influxdb/v1/v1/ref.yml b/api-docs/influxdb/v1/v1/ref.yml new file mode 100644 index 0000000000..4f35df22ce --- /dev/null +++ b/api-docs/influxdb/v1/v1/ref.yml @@ -0,0 +1,1093 @@ +openapi: 3.0.0 +info: + title: InfluxDB v1 HTTP API + version: 1.8.10 + description: | + The InfluxDB v1 HTTP API provides a simple way to interact with the database. + It uses HTTP response codes, authentication with username and password credentials + or API tokens, and JSON-formatted response data. + + ## InfluxDB 3 Compatibility + + InfluxDB 3 supports the v1 `/write` and `/query` HTTP API endpoints. + If you're getting started with InfluxDB v1, we recommend using the + InfluxDB v1 client libraries and InfluxQL for future compatibility. 
+ + ## Authentication + + InfluxDB v1 supports two authentication methods: + + - **Basic Authentication**: Use HTTP Basic Auth with username and password + - **Query String Authentication**: Pass `u` (username) and `p` (password) as query parameters + - **Token Authentication** (v2-compatible): Use `Authorization: Token username:password` header + + Authentication is optional unless [enabled in the configuration](/influxdb/v1/administration/authentication_and_authorization/). + contact: + name: InfluxData + url: https://www.influxdata.com + license: + name: MIT + url: https://opensource.org/licenses/MIT +servers: + - url: http://localhost:8086 + description: Local InfluxDB instance +security: + - BasicAuth: [] + - QueryAuth: [] +tags: + - name: System Information + description: | + Endpoints for checking server status, health, and version information. + - name: Query + description: | + Query data using InfluxQL. The `/query` endpoint supports both read queries + (SELECT, SHOW) and write queries (CREATE, DROP, ALTER, etc.). + - name: Write + description: | + Write time series data using InfluxDB line protocol. + - name: Debug + description: | + Debugging and profiling endpoints for troubleshooting and performance analysis. + - name: v2 Compatibility + description: | + InfluxDB 2.x API compatibility endpoints. These endpoints allow you to use + InfluxDB 2.x client libraries with InfluxDB 1.8+. 
+ + Use the `Token` scheme with v1.x credentials: + ``` + Authorization: Token username:password + ``` + - name: Authentication + x-traitTag: true + description: | + InfluxDB v1 supports multiple authentication methods: + + ### Basic Authentication + ```bash + curl -u username:password http://localhost:8086/query?q=SHOW+DATABASES + ``` + + ### Query String Authentication + ```bash + curl "http://localhost:8086/query?u=username&p=password&q=SHOW+DATABASES" + ``` + + ### Token Authentication (v2-compatible) + For v2-compatible endpoints, use the Token scheme: + ```bash + curl -H "Authorization: Token username:password" http://localhost:8086/api/v2/query + ``` +paths: + /ping: + get: + operationId: GetPing + summary: Check server status + description: | + Check the status of your InfluxDB instance and retrieve version information. + The `/ping` endpoint returns a `204 No Content` response by default. + + Use the `verbose=true` query parameter to return a `200 OK` response, + which is required for [Google Cloud Load Balancing](https://cloud.google.com/load-balancing/docs/health-check-concepts) health checks. + tags: + - System Information + parameters: + - name: verbose + in: query + description: | + When `true`, returns HTTP 200 instead of 204. Required for Google Cloud Load Balancing health checks. 
+ schema: + type: boolean + default: false + responses: + '200': + description: Server is running (verbose mode) + headers: + X-Influxdb-Build: + description: InfluxDB build type (`OSS` or `ENT`) + schema: + type: string + enum: + - OSS + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + '204': + description: Server is running + headers: + X-Influxdb-Build: + description: InfluxDB build type (`OSS` or `ENT`) + schema: + type: string + enum: + - OSS + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + head: + operationId: HeadPing + summary: Check server status (HEAD) + description: | + Check the status of your InfluxDB instance using a HEAD request. + Returns the same headers as GET but without a response body. + tags: + - System Information + responses: + '204': + description: Server is running + headers: + X-Influxdb-Build: + description: InfluxDB build type (`OSS` or `ENT`) + schema: + type: string + enum: + - OSS + - ENT + X-Influxdb-Version: + description: InfluxDB version + schema: + type: string + /health: + get: + operationId: GetHealth + summary: Check server health + description: | + Check the health of your InfluxDB instance. + Returns health status as JSON with a `pass` or `fail` status. + tags: + - System Information + - v2 Compatibility + responses: + '200': + description: Server is healthy + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + example: + name: influxdb + message: ready for queries and writes + status: pass + version: 1.8.10 + '503': + description: Server is unhealthy + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + example: + name: influxdb + message: service unavailable + status: fail + version: 1.8.10 + /query: + get: + operationId: GetQuery + summary: Query data (GET) + description: | + Query data using InfluxQL. 
Use GET for read-only queries that start with: + - `SELECT` (except queries with `INTO` clause) + - `SHOW` + + For write operations (CREATE, DROP, ALTER, etc.), use POST. + tags: + - Query + parameters: + - $ref: '#/components/parameters/QueryDB' + - $ref: '#/components/parameters/QueryQ' + - $ref: '#/components/parameters/QueryEpoch' + - $ref: '#/components/parameters/QueryPretty' + - $ref: '#/components/parameters/QueryChunked' + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + responses: + '200': + description: Query executed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + example: + results: + - statement_id: 0 + series: + - name: mymeas + columns: + - time + - myfield + - mytag1 + - mytag2 + values: + - - '2017-03-01T00:16:18Z' + - 33.1 + - null + - null + - - '2017-03-01T00:17:18Z' + - 12.4 + - '12' + - '14' + application/csv: + schema: + type: string + example: | + name,tags,time,myfield,mytag1,mytag2 + mymeas,,1488327378000000000,33.1,, + mymeas,,1488327438000000000,12.4,12,14 + '400': + description: Bad request (syntax error in query) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'error parsing query: found EOF, expected FROM at line 1, char 9' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: authorization failed + post: + operationId: PostQuery + summary: Query data (POST) + description: | + Query data or execute database management commands using InfluxQL. 
+ Use POST for queries that start with: + - `SELECT` with `INTO` clause + - `ALTER` + - `CREATE` + - `DELETE` + - `DROP` + - `GRANT` + - `KILL` + - `REVOKE` + tags: + - Query + parameters: + - $ref: '#/components/parameters/QueryDB' + - $ref: '#/components/parameters/QueryEpoch' + - $ref: '#/components/parameters/QueryPretty' + - $ref: '#/components/parameters/QueryChunked' + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + requestBody: + required: true + content: + application/x-www-form-urlencoded: + schema: + type: object + required: + - q + properties: + q: + type: string + description: InfluxQL query string + example: CREATE DATABASE mydb + params: + type: string + description: | + JSON object containing bind parameter values. + Use `$` syntax in the query to reference parameters. + example: '{"tag_value":"12","field_value":30}' + multipart/form-data: + schema: + type: object + properties: + q: + type: string + format: binary + description: File containing InfluxQL queries (separated by semicolons) + async: + type: boolean + description: Execute queries asynchronously + default: false + responses: + '200': + description: Query executed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /write: + post: + operationId: PostWrite + summary: Write data + description: | + Write time series data to InfluxDB using line protocol format. + + Data must be sent as binary encoded line protocol in the request body. + Use the `--data-binary` flag with curl to preserve newlines. 
+ + **Best Practices:** + - Write points in batches of 5,000 to 10,000 for optimal performance + - Use the least precise timestamp precision possible for better compression + tags: + - Write + parameters: + - name: db + in: query + required: true + description: Target database for the write + schema: + type: string + example: mydb + - name: rp + in: query + description: | + Target retention policy. If not specified, writes to the default retention policy. + schema: + type: string + - name: precision + in: query + description: | + Timestamp precision. InfluxDB assumes nanoseconds if not specified. + schema: + type: string + enum: + - 'n' + - u + - ms + - s + - m + - h + default: 'n' + - $ref: '#/components/parameters/AuthUsername' + - $ref: '#/components/parameters/AuthPassword' + requestBody: + required: true + description: | + Line protocol data. Multiple points should be separated by newlines. + Use `@filename` to write from a file. + content: + text/plain: + schema: + type: string + example: | + mymeas,mytag=1 myfield=90 1463683075000000000 + mymeas,mytag=2 myfield=34 1463683076000000000 + responses: + '204': + description: Write successful + '400': + description: Bad request (line protocol syntax error or type conflict) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + badTimestamp: + summary: Invalid timestamp + value: + error: 'unable to parse ''mymeas,mytag=1 myfield=91 abc123'': bad timestamp' + typeConflict: + summary: Field type conflict + value: + error: 'field type conflict: input field "myfield" on measurement "mymeas" is type int64, already exists as type float' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '404': + description: Database not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'database not found: "mydb1"' + '413': + description: Request entity too large + content: + 
application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: Request Entity Too Large + '500': + description: Internal server error (e.g., retention policy not found) + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + error: 'retention policy not found: myrp' + /debug/pprof: + get: + operationId: GetDebugPprof + summary: Get profiling index + description: | + Returns an HTML page listing available Go pprof profiles. + Use the individual profile endpoints to retrieve specific profile data. + tags: + - Debug + responses: + '200': + description: HTML page with profile links + content: + text/html: + schema: + type: string + /debug/pprof/{profile}: + get: + operationId: GetDebugPprofProfile + summary: Get profile data + description: | + Retrieve a specific Go pprof profile. Available profiles: + - `block`: Stack traces that led to blocking on synchronization primitives + - `goroutine`: Stack traces of all current goroutines + - `heap`: Sampling of stack traces for heap allocations + - `mutex`: Stack traces of holders of contended mutexes + - `threadcreate`: Stack traces that led to creation of new OS threads + - `profile`: CPU profile (use `seconds` parameter to specify duration) + - `trace`: Execution trace (use `seconds` parameter to specify duration) + tags: + - Debug + parameters: + - name: profile + in: path + required: true + description: Profile name + schema: + type: string + enum: + - block + - goroutine + - heap + - mutex + - threadcreate + - profile + - trace + - allocs + - cmdline + - name: seconds + in: query + description: Duration in seconds for CPU profile or trace + schema: + type: integer + default: 30 + - name: debug + in: query + description: Return human-readable text output instead of binary + schema: + type: integer + enum: + - 0 + - 1 + default: 0 + responses: + '200': + description: Profile data + content: + application/octet-stream: + schema: + type: string + format: binary + 
text/plain: + schema: + type: string + /debug/pprof/all: + get: + operationId: GetDebugPprofAll + summary: Get all profiles archive + description: | + Generate a `profiles.tar.gz` archive containing all standard Go profiling + information and additional debugging data. Intended primarily for use by + InfluxData support. + + Use the `cpu` parameter to include a CPU profile of the specified duration. + tags: + - Debug + parameters: + - name: cpu + in: query + description: | + Duration for CPU profile. Specify as a duration string (e.g., `30s`). + For InfluxDB 1.8.3 and earlier, use `cpu=true`. + schema: + type: string + example: 30s + responses: + '200': + description: Compressed archive containing all profiles + content: + application/gzip: + schema: + type: string + format: binary + /debug/requests: + get: + operationId: GetDebugRequests + summary: Track HTTP requests + description: | + Track HTTP client requests to the `/write` and `/query` endpoints. + Returns the number of writes and queries per username and IP address + over the specified time interval. + tags: + - Debug + parameters: + - name: seconds + in: query + description: Duration in seconds to collect request data + schema: + type: integer + default: 10 + responses: + '200': + description: Request statistics by user and IP + content: + application/json: + schema: + type: object + additionalProperties: + type: object + properties: + writes: + type: integer + queries: + type: integer + example: + user1:123.45.678.91: + writes: 1 + queries: 0 + user1:000.0.0.0: + writes: 0 + queries: 16 + /debug/vars: + get: + operationId: GetDebugVars + summary: Get server statistics + description: | + Retrieve runtime statistics and information about the InfluxDB instance. + Returns detailed metrics in JSON format including memory usage, + goroutine counts, and database statistics. 
+ + The [InfluxDB Telegraf input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) + can collect these metrics automatically. + tags: + - Debug + responses: + '200': + description: Server statistics in JSON format + content: + application/json: + schema: + type: object + description: Server runtime statistics + /api/v2/query: + post: + operationId: PostApiV2Query + summary: Query with Flux (v2 compatible) + description: | + Query data using [Flux](/influxdb/v1/flux/) language. + This endpoint provides forward compatibility with InfluxDB 2.x client libraries. + + **Required Headers:** + - `Accept: application/csv` + - `Content-type: application/vnd.flux` + - `Authorization: Token username:password` (if authentication is enabled) + tags: + - v2 Compatibility + - Query + security: + - TokenAuth: [] + requestBody: + required: true + content: + application/vnd.flux: + schema: + type: string + example: | + from(bucket:"telegraf") + |> range(start:-5m) + |> filter(fn:(r) => r._measurement == "cpu") + responses: + '200': + description: Query results in CSV format + content: + application/csv: + schema: + type: string + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/write: + post: + operationId: PostApiV2Write + summary: Write data (v2 compatible) + description: | + Write data using the InfluxDB 2.x API format. + This endpoint provides forward compatibility with InfluxDB 2.x client libraries. + + **Bucket Mapping:** + The `bucket` parameter maps to InfluxDB 1.x database and retention policy: + - `database/retention-policy` - specific retention policy + - `database/` or `database` - default retention policy + + The `org` parameter is ignored in InfluxDB 1.x. 
+ tags: + - v2 Compatibility + - Write + security: + - TokenAuth: [] + parameters: + - name: bucket + in: query + required: true + description: | + Database and retention policy in format `database/retention-policy`. + Use `database/` or `database` for the default retention policy. + schema: + type: string + example: mydb/autogen + - name: org + in: query + description: Organization (ignored in InfluxDB 1.x) + schema: + type: string + - name: precision + in: query + description: Timestamp precision + schema: + type: string + enum: + - ns + - us + - ms + - s + default: ns + requestBody: + required: true + content: + text/plain: + schema: + type: string + example: mem,host=host1 used_percent=23.43234543 1556896326 + responses: + '204': + description: Write successful + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/buckets: + get: + operationId: GetApiV2Buckets + summary: List buckets (v2 compatible) + description: | + List all databases as buckets. Provides forward compatibility with + InfluxDB 2.x client libraries. + tags: + - v2 Compatibility + security: + - TokenAuth: [] + responses: + '200': + description: List of buckets + content: + application/json: + schema: + $ref: '#/components/schemas/BucketList' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + post: + operationId: PostApiV2Buckets + summary: Create bucket (v2 compatible) + description: | + Create a new database as a bucket. Provides forward compatibility with + InfluxDB 2.x client libraries. 
+ tags: + - v2 Compatibility + security: + - TokenAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BucketCreate' + responses: + '201': + description: Bucket created + content: + application/json: + schema: + $ref: '#/components/schemas/Bucket' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/buckets/{bucketID}: + delete: + operationId: DeleteApiV2BucketsBucketID + summary: Delete bucket (v2 compatible) + description: | + Delete a database/retention policy combination. + The bucketID format is `database/retention-policy`. + tags: + - v2 Compatibility + security: + - TokenAuth: [] + parameters: + - name: bucketID + in: path + required: true + description: Bucket ID in format `database/retention-policy` + schema: + type: string + example: test/autogen + responses: + '204': + description: Bucket deleted + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '404': + description: Bucket not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/v2/delete: + post: + operationId: PostApiV2Delete + summary: Delete data (v2 compatible) + description: | + Delete data from InfluxDB using predicate expressions. + Supports deletion by tag value, timestamp, and measurement. + + **Predicate Syntax:** + ``` + _measurement="example" AND tagKey="tagValue" + ``` + + See [delete predicate syntax](/influxdb/v2/reference/syntax/delete-predicate/) + for detailed syntax and [limitations](/influxdb/v2/reference/syntax/delete-predicate/#limitations). 
+ tags: + - v2 Compatibility + security: + - TokenAuth: [] + parameters: + - name: bucket + in: query + required: true + description: Database and retention policy in format `database/retention-policy` + schema: + type: string + example: exampleDB/autogen + - name: precision + in: query + description: Timestamp precision + schema: + type: string + enum: + - ns + - us + - ms + - s + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteRequest' + examples: + timeRange: + summary: Delete by time range + value: + start: '2020-03-01T00:00:00Z' + stop: '2020-11-14T00:00:00Z' + withPredicate: + summary: Delete with predicate + value: + start: '2020-03-01T00:00:00Z' + stop: '2020-11-14T00:00:00Z' + predicate: _measurement="example-measurement" AND exampleTag="exampleTagValue" + responses: + '204': + description: Delete successful + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/Error' +components: + securitySchemes: + BasicAuth: + type: http + scheme: basic + description: | + Use HTTP Basic Authentication by including your username and password in the request. + + ```bash + curl -u username:password "http://localhost:8086/query?q=SHOW+DATABASES" + ``` + + Or encode credentials in the URL (not recommended for production): + + ```bash + curl "http://username:password@localhost:8086/query?q=SHOW+DATABASES" + ``` + QueryAuth: + type: apiKey + in: query + name: u + description: | + Pass your credentials as query parameters. Use `u` for username and `p` for password. + + ```bash + curl "http://localhost:8086/query?u=username&p=password&q=SHOW+DATABASES" + ``` + + > [!Note] + > Query string authentication exposes credentials in URLs and server logs. + > Use Basic Authentication or Token Authentication for production environments. 
+ TokenAuth: + type: http + scheme: bearer + bearerFormat: Token + description: | + For v2-compatible endpoints (`/api/v2/*`), use the `Authorization` header with the `Token` scheme. + + Include your InfluxDB 1.x username and password separated by a colon: + + ```bash + curl -H "Authorization: Token username:password" \ + "http://localhost:8086/api/v2/query" + ``` + + This format is compatible with InfluxDB 2.x client libraries, allowing you to + use the same code with both InfluxDB 1.8+ and InfluxDB 2.x. + parameters: + QueryDB: + name: db + in: query + description: Target database for the query + schema: + type: string + example: mydb + QueryQ: + name: q + in: query + required: true + description: InfluxQL query string + schema: + type: string + example: SELECT * FROM "mymeas" + QueryEpoch: + name: epoch + in: query + description: | + Return timestamps as Unix epoch values with specified precision. + By default, timestamps are returned in RFC3339 format. + schema: + type: string + enum: + - ns + - u + - µ + - ms + - s + - m + - h + QueryPretty: + name: pretty + in: query + description: Enable pretty-printed JSON output (not recommended for production) + schema: + type: boolean + default: false + QueryChunked: + name: chunked + in: query + description: | + Return results in streamed batches. Set to `true` to chunk by series + or every 10,000 points. Set to a number to chunk by that many points. 
+ schema: + oneOf: + - type: boolean + - type: integer + AuthUsername: + name: u + in: query + description: Username for authentication + schema: + type: string + AuthPassword: + name: p + in: query + description: Password for authentication + schema: + type: string + schemas: + HealthCheck: + type: object + properties: + name: + type: string + example: influxdb + message: + type: string + example: ready for queries and writes + status: + type: string + enum: + - pass + - fail + version: + type: string + example: 1.8.10 + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + properties: + statement_id: + type: integer + series: + type: array + items: + $ref: '#/components/schemas/Series' + error: + type: string + Series: + type: object + properties: + name: + type: string + description: Measurement name + tags: + type: object + additionalProperties: + type: string + columns: + type: array + items: + type: string + values: + type: array + items: + type: array + items: {} + Error: + type: object + required: + - error + properties: + error: + type: string + description: Error message + DeleteRequest: + type: object + required: + - start + - stop + properties: + start: + type: string + format: date-time + description: Start time (inclusive) + stop: + type: string + format: date-time + description: Stop time (exclusive) + predicate: + type: string + description: | + InfluxQL-like predicate expression to filter data to delete. 
+ Example: `_measurement="example" AND tagKey="tagValue"` + BucketList: + type: object + properties: + buckets: + type: array + items: + $ref: '#/components/schemas/Bucket' + Bucket: + type: object + properties: + id: + type: string + description: Bucket ID (database/retention-policy) + name: + type: string + description: Bucket name + retentionRules: + type: array + items: + type: object + properties: + type: + type: string + enum: + - expire + everySeconds: + type: integer + BucketCreate: + type: object + required: + - name + properties: + name: + type: string + description: Database name + retentionRules: + type: array + items: + type: object + properties: + type: + type: string + enum: + - expire + everySeconds: + type: integer + description: Retention period in seconds diff --git a/api-docs/influxdb/v2/v2/content/tag-groups.yml b/api-docs/influxdb/v2/v2/content/tag-groups.yml deleted file mode 100644 index 905c380ef6..0000000000 --- a/api-docs/influxdb/v2/v2/content/tag-groups.yml +++ /dev/null @@ -1,11 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Supported operations - - Headers - - Pagination - - Response codes - - Compatibility endpoints -- name: All endpoints - tags: [] diff --git a/api-docs/influxdb/v2/v2/ref.yml b/api-docs/influxdb/v2/v2/ref.yml index 778aceaccc..c1f958f159 100644 --- a/api-docs/influxdb/v2/v2/ref.yml +++ b/api-docs/influxdb/v2/v2/ref.yml @@ -91,16 +91,13 @@ tags: x-traitTag: true - name: Compatibility endpoints description: | - InfluxDB v2 provides a v1-compatible API for backward compatibility with InfluxDB 1.x clients and integrations. + InfluxDB v2 provides v1-compatible API endpoints for backward compatibility with InfluxDB 1.x clients and integrations. - Use these endpoints with InfluxDB 1.x client libraries and third-party integrations such as Grafana, Telegraf, and other tools designed for InfluxDB 1.x. 
The compatibility layer maps InfluxDB 1.x concepts (databases, retention policies) to InfluxDB v2 resources (buckets, organizations) through database retention policy (DBRP) mappings. + Use these endpoints with InfluxDB 1.x client libraries and third-party integrations such as Grafana, Telegraf, and other tools designed for InfluxDB 1.x. - - [Write data (v1-compatible)](#tag/Write-data-(v1-compatible)) - - [Query data using InfluxQL (v1-compatible)](#tag/Query-data-(v1-compatible)) - - [Manage v1-compatible users and permissions](#tag/Authorizations-(v1-compatible)) + The compatibility layer maps InfluxDB 1.x concepts (databases, retention policies) to InfluxDB v2 resources (buckets, organizations) through database retention policy (DBRP) mappings. - name: Config - name: Dashboards - - name: Data I/O endpoints - description: | The InfluxDB 1.x data model includes [databases](/influxdb/v2/reference/glossary/#database) and [retention policies](/influxdb/v2/reference/glossary/#retention-policy-rp). @@ -142,7 +139,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. | name: Headers @@ -200,14 +197,40 @@ tags: - name: Ping - description: | Retrieve data, analyze queries, and get query suggestions. - name: Query + name: Query data - name: Query data (v1-compatible) - description: | - See the [**API Quick Start**](/influxdb/v2/api-guide/api_intro/) - to get up and running authenticating with tokens, writing to buckets, and querying data. 
+ Authenticate, write, and query with the API: - [**InfluxDB API client libraries**](/influxdb/v2/api-guide/client-libraries/) - are available for popular languages and ready to import into your application. + 1. Create an API token to authorize API requests. + Use the InfluxDB UI or `POST /api/v2/authorizations`. + + 2. Check the status of the InfluxDB server. + + ```bash + curl "http://localhost:8086/health" + ``` + + 3. Write data to InfluxDB. + + ```bash + curl -X POST "http://localhost:8086/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME&precision=ns" \ + --header "Authorization: Token API_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ Room temp=71.5" + ``` + + 4. Query data using Flux. + + ```bash + curl -X POST "http://localhost:8086/api/v2/query?org=ORG_NAME" \ + --header "Authorization: Token API_TOKEN" \ + --header "Content-Type: application/vnd.flux" \ + --data 'from(bucket: "BUCKET_NAME") |> range(start: -1h)' + ``` + + For more information, see the + [Get started](/influxdb/v2/get-started/) guide. name: Quick start x-traitTag: true - name: Ready @@ -321,7 +344,7 @@ tags: - name: Views - description: | Write time series data to [buckets](/influxdb/v2/reference/glossary/#bucket). - name: Write + name: Write data - name: Write data (v1-compatible) paths: /api/v2: @@ -4490,7 +4513,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Delete data tags: - - Data I/O endpoints - Delete x-codeSamples: - label: cURL @@ -6690,8 +6712,7 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Query data tags: - - Data I/O endpoints - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -6836,7 +6857,7 @@ paths: type: string summary: Analyze a Flux query tags: - - Query + - Query data x-codeSamples: - label: 'cURL: Analyze a Flux query' lang: Shell @@ -7281,7 +7302,7 @@ paths: description: Internal server error. 
summary: Generate a query Abstract Syntax Tree (AST) tags: - - Query + - Query data x-codeSamples: - label: 'cURL: Analyze and generate AST for the query' lang: Shell @@ -7945,7 +7966,7 @@ paths: description: Internal server error. summary: List Flux query suggestions tags: - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -8017,7 +8038,7 @@ paths: The value passed for _`name`_ may have been misspelled. summary: Retrieve a query suggestion for a branching suggestion tags: - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -9930,7 +9951,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: List tasks tags: - - Data I/O endpoints - Tasks x-codeSamples: - label: 'cURL: all tasks, basic output' @@ -10001,7 +10021,6 @@ paths: description: Unexpected error summary: Create a task tags: - - Data I/O endpoints - Tasks x-codeSamples: - label: 'cURL: create a task' @@ -10086,7 +10105,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Retrieve a task tags: - - Data I/O endpoints - Tasks patch: description: | @@ -10680,7 +10698,6 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Start a task run, overriding the schedule tags: - - Data I/O endpoints - Tasks /api/v2/tasks/{taskID}/runs/{runID}: delete: @@ -12809,8 +12826,7 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Write data tags: - - Data I/O endpoints - - Write + - Write data /legacy/authorizations: get: operationId: GetLegacyAuthorizations @@ -20173,6 +20189,36 @@ components: in: header name: Authorization type: apiKey + QuerystringAuthentication: + description: | + Use the query string authentication scheme with InfluxDB v1-compatible API operations. + Pass your InfluxDB username and API token as query parameters using `u` for username and `p` for password (API token). 
+ + ### Syntax + + `?u=USERNAME&p=INFLUX_API_TOKEN` + + ### Example + + ```sh + curl --get "INFLUX_URL/query" \ + --data-urlencode "u=USERNAME" \ + --data-urlencode "p=INFLUX_API_TOKEN" \ + --data-urlencode "q=SHOW DATABASES" + ``` + + Replace the following: + + - *`INFLUX_URL`*: your InfluxDB URL + - *`USERNAME`*: your InfluxDB username + - *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/v2/reference/glossary/#token) + + > [!Warning] + > Query string authentication exposes your credentials in the URL and server logs. + > Use [Token authentication](#section/Authentication/TokenAuthentication) for production environments. + in: query + name: u + type: apiKey x-tagGroups: - name: Using the InfluxDB HTTP API tags: @@ -20204,7 +20250,7 @@ x-tagGroups: - NotificationRules - Organizations - Ping - - Query + - Query data - Query data (v1-compatible) - Ready - RemoteConnections @@ -20226,5 +20272,5 @@ x-tagGroups: - Users - Variables - Views - - Write + - Write data - Write data (v1-compatible) diff --git a/api-docs/influxdb3/cloud-dedicated/.config.yml b/api-docs/influxdb3/cloud-dedicated/.config.yml index 11808b8216..f784adf26a 100644 --- a/api-docs/influxdb3/cloud-dedicated/.config.yml +++ b/api-docs/influxdb3/cloud-dedicated/.config.yml @@ -6,13 +6,11 @@ extends: x-influxdata-product-name: InfluxDB 3 Cloud Dedicated apis: - management@0: - root: management/openapi.yml v2@2: root: v2/ref.yml x-influxdata-docs-aliases: - /influxdb3/cloud-dedicated/api/ - v1-compatibility@2: - root: v1-compatibility/swaggerV1Compat.yml - x-influxdata-docs-aliases: - /influxdb3/cloud-dedicated/api/v1/ + - /influxdb3/cloud-dedicated/api/v2/ + - /influxdb3/cloud-dedicated/api/v1-compatibility + - /influxdb3/cloud-dedicated/api/v2-compatibility diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml deleted file mode 100644 index 57e8c8484c..0000000000 --- 
a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Using the Management API - tags: - - Authentication - - Quickstart -- name: All endpoints - tags: [] diff --git a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml index 2a84a00416..2983a87de8 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -28,269 +28,47 @@ security: - bearerAuthManagementToken: [] bearerAuthJwt: [] tags: - - name: Authentication - x-traitTag: true - description: | - With InfluxDB 3 Cloud Dedicated, the InfluxDB Management API endpoints require the following credentials: - - - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/cloud-dedicated/admin/tokens/management/). - - See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/). - - By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. 
- - name: Database tokens description: Manage database read/write tokens for a cluster - name: Databases description: Manage databases for a cluster - - name: Quickstart + - name: Quick start x-traitTag: true description: | - The following example script shows how to use `curl` to make database and token management requests: - - ```shell - #!/bin/bash - - # Usage: - # Note the leading space in the command below to keep secrets out of the shell history - # - # ``` - # MANAGEMENT_TOKEN= ACCOUNT_ID= CLUSTER_ID= ./scripts/test_http_api_v0_endpoints.sh - # ``` - - # Env var validation - if [ -z "${MANAGEMENT_TOKEN}" ]; then - echo " - [Error]: ❌ - \$MANAGEMENT_TOKEN env var is required. - " - exit 1 - fi - - if [ -z "${ACCOUNT_ID}" ]; then - echo " - [Error]: ❌ - \$ACCOUNT_ID env var is required. - " - exit 1 - fi - - if [ -z "${CLUSTER_ID}" ]; then - echo " - [Error]: ❌ - \$CLUSTER_ID env var is required. - " - exit 1 - fi - - HOST="https://console.influxdata.com" - - # Database request functions - list_databases () { - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - create_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "name": "'$databaseName'", - "maxTables": 75, - "maxColumnsPerTable": 90, - "retentionPeriod": 600000000000, - "partitionTemplate": [ - { - "type": "tag", - "value": "abc" - }, - { - "type": "bucket", - "value": { - "tagName": "def", - "numberOfBuckets": 5 - } - } - ] - }' \ - ) - echo "$response" - } - - update_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location 
"$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ - --request PATCH \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "maxTables": 150, - "maxColumnsPerTable": 180, - "retentionPeriod": 1200000000000 - }' \ - ) - echo "$response" - } - - delete_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ - --request DELETE \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - # Token request functions - list_tokens () { - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - create_token () { - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "description": "my test token", - "permissions": [ - { - "action": "write", - "resource": "database_one" - }, - { - "action": "read", - "resource": "database_two" - } - ] - }' \ - ) - echo "$response" - } - - get_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - update_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ - --request PATCH \ - --header "Accept: application/json" \ - --header 'Content-Type: 
application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "description": "my updated test token", - "permissions": [ - { - "action": "database_one", - "resource": "read" - } - ] - }' \ - ) - echo "$response" - } - - delete_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ - --request DELETE \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - - # Test database endpoints - databaseName="test_database_$RANDOM" - - printf "\n🏗️ Creating database... 🏗️\n\n" - response="$(create_database $databaseName)" - echo $response | jq - printf "\n🏗️ Creating database successful 🏗️\n\n" + Authenticate, write, and query with the API: - printf "\n⬆️ Updating database... ⬆️\n\n" - response="$(update_database $databaseName)" - echo $response | jq - printf "\n⬆️ Updating database successful ⬆️\n\n" + 1. Create a management token using `influxctl token create`. - printf "\n⬇️ Listing databases... ⬇️\n\n" - response="$(list_databases)" - echo $response | jq - printf "\n⬇️ Listing databases successful ⬇️\n\n" + 2. Create a database using the Management API. - printf "\n🗑️ Deleting database... 🗑️\n\n" - response="$(delete_database $databaseName)" - echo $response | jq - printf "\n🗑️ Deleting database successful 🗑️\n\n" + ```bash + curl -X POST "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"name": "sensors"}' + ``` + 3. Create a database token for read/write access. - # Test token endpoints - printf "\n🏗️ Creating token... 
🏗️\n\n" - response="$(create_token)" - echo $response | jq - tokenId=$(echo $response | jq '.id') - printf "\n🏗️ Creating token successful 🏗️\n\n" + ```bash + curl -X POST "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"description": "my token", "permissions": [{"action": "write", "resource": "sensors"}]}' + ``` - printf "\n⬇️ Getting token... ⬇️\n\n" - response="$(get_token $tokenId)" - echo $response | jq - printf "\n⬇️ Getting token successful ⬇️\n\n" + 4. Write data to InfluxDB Cloud Dedicated. - printf "\n⬆️ Updating token... ⬆️\n\n" - response="$(update_token $tokenId)" - echo $response | jq - printf "\n⬆️ Updating token successful ⬆️\n\n" + ```bash + curl -X POST "https://CLUSTER_HOST/api/v2/write?bucket=sensors&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0" + ``` - printf "\n📋 Listing tokens... 📋\n\n" - response="$(list_tokens)" - echo $response | jq - printf "\n📋 Listing tokens successful 📋\n\n" + 5. Query data using SQL or InfluxQL with a Flight client or the HTTP API. - printf "\n🗑️ Deleting token... 🗑️\n\n" - response="$(delete_token $tokenId)" - echo $response | jq - printf "\n🗑️ Deleting token successful 🗑️\n\n" - ``` + For more information, see the + [Get started](/influxdb3/cloud-dedicated/get-started/) guide. 
- name: Tables description: Manage tables in a database paths: @@ -2425,8 +2203,7 @@ components: x-tagGroups: - name: Using the Management API tags: - - Authentication - - Quickstart + - Quick start - name: All endpoints tags: - Database tokens diff --git a/api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml deleted file mode 100644 index ace9fe77dd..0000000000 --- a/api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml +++ /dev/null @@ -1,13 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - API compatibility - - Authentication - - Headers - - Pagination - - Response codes -- name: All endpoints - tags: - - Ping - - Query - - Write diff --git a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml index f4b3e76fe5..f2bfb79449 100644 --- a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml @@ -32,49 +32,68 @@ security: - QuerystringAuthentication: [] tags: - description: | + Use InfluxDB v1 and v2 compatible endpoints to write and query data. + ### Write data InfluxDB 3 Cloud Dedicated provides the following HTTP API endpoints for writing data: - - **Recommended**: [`/api/v2/write` endpoint](#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. + - `/api/v2/write` endpoint (recommended) for new write workloads or for + bringing existing InfluxDB v2 write workloads to InfluxDB Cloud Dedicated. + - `/write` endpoint for bringing existing InfluxDB v1 write workloads to + InfluxDB Cloud Dedicated. - Both endpoints accept the same line protocol format and process data in the same way. + Both endpoints accept line protocol format and process data the same way. 
### Query data InfluxDB 3 Cloud Dedicated provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/cloud-dedicated/get-started/query/). - - HTTP API [`/query` request](#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. + - Flight+gRPC request (recommended) that contains an SQL or InfluxQL query. + - HTTP API `/query` request that contains an InfluxQL query. + Use this protocol for existing InfluxDB v1 query workloads. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/cloud-dedicated/write-data/). - - See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). + The `/api/v2/write` endpoint works with Bearer and Token authentication + and existing InfluxDB 2.x tools and code. ### InfluxDB v1 compatibility - The HTTP API [`/write` endpoint](#operation/PostLegacyWrite) and [`/query` endpoint](#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - - See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). + The `/write` and `/query` endpoints work with InfluxDB 1.x username/password + authentication and existing InfluxDB 1.x tools and code. 
name: API compatibility x-traitTag: true - description: | - Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: - - | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + Authenticate API requests using tokens, basic auth, or query strings. + + ## Token types + + InfluxDB 3 Cloud Dedicated uses two types of tokens for API authentication: + + | Token type | Used for | How to create | + |:-----------|:---------|:--------------| + | **Management token** | Management operations (`/api/v0/*`) - manage databases, tables, and database tokens | [`influxctl management create`](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create/) | + | **Database token** | Data API (`/api/v2/write`, `/query`, etc.) 
- write and query data | [Database tokens API](/influxdb3/cloud-dedicated/api/database-tokens/) or [`influxctl token create`](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/) | + + ## Authentication schemes + + Choose an authentication scheme based on the endpoint and your workflow: + + | Scheme | Token type | Endpoints | Header format | + |:-------|:-----------|:----------|:--------------| + | [Bearer](#section/Authentication/BearerAuthentication) | Database | All data endpoints | `Authorization: Bearer DATABASE_TOKEN` | + | [Token](#section/Authentication/TokenAuthentication) | Database | v1, v2 endpoints | `Authorization: Token DATABASE_TOKEN` | + | [Basic](#section/Authentication/BasicAuthentication) | Database | v1 endpoints | `Authorization: Basic base64(username:DATABASE_TOKEN)` | + | [Query string](#section/Authentication/QuerystringAuthentication) | Database | v1 endpoints | `?u=username&p=DATABASE_TOKEN` | + + Management operations (`/api/v0/*`) require a management token in the `Authorization: Bearer` header. name: Authentication x-traitTag: true - description: | + Parameters for specifying resources in API requests. + To specify resources, some InfluxDB API endpoints require parameters or properties in the request--for example, writing to a `database` resource. @@ -86,9 +105,6 @@ tags: | `database`, `db` | string | The database name | name: Common parameters x-traitTag: true - - name: Data I/O endpoints - description: | - Write and query data stored in InfluxDB. - description: | InfluxDB HTTP API endpoints use standard HTTP request and response headers. The following table shows common headers used by many InfluxDB API endpoints. @@ -98,7 +114,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. 
| + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. | name: Headers @@ -107,22 +123,48 @@ tags: - description: | Query data stored in a database. - - HTTP clients can query the v1 [`/query` endpoint](#operation/GetLegacyQuery) + - HTTP clients can query the v1 [`/query` endpoint](/influxdb3/cloud-dedicated/api/query-data/) using **InfluxQL** and retrieve data in **CSV** or **JSON** format. - The `/api/v2/query` endpoint can't query InfluxDB 3 Cloud Dedicated. - _Flight + gRPC_ clients can query using **SQL** or **InfluxQL** and retrieve data in **Arrow** format. + name: Query data + x-influxdatadocs-related: + - /influxdb3/cloud-dedicated/get-started/query/ + - /influxdb3/cloud-dedicated/query-data/execute-queries/ + - description: | + Get started with the InfluxDB 3 Cloud Dedicated API: - #### Related guides + 1. **Create a management token** using [`influxctl management create`](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create/). - - [Get started querying InfluxDB](/influxdb3/cloud-dedicated/get-started/query/) - - [Execute queries](/influxdb3/cloud-dedicated/query-data/execute-queries/) - name: Query - - description: | - See the [**Get Started**](/influxdb3/cloud-dedicated/get-started/) tutorial - to get up and running authenticating with tokens, writing to databases, and querying data. + 2. **Create a database** using the [Databases API](/influxdb3/cloud-dedicated/api/databases/). + + ```bash + curl -X POST "https://cluster-id.a.influxdb.io/api/v0/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"name": "sensors"}' + ``` + + 3. **Create a database token** for read/write access. 
+ + ```bash + curl -X POST "https://cluster-id.a.influxdb.io/api/v0/tokens" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"description": "my token", "permissions": [{"action": "write", "resource": "sensors"}]}' + ``` - [**InfluxDB API client libraries and Flight clients**](/influxdb3/cloud-dedicated/reference/client-libraries/) - are available to integrate InfluxDB APIs with your application. + 4. **Write data** to InfluxDB 3 Cloud Dedicated. + + ```bash + curl -X POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=sensors&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0" + ``` + + 5. **Query data** using SQL or InfluxQL with a [Flight client](/influxdb3/cloud-dedicated/query-data/execute-queries/flight-sql/) or the HTTP [`/query` endpoint](/influxdb3/cloud-dedicated/api/query-data/). + + For more information, see the [Get started](/influxdb3/cloud-dedicated/get-started/) guide. name: Quick start x-traitTag: true - description: | @@ -151,7 +193,7 @@ tags: - name: Usage - description: | Write time series data to [databases](/influxdb3/cloud-dedicated/admin/databases/) using InfluxDB v1 or v2 endpoints. - name: Write + name: Write data paths: /ping: get: @@ -254,18 +296,16 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated. - - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). - - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). 
+ The [`/write`](/influxdb3/cloud-dedicated/api/write-data/) and [`/api/v2/write`](/influxdb3/cloud-dedicated/api/write-data/) endpoints are functionally equivalent for writing data to InfluxDB Cloud Dedicated. - #### Related guides - - - [Get started writing data](/influxdb3/cloud-dedicated/get-started/write/) - - [Write data](/influxdb3/cloud-dedicated/write-data/) - - [Best practices for writing data](/influxdb3/cloud-dedicated/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) + - Use the [`/write` endpoint](/influxdb3/cloud-dedicated/api/write-data/) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). + - Use the [`/api/v2/write` endpoint](/influxdb3/cloud-dedicated/api/write-data/) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). operationId: PostWrite + x-influxdatadocs-related: + - /influxdb3/cloud-dedicated/get-started/write/ + - /influxdb3/cloud-dedicated/write-data/ + - /influxdb3/cloud-dedicated/write-data/best-practices/ + - /influxdb3/cloud-dedicated/write-data/troubleshoot/ parameters: - $ref: '#/components/parameters/TraceSpan' - description: | @@ -313,10 +353,6 @@ paths: - Returns only `application/json` for format and limit errors. - Returns only `text/html` for some quota limit errors. - - #### Related guides - - - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) in: header name: Accept schema: @@ -378,10 +414,6 @@ paths: 1. Use [gzip](https://www.gzip.org/) to compress the line protocol data. 2. In your request, send the compressed data and the `Content-Encoding: gzip` header. 
- - #### Related guides - - - [Best practices for optimizing writes](/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes/) required: true responses: '201': @@ -427,10 +459,9 @@ paths: default: $ref: '#/components/responses/GeneralServerError' - summary: Write data + summary: Write data using the InfluxDB v2 HTTP API tags: - - Data I/O endpoints - - Write + - Write data /query: get: description: Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats. @@ -567,8 +598,7 @@ paths: description: Error processing query summary: Query using the InfluxDB v1 HTTP API tags: - - Query - - Data I/O endpoints + - Query data /write: post: operationId: PostLegacyWrite @@ -596,17 +626,15 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated. - - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). - - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). + The [`/write`](/influxdb3/cloud-dedicated/api/write-data/) and [`/api/v2/write`](/influxdb3/cloud-dedicated/api/write-data/) endpoints are functionally equivalent for writing data to InfluxDB Cloud Dedicated. - #### Related guides - - - [Get started writing data](/influxdb3/cloud-dedicated/get-started/write/) - - [Write data](/influxdb3/cloud-dedicated/write-data/) - - [Best practices for writing data](/influxdb3/cloud-dedicated/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) + - Use the [`/write` endpoint](/influxdb3/cloud-dedicated/api/write-data/) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). 
+ - Use the [`/api/v2/write` endpoint](/influxdb3/cloud-dedicated/api/write-data/) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). + x-influxdatadocs-related: + - /influxdb3/cloud-dedicated/get-started/write/ + - /influxdb3/cloud-dedicated/write-data/ + - /influxdb3/cloud-dedicated/write-data/best-practices/ + - /influxdb3/cloud-dedicated/write-data/troubleshoot/ parameters: - $ref: '#/components/parameters/TraceSpan' - description: The InfluxDB 1.x username to authenticate the request. @@ -715,8 +743,7 @@ paths: description: Internal server error summary: Write data using the InfluxDB v1 HTTP API tags: - - Data I/O endpoints - - Write + - Write data components: parameters: TraceSpan: @@ -1045,9 +1072,7 @@ components: Annotation rows to include in the results. An _annotation_ is metadata associated with an object (column) in the data model. - #### Related guides - - - See [Annotated CSV annotations](/influxdb3/cloud-dedicated/reference/syntax/annotated-csv/#annotations) for examples and more information. + See [Annotated CSV annotations](/influxdb3/cloud-dedicated/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). @@ -1811,9 +1836,7 @@ components: - Doesn't use `shardGroupDurationsSeconds`. - #### Related guides - - - InfluxDB [shards and shard groups](/influxdb3/cloud-dedicated/reference/internals/shards/) + For more information, see [shards and shard groups](/influxdb3/cloud-dedicated/reference/internals/shards/). format: int64 type: integer type: @@ -1958,7 +1981,7 @@ components: description: | Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. 
+ Works with v1 compatibility [`/write`](/influxdb3/cloud-dedicated/api/write-data/) and [`/query`](/influxdb3/cloud-dedicated/api/query-data/) endpoints in InfluxDB 3. When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token and ignores the `username` part of the decoded credential. @@ -1977,10 +2000,8 @@ components: - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides - - - [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) + For more information, see [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) + and [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/). QuerystringAuthentication: type: apiKey in: query @@ -1988,7 +2009,7 @@ components: description: | Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + Querystring authentication works with v1-compatible [`/write`](/influxdb3/cloud-dedicated/api/write-data/) and [`/query`](/influxdb3/cloud-dedicated/api/query-data/) endpoints. When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token and ignores the `u` (_username_) query parameter. 
@@ -2033,10 +2054,8 @@ components: - **`DATABASE_NAME`**: the database to query - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides - - - [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) + For more information, see [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) + and [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/). BearerAuthentication: type: http scheme: bearer @@ -2044,7 +2063,7 @@ components: description: | Use the OAuth Bearer authentication - scheme to provide an authorization token to InfluxDB 3. + scheme to provide an authorization token to InfluxDB Cloud Dedicated. Bearer authentication works with all endpoints. @@ -2066,9 +2085,9 @@ components: ``` TokenAuthentication: description: | - Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB Cloud Dedicated. - The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB Cloud Dedicated. In your API requests, send an `Authorization` header. For the header value, provide the word `Token` followed by a space and a database token. @@ -2093,9 +2112,7 @@ components: --data-binary 'home,room=kitchen temp=72 1463683075' ``` - ### Related guides - - - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) + For more information, see [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/). 
in: header name: Authorization type: apiKey @@ -2111,5 +2128,5 @@ x-tagGroups: - name: All endpoints tags: - Ping - - Query - - Write + - Query data + - Write data diff --git a/api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml b/api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml deleted file mode 100644 index e5f2323135..0000000000 --- a/api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml +++ /dev/null @@ -1,28 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Supported operations - - Headers - - Pagination - - Response codes -- name: All endpoints - tags: - - Authorizations (API tokens) - - Bucket Schemas - - Buckets - - Delete - - DBRPs - - Invokable Scripts - - Limits - - Organizations - - Query - - Resources - - Routes - - Secrets - - Tasks - - Telegrafs - - Templates - - Usage - - Variables - - Write diff --git a/api-docs/influxdb3/cloud-serverless/v2/ref.yml b/api-docs/influxdb3/cloud-serverless/v2/ref.yml index 3b8ac502e0..054080b794 100644 --- a/api-docs/influxdb3/cloud-serverless/v2/ref.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/ref.yml @@ -29,38 +29,41 @@ security: - TokenAuthentication: [] tags: - description: | + Use InfluxDB v1 and v2 compatible endpoints to write and query data. + ### Write data InfluxDB 3 Cloud Serverless provides the following HTTP API endpoints for writing data: - - **Recommended**: [`/api/v2/write` endpoint](#operation/PostWrite) - for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. + - `/api/v2/write` endpoint (recommended) for new write workloads or for + bringing existing InfluxDB v2 write workloads to InfluxDB Cloud Serverless. + - `/write` endpoint for bringing existing InfluxDB v1 write workloads to + InfluxDB Cloud Serverless. 
- Both endpoints accept the same line protocol format and process data in the same way. + Both endpoints accept line protocol format and process data the same way. ### Query data InfluxDB 3 Cloud Serverless provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/cloud-serverless/get-started/query/). - - HTTP API [`/query` request](/influxdb3/cloud-serverless/api/#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. + - Flight+gRPC request (recommended) that contains an SQL or InfluxQL query. + - HTTP API `/query` request that contains an InfluxQL query. + Use this protocol for existing InfluxDB v1 query workloads. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Token` authentication scheme](#section/Authentication/TokenAuthentication) and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/cloud-serverless/write-data/). - - See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). + The `/api/v2/write` endpoint works with Token authentication and existing + InfluxDB 2.x tools and code. ### InfluxDB v1 compatibility - The HTTP API [`/write` endpoint](#operation/PostLegacyWrite) and [`/query` endpoint](#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - - See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). + The `/write` and `/query` endpoints work with InfluxDB 1.x username/password + authentication and existing InfluxDB 1.x tools and code. 
name: API compatibility x-traitTag: true - description: | + Authenticate API requests using tokens, basic auth, or query strings. + Use one of the following schemes to authenticate to the InfluxDB API: - [Token authentication](#section/Authentication/TokenAuthentication) @@ -116,6 +119,8 @@ tags: - name: Cells - name: Checks - description: | + Parameters for specifying resources in API requests. + To specify resources, some InfluxDB API endpoints require parameters or properties in the request--for example, writing to a `bucket` resource in an `org` (_organization_ resource). @@ -132,7 +137,6 @@ tags: x-traitTag: true - name: Config - name: Dashboards - - name: Data I/O endpoints - description: | The InfluxDB 1.x data model includes [databases](/influxdb3/cloud-serverless/reference/glossary/#database) and [retention policies](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp). @@ -159,7 +163,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes. | | `Content-Type` | string | The format of the data in the request body. | name: Headers @@ -245,13 +249,38 @@ tags: - [Get started querying InfluxDB](/influxdb3/cloud-serverless/get-started/query/) - [Execute queries](/influxdb3/cloud-serverless/query-data/execute-queries/) - name: Query + name: Query data - description: | - See the [**Get started**](/influxdb3/cloud-serverless/get-started/) tutorial - to get up and running authenticating with tokens, writing to buckets, and querying data. + Authenticate, write, and query with the API: + + 1. 
Create a database token to authorize API requests in the InfluxDB Cloud + Serverless UI. + + 2. Write data to InfluxDB Cloud Serverless. + + ```bash + curl -X POST "https://cloud2.influxdata.com/api/v2/write?bucket=DATABASE_NAME&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 3. Query data from InfluxDB Cloud Serverless using SQL or InfluxQL. + For best performance, use a Flight client to query data. + The HTTP API `/query` endpoint supports InfluxQL queries. + + ```bash + curl -G "https://cloud2.influxdata.com/query" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h" + ``` - [**InfluxDB API client libraries and Flight clients**](/influxdb3/cloud-serverless/reference/client-libraries/) - are available to integrate InfluxDB with your application. + For more information about using InfluxDB Cloud Serverless, see the + [Get started](/influxdb3/cloud-serverless/get-started/) guide. name: Quick start x-traitTag: true - name: Resources @@ -357,7 +386,7 @@ tags: - name: Views - description: | Write time series data to [buckets](/influxdb3/cloud-serverless/reference/glossary/#bucket) using InfluxDB v1 or v2 endpoints. - name: Write + name: Write data paths: /api/v2: get: @@ -3885,7 +3914,7 @@ paths: $ref: '#/components/responses/GeneralServerError' summary: Query data tags: - - Query + - Query data /api/v2/query/analyze: post: deprecated: true @@ -3989,7 +4018,7 @@ paths: type: string summary: Analyze a Flux query tags: - - Query + - Query data /api/v2/query/ast: post: deprecated: true @@ -4052,7 +4081,7 @@ paths: description: Internal server error. 
summary: Generate a query Abstract Syntax Tree (AST) tags: - - Query + - Query data /api/v2/query/suggestions: get: deprecated: true @@ -4095,7 +4124,7 @@ paths: description: Internal server error. summary: List Flux query suggestions tags: - - Query + - Query data x-codeSamples: - label: cURL lang: Shell @@ -4142,7 +4171,7 @@ paths: The value passed for _`name`_ may have been misspelled. summary: Retrieve a query suggestion for a branching suggestion tags: - - Query + - Query data /api/v2/resources: get: operationId: GetResources @@ -7448,7 +7477,7 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless. + The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Serverless. - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). @@ -7678,10 +7707,9 @@ paths: type: integer default: $ref: '#/components/responses/GeneralServerError' - summary: Write data + summary: Write data using the InfluxDB v2 HTTP API tags: - - Data I/O endpoints - - Write + - Write data /legacy/authorizations: servers: - url: /private @@ -7847,8 +7875,7 @@ paths: description: Error processing query summary: Query using the InfluxDB v1 HTTP API tags: - - Data I/O endpoints - - Query + - Query data /write: post: operationId: PostLegacyWrite @@ -7876,7 +7903,7 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless. 
+ The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Serverless. - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). @@ -8011,8 +8038,7 @@ paths: description: Internal server error summary: Write data using the InfluxDB v1 HTTP API tags: - - Data I/O endpoints - - Write + - Write data components: examples: AuthorizationPostRequest: @@ -14756,7 +14782,7 @@ x-tagGroups: - Invokable Scripts - Limits - Organizations - - Query + - Query data - Resources - Routes - Secrets @@ -14765,4 +14791,4 @@ x-tagGroups: - Templates - Usage - Variables - - Write + - Write data diff --git a/api-docs/influxdb3/clustered/.config.yml b/api-docs/influxdb3/clustered/.config.yml index 1715e1bf92..4fe722f84b 100644 --- a/api-docs/influxdb3/clustered/.config.yml +++ b/api-docs/influxdb3/clustered/.config.yml @@ -6,13 +6,11 @@ extends: x-influxdata-product-name: InfluxDB 3 Clustered apis: - management@0: - root: management/openapi.yml v2@2: root: v2/ref.yml x-influxdata-docs-aliases: - /influxdb3/clustered/api/ - v1-compatibility@2: - root: v1-compatibility/swaggerV1Compat.yml - x-influxdata-docs-aliases: - /influxdb3/clustered/api/v1/ + - /influxdb3/clustered/api/v2/ + - /influxdb3/clustered/api/v1-compatibility + - /influxdb3/clustered/api/v2-compatibility diff --git a/api-docs/influxdb3/clustered/management/content/tag-groups.yml b/api-docs/influxdb3/clustered/management/content/tag-groups.yml deleted file mode 100644 index 57e8c8484c..0000000000 --- a/api-docs/influxdb3/clustered/management/content/tag-groups.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Using the Management API - tags: - - Authentication - - Quickstart -- 
name: All endpoints - tags: [] diff --git a/api-docs/influxdb3/clustered/management/openapi.yml b/api-docs/influxdb3/clustered/management/openapi.yml index 9c924b3550..4c6b1a3a2a 100644 --- a/api-docs/influxdb3/clustered/management/openapi.yml +++ b/api-docs/influxdb3/clustered/management/openapi.yml @@ -28,250 +28,47 @@ security: - bearerAuthManagementToken: [] bearerAuthJwt: [] tags: - - name: Authentication - x-traitTag: true - description: | - With InfluxDB 3 Clustered, InfluxDB Management API endpoints require the following credential: - - - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/clustered/admin/tokens/management/). - - See how to [create a management token](/influxdb3/clustered/admin/tokens/management/). - - By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. - name: Database tokens description: Manage database read/write tokens for a cluster - name: Databases description: Manage databases for a cluster - - name: Quickstart + - name: Quick start x-traitTag: true description: | - The following example script shows how to use `curl` to make database and token management requests: - - ```shell - #!/bin/bash - - # Usage: - # Note the leading space in the command below to keep secrets out of the shell history - # - # ``` - # MANAGEMENT_TOKEN= ./scripts/test_http_api_v0_endpoints.sh - # ``` - - # Env var validation - if [ -z "${MANAGEMENT_TOKEN}" ]; then - echo " - [Error]: ❌ - \$MANAGEMENT_TOKEN env var is required. 
- " - exit 1 - fi - - HOST="https://cluster-host.com" - - # Database request functions - list_databases () { - local response=$( \ - curl \ - --location "$HOST/api/v0/databases" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - create_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/databases" \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "name": "'$databaseName'", - "maxTables": 75, - "maxColumnsPerTable": 90, - "retentionPeriod": 600000000000, - "partitionTemplate": [ - { - "type": "tag", - "value": "abc" - }, - { - "type": "bucket", - "value": { - "tagName": "def", - "numberOfBuckets": 5 - } - } - ] - }' \ - ) - echo "$response" - } - - update_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/databases/$databaseName" \ - --request PATCH \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "maxTables": 150, - "maxColumnsPerTable": 180, - "retentionPeriod": 1200000000000 - }' \ - ) - echo "$response" - } - - delete_database () { - local databaseName=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/databases/$databaseName" \ - --request DELETE \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - # Token request functions - list_tokens () { - local response=$( \ - curl \ - --location "$HOST/api/v0/tokens" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - create_token () { - local response=$( \ - curl \ - --location "$HOST/api/v0/tokens" \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - 
--header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "description": "my test token", - "permissions": [ - { - "action": "write", - "resource": "database_one" - }, - { - "action": "read", - "resource": "database_two" - } - ] - }' \ - ) - echo "$response" - } - - get_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/tokens/$tokenId" \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - update_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/tokens/$tokenId" \ - --request PATCH \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - --data '{ - "description": "my updated test token", - "permissions": [ - { - "action": "database_one", - "resource": "read" - } - ] - }' \ - ) - echo "$response" - } - - delete_token () { - local token_id=$1 - local response=$( \ - curl \ - --location "$HOST/api/v0/tokens/$tokenId" \ - --request DELETE \ - --header "Accept: application/json" \ - --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ - ) - echo "$response" - } - - - # Test database endpoints - databaseName="test_database_$RANDOM" - - printf "\n🏗️ Creating database... 🏗️\n\n" - response="$(create_database $databaseName)" - echo $response | jq - printf "\n🏗️ Creating database successful 🏗️\n\n" + Authenticate, write, and query with the API: - printf "\n⬆️ Updating database... ⬆️\n\n" - response="$(update_database $databaseName)" - echo $response | jq - printf "\n⬆️ Updating database successful ⬆️\n\n" + 1. Create a management token using `influxctl token create`. - printf "\n⬇️ Listing databases... ⬇️\n\n" - response="$(list_databases)" - echo $response | jq - printf "\n⬇️ Listing databases successful ⬇️\n\n" + 2. Create a database using the Management API. - printf "\n🗑️ Deleting database... 
🗑️\n\n" - response="$(delete_database $databaseName)" - echo $response | jq - printf "\n🗑️ Deleting database successful 🗑️\n\n" + ```bash + curl -X POST "https://CLUSTER_HOST/api/v0/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"name": "sensors"}' + ``` + 3. Create a database token for read/write access. - # Test token endpoints - printf "\n🏗️ Creating token... 🏗️\n\n" - response="$(create_token)" - echo $response | jq - tokenId=$(echo $response | jq '.id') - printf "\n🏗️ Creating token successful 🏗️\n\n" + ```bash + curl -X POST "https://CLUSTER_HOST/api/v0/tokens" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"description": "my token", "permissions": [{"action": "write", "resource": "sensors"}]}' + ``` - printf "\n⬇️ Getting token... ⬇️\n\n" - response="$(get_token $tokenId)" - echo $response | jq - printf "\n⬇️ Getting token successful ⬇️\n\n" + 4. Write data to InfluxDB Clustered. - printf "\n⬆️ Updating token... ⬆️\n\n" - response="$(update_token $tokenId)" - echo $response | jq - printf "\n⬆️ Updating token successful ⬆️\n\n" + ```bash + curl -X POST "https://CLUSTER_HOST/api/v2/write?bucket=sensors&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0" + ``` - printf "\n📋 Listing tokens... 📋\n\n" - response="$(list_tokens)" - echo $response | jq - printf "\n📋 Listing tokens successful 📋\n\n" + 5. Query data using SQL or InfluxQL with a Flight client or the HTTP API. - printf "\n🗑️ Deleting token... 🗑️\n\n" - response="$(delete_token $tokenId)" - echo $response | jq - printf "\n🗑️ Deleting token successful 🗑️\n\n" - ``` + For more information, see the + [Get started](/influxdb3/clustered/get-started/) guide. 
- name: Tables description: Manage tables in a database paths: @@ -2031,8 +1828,7 @@ components: x-tagGroups: - name: Using the Management API tags: - - Authentication - - Quickstart + - Quick start - name: All endpoints tags: - Database tokens diff --git a/api-docs/influxdb3/clustered/v2/content/tag-groups.yml b/api-docs/influxdb3/clustered/v2/content/tag-groups.yml deleted file mode 100644 index 47deee8ef8..0000000000 --- a/api-docs/influxdb3/clustered/v2/content/tag-groups.yml +++ /dev/null @@ -1,12 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Headers - - Pagination - - Response codes -- name: All endpoints - tags: - - Ping - - Query - - Write diff --git a/api-docs/influxdb3/clustered/v2/ref.yml b/api-docs/influxdb3/clustered/v2/ref.yml index a93a582f1f..cbcbd4cdfb 100644 --- a/api-docs/influxdb3/clustered/v2/ref.yml +++ b/api-docs/influxdb3/clustered/v2/ref.yml @@ -32,49 +32,68 @@ security: - QuerystringAuthentication: [] tags: - description: | + Use InfluxDB v1 and v2 compatible endpoints to write and query data. + ### Write data InfluxDB 3 Clustered provides the following HTTP API endpoints for writing data: - - **Recommended**: [`/api/v2/write` endpoint](/influxdb3/clustered/api/#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. + - `/api/v2/write` endpoint (recommended) for new write workloads or for + bringing existing InfluxDB v2 write workloads to InfluxDB Clustered. + - `/write` endpoint for bringing existing InfluxDB v1 write workloads to + InfluxDB Clustered. - Both endpoints accept the same line protocol format and process data in the same way. + Both endpoints accept line protocol format and process data the same way. 
### Query data InfluxDB 3 Clustered provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/clustered/get-started/query/). - - HTTP API [`/query` request](/influxdb3/clustered/api/#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. + - Flight+gRPC request (recommended) that contains an SQL or InfluxQL query. + - HTTP API `/query` request that contains an InfluxQL query. + Use this protocol for existing InfluxDB v1 query workloads. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](/influxdb3/clustered/api/#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/clustered/write-data/). - - See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Clustered ](/influxdb3/clustered/guides/api-compatibility/v2/). + The `/api/v2/write` endpoint works with Bearer and Token authentication + and existing InfluxDB 2.x tools and code. ### InfluxDB v1 compatibility - The HTTP API [`/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite) and [`/query` endpoint](/influxdb3/clustered/api/#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - - See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Clustered ](/influxdb3/clustered/guides/api-compatibility/v1/). + The `/write` and `/query` endpoints work with InfluxDB 1.x username/password + authentication and existing InfluxDB 1.x tools and code. 
name: API compatibility x-traitTag: true - description: | - Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: - - | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + Authenticate API requests using tokens, basic auth, or query strings. + + ## Token types + + InfluxDB 3 Clustered uses two types of tokens for API authentication: + + | Token type | Used for | How to create | + |:-----------|:---------|:--------------| + | **Management token** | Management operations (`/api/v0/*`) - manage databases, tables, and database tokens | [`influxctl management create`](/influxdb3/clustered/reference/cli/influxctl/management/create/) | + | **Database token** | Data API (`/api/v2/write`, `/query`, etc.) 
- write and query data | [Database tokens API](/influxdb3/clustered/api/database-tokens/) or [`influxctl token create`](/influxdb3/clustered/reference/cli/influxctl/token/create/) | + + ## Authentication schemes + + Choose an authentication scheme based on the endpoint and your workflow: + + | Scheme | Token type | Endpoints | Header format | + |:-------|:-----------|:----------|:--------------| + | [Bearer](#section/Authentication/BearerAuthentication) | Database | All data endpoints | `Authorization: Bearer DATABASE_TOKEN` | + | [Token](#section/Authentication/TokenAuthentication) | Database | v1, v2 endpoints | `Authorization: Token DATABASE_TOKEN` | + | [Basic](#section/Authentication/BasicAuthentication) | Database | v1 endpoints | `Authorization: Basic base64(username:DATABASE_TOKEN)` | + | [Query string](#section/Authentication/QuerystringAuthentication) | Database | v1 endpoints | `?u=username&p=DATABASE_TOKEN` | + + Management operations (`/api/v0/*`) require a management token in the `Authorization: Bearer` header. name: Authentication x-traitTag: true - description: | + Parameters for specifying resources in API requests. + To specify resources, some InfluxDB API endpoints require parameters or properties in the request--for example, writing to a `database` resource. @@ -86,9 +105,6 @@ tags: | `database`, `db` | string | The database name | name: Common parameters x-traitTag: true - - name: Data I/O endpoints - description: | - Write and query data stored in InfluxDB. - description: | InfluxDB HTTP API endpoints use standard HTTP request and response headers. The following table shows common headers used by many InfluxDB API endpoints. @@ -98,7 +114,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. 
| + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. | name: Headers @@ -107,22 +123,48 @@ tags: - description: | Query data stored in a database. - - HTTP clients can query the v1 [`/query` endpoint](/influxdb3/clustered/api/#operation/GetLegacyQuery) + - HTTP clients can query the v1 [`/query` endpoint](/influxdb3/clustered/api/query-data/) using **InfluxQL** and retrieve data in **CSV** or **JSON** format. - The `/api/v2/query` endpoint can't query InfluxDB 3 Clustered. - _Flight + gRPC_ clients can query using **SQL** or **InfluxQL** and retrieve data in **Arrow** format. + name: Query data + x-influxdatadocs-related: + - /influxdb3/clustered/get-started/query/ + - /influxdb3/clustered/query-data/execute-queries/ + - description: | + Get started with the InfluxDB 3 Clustered API: - #### Related guides + 1. **Create a management token** using [`influxctl management create`](/influxdb3/clustered/reference/cli/influxctl/management/create/). - - [Get started querying InfluxDB](/influxdb3/clustered/get-started/query/) - - [Execute queries](/influxdb3/clustered/query-data/execute-queries/) - name: Query - - description: | - See the [**Get Started**](/influxdb3/clustered/get-started/) tutorial - to get up and running authenticating with tokens, writing to databases, and querying data. + 2. **Create a database** using the [Databases API](/influxdb3/clustered/api/databases/). + + ```bash + curl -X POST "https://cluster-host.com/api/v0/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"name": "sensors"}' + ``` + + 3. **Create a database token** for read/write access. 
+ + ```bash + curl -X POST "https://cluster-host.com/api/v0/tokens" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --header "Content-Type: application/json" \ + --data '{"description": "my token", "permissions": [{"action": "write", "resource": "sensors"}]}' + ``` - [**InfluxDB API client libraries and Flight clients**](/influxdb3/clustered/reference/client-libraries/) - are available to integrate InfluxDB APIs with your application. + 4. **Write data** to InfluxDB 3 Clustered. + + ```bash + curl -X POST "https://cluster-host.com/api/v2/write?bucket=sensors&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0" + ``` + + 5. **Query data** using SQL or InfluxQL with a [Flight client](/influxdb3/clustered/query-data/execute-queries/flight-sql/) or the HTTP [`/query` endpoint](/influxdb3/clustered/api/query-data/). + + For more information, see the [Get started](/influxdb3/clustered/get-started/) guide. name: Quick start x-traitTag: true - description: | @@ -152,7 +194,7 @@ tags: - name: Usage - description: | Write time series data to [databases](/influxdb3/clustered/admin/databases/) using InfluxDB v1 or v2 endpoints. - name: Write + name: Write data paths: /ping: get: @@ -239,14 +281,12 @@ paths: To ensure that InfluxDB Cloud handles writes in the order you request them, wait for a success response (HTTP `2xx` status code) before you send the next request. 
- - #### Related guides - - - [Get started writing data](/influxdb3/clustered/get-started/write/) - - [Write data](/influxdb3/clustered/write-data/) - - [Best practices for writing data](/influxdb3/clustered/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) operationId: PostWrite + x-influxdatadocs-related: + - /influxdb3/clustered/get-started/write/ + - /influxdb3/clustered/write-data/ + - /influxdb3/clustered/write-data/best-practices/ + - /influxdb3/clustered/write-data/troubleshoot/ parameters: - $ref: '#/components/parameters/TraceSpan' - description: | @@ -292,10 +332,6 @@ paths: - Returns only `application/json` for format and limit errors. - Returns only `text/html` for some quota limit errors. - - #### Related guides - - - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) in: header name: Accept schema: @@ -357,10 +393,6 @@ paths: 1. Use [gzip](https://www.gzip.org/) to compress the line protocol data. 2. In your request, send the compressed data and the `Content-Encoding: gzip` header. - - #### Related guides - - - [Best practices for optimizing writes](/influxdb3/clustered/write-data/best-practices/optimize-writes/) required: true responses: '204': @@ -430,10 +462,9 @@ paths: - Returns this error if the server is temporarily unavailable to accept writes due to concurrent request limits or insufficient healthy ingesters. default: $ref: '#/components/responses/GeneralServerError' - summary: Write data + summary: Write data using the InfluxDB v2 HTTP API tags: - - Data I/O endpoints - - Write + - Write data /query: get: description: Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats. 
@@ -569,7 +600,7 @@ paths: description: Error processing query summary: Query using the InfluxDB v1 HTTP API tags: - - Query + - Query data /write: post: operationId: PostLegacyWrite @@ -624,7 +655,7 @@ paths: '400': description: | Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected. - If a partial write occurred, then some points from the batch are written and queryable. + If a partial write occurred, then some points from the batch are written and queryable. The response body contains details about the [rejected points](/influxdb3/clustered/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: @@ -684,15 +715,13 @@ paths: To ensure that InfluxDB handles writes in the order you request them, wait for a success response (HTTP `2xx` status code) before you send the next request. - - #### Related guides - - - [Write data with the InfluxDB API](/influxdb3/clustered/get-started/write/) - - [Optimize writes to InfluxDB](/influxdb3/clustered/write-data/best-practices/optimize-writes/) - - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) summary: Write data using the InfluxDB v1 HTTP API tags: - - Write + - Write data + x-influxdatadocs-related: + - /influxdb3/clustered/get-started/write/ + - /influxdb3/clustered/write-data/best-practices/optimize-writes/ + - /influxdb3/clustered/write-data/troubleshoot/ components: parameters: TraceSpan: @@ -1021,9 +1050,7 @@ components: Annotation rows to include in the results. An _annotation_ is metadata associated with an object (column) in the data model. - #### Related guides - - - See [Annotated CSV annotations](/influxdb3/clustered/reference/syntax/annotated-csv/#annotations) for examples and more information. + See [Annotated CSV annotations](/influxdb3/clustered/reference/syntax/annotated-csv/#annotations) for examples and more information. 
For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). @@ -1787,9 +1814,7 @@ components: - Doesn't use `shardGroupDurationsSeconds`. - #### Related guides - - - InfluxDB [shards and shard groups](/influxdb3/clustered/reference/internals/shards/) + For more information, see [shards and shard groups](/influxdb3/clustered/reference/internals/shards/). format: int64 type: integer type: @@ -1934,7 +1959,7 @@ components: description: | Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. + Works with v1 compatibility [`/write`](/influxdb3/clustered/api/write-data/) and [`/query`](/influxdb3/clustered/api/query-data/) endpoints in InfluxDB 3. When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token and ignores the `username` part of the decoded credential. @@ -1959,10 +1984,8 @@ components: - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides - - - [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/clustered/admin/tokens/) + For more information, see [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) + and [Manage tokens](/influxdb3/clustered/admin/tokens/). QuerystringAuthentication: type: apiKey in: query @@ -1970,7 +1993,7 @@ components: description: | Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. 
+ Querystring authentication works with v1-compatible [`/write`](/influxdb3/clustered/api/write-data/) and [`/query`](/influxdb3/clustered/api/query-data/) endpoints. When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token and ignores the `u` (_username_) query parameter. @@ -2015,10 +2038,8 @@ components: - **`DATABASE_NAME`**: the database to query - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database - #### Related guides - - - [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/clustered/admin/tokens/) + For more information, see [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) + and [Manage tokens](/influxdb3/clustered/admin/tokens/). BearerAuthentication: type: http scheme: bearer @@ -2026,7 +2047,7 @@ components: description: | Use the OAuth Bearer authentication - scheme to provide an authorization token to InfluxDB 3. + scheme to provide an authorization token to InfluxDB Clustered. Bearer authentication works with all endpoints. @@ -2048,9 +2069,9 @@ components: ``` TokenAuthentication: description: | - Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB Clustered. - The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB Clustered. In your API requests, send an `Authorization` header. For the header value, provide the word `Token` followed by a space and a database token. @@ -2075,9 +2096,7 @@ components: --data-binary 'home,room=kitchen temp=72 1463683075' ``` - ### Related guides - - - [Manage tokens](/influxdb3/clustered/admin/tokens/) + For more information, see [Manage tokens](/influxdb3/clustered/admin/tokens/). 
in: header name: Authorization type: apiKey @@ -2092,5 +2111,5 @@ x-tagGroups: - name: All endpoints tags: - Ping - - Query - - Write + - Query data + - Write data diff --git a/api-docs/influxdb3/core/.config.yml b/api-docs/influxdb3/core/.config.yml index d492b29edd..14792e219a 100644 --- a/api-docs/influxdb3/core/.config.yml +++ b/api-docs/influxdb3/core/.config.yml @@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Core apis: v3@3: - root: v3/influxdb3-core-openapi.yaml + root: v3/ref.yml x-influxdata-docs-aliases: - /influxdb3/core/api/ - /influxdb3/core/api/v1/ diff --git a/api-docs/influxdb3/core/v3/content/info.yml b/api-docs/influxdb3/core/v3/content/info.yml index 107c08b130..34e55186eb 100644 --- a/api-docs/influxdb3/core/v3/content/info.yml +++ b/api-docs/influxdb3/core/v3/content/info.yml @@ -21,7 +21,10 @@ description: | - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) + license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/influxdb3/core/v3/content/tag-groups.yml b/api-docs/influxdb3/core/v3/content/tag-groups.yml deleted file mode 100644 index 364d5e7940..0000000000 --- a/api-docs/influxdb3/core/v3/content/tag-groups.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Cache data - - Common parameters - - Response codes - - Compatibility endpoints - - Database - - Processing engine - - Server information - - Table - - Token - - Query data - - Write data diff --git a/static/openapi/influxdb3-core-openapi.yaml b/api-docs/influxdb3/core/v3/ref.yml similarity index 66% rename from static/openapi/influxdb3-core-openapi.yaml rename to api-docs/influxdb3/core/v3/ref.yml index bd0d928430..b96ccc57f9 100644 --- a/static/openapi/influxdb3-core-openapi.yaml +++ 
b/api-docs/influxdb3/core/v3/ref.yml @@ -14,11 +14,14 @@ info: The API includes endpoints under the following paths: - `/api/v3`: InfluxDB 3 Core native endpoints - - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - To download the OpenAPI specification for this API, use the **Download** button above. - version: v3.8.0 + - `/api/v2/write`: v2-compatible write endpoint + - `/write`, `/query`: v1-compatible endpoints + + + version: '3.7.0' license: name: MIT url: https://opensource.org/licenses/MIT @@ -26,7 +29,9 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com - x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 + x-related: + - title: Migrate from InfluxDB v1 or v2 + href: /influxdb3/core/get-started/migrate-from-influxdb-v1-v2/ servers: - url: https://{baseurl} description: InfluxDB 3 Core API URL @@ -38,45 +43,36 @@ servers: description: InfluxDB 3 Core URL security: - BearerAuthentication: [] - - TokenAuthentication: [] - - BasicAuthentication: [] - - QuerystringAuthentication: [] tags: - name: Authentication description: | Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | - - x-traitTag: true - x-related: - - title: Authenticate v1 API requests - href: /influxdb3/core/guides/api-compatibility/v1/ - - title: Manage tokens - href: /influxdb3/core/admin/tokens/ - - name: Cache data - description: 
|- - Manage the in-memory cache. - - #### Distinct Value Cache + |:----------------------|:-----------| + | Bearer authentication | All endpoints | + | Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) | + | Basic authentication | v1 compatibility endpoints (`/write`, `/query`) | + | Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) | + See the **Security Schemes** section below for details on each authentication method. + x-traitTag: true + - name: Cache distinct values + description: | The Distinct Value Cache (DVC) lets you cache distinct values of one or more columns in a table, improving the performance of queries that return distinct tag and field values. The DVC is an in-memory cache that stores distinct values for specific columns - in a table. When you create an DVC, you can specify what columns' distinct + in a table. When you create a DVC, you can specify what columns' distinct values to cache, the maximum number of distinct value combinations to cache, and the maximum age of cached values. A DVC is associated with a table, which can have multiple DVCs. - - #### Last value cache - + x-related: + - title: Manage the Distinct Value Cache + href: /influxdb3/core/admin/distinct-value-cache/ + - name: Cache last value + description: | The Last Value Cache (LVC) lets you cache the most recent values for specific fields in a table, improving the performance of queries that return the most recent value of a field for specific series or the last N values @@ -88,140 +84,77 @@ tags: number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. 
x-related: - - title: Manage the Distinct Value Cache - href: /influxdb3/core/admin/distinct-value-cache/ - title: Manage the Last Value Cache href: /influxdb3/core/admin/last-value-cache/ - - name: Compatibility endpoints - description: > - InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. - - - ### Write data using v1- or v2-compatible endpoints - - - - [`/api/v2/write` endpoint](#operation/PostV2Write) - for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 - write workloads to InfluxDB 3. - - - For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). - - - All endpoints accept the same line protocol format. - - - ### Query data - - - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads - using InfluxQL. - - - For new workloads, use one of the following: - - - - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using - InfluxQL. - - - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using - Flight APIs, see [InfluxDB 3 client - libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). - + - name: Migrate from InfluxDB v1 or v2 + x-traitTag: true + description: | + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3 Core. - ### Server information + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools. + Operations marked with v1 or v2 badges are compatible with the respective InfluxDB version. 
+ ### Migration guides - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x - clients. - x-related: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ + - [Migrate from InfluxDB v1](/influxdb3/core/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x + - [Migrate from InfluxDB v2](/influxdb3/core/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints + - [Use the v1 HTTP query API](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP - name: Database description: Manage databases - - description: > + - description: | Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. - ### Common parameters - The following table shows common parameters used by many InfluxDB API endpoints. - Many endpoints may require other parameters in the query string or in the - request body that perform functions specific to those endpoints. - | Query parameter | Value type | Description | - |:------------------------ |:--------------------- |:-------------------------------------------| - | `db` | string | The database name | - InfluxDB HTTP API endpoints use standard HTTP request and response headers. - The following table shows common headers used by many InfluxDB API endpoints. - Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the - request body. - + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. 
| Header | Value type | Description | - |:------------------------ |:--------------------- |:-------------------------------------------| - | `Accept` | string | The content type that the client can understand. | - - | `Authorization` | string | The authorization scheme and credential. | - + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes. | - | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: > + description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - - InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and - trigger Python plugins in response to events in your database. - + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - - To get started with the processing engine, see the [Processing engine and Python - plugins](/influxdb3/core/processing-engine/) guide. - x-related: - - title: Processing engine and Python plugins - href: /influxdb3/core/plugins/ + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. - name: Query data description: Query data using SQL or InfluxQL - x-related: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | - 1. [Create an admin token](#section/Authentication) to authorize API requests. + Authenticate, write, and query with the API: + + 1. 
Create an admin token to authorize API requests. ```bash curl -X POST "http://localhost:8181/api/v3/configure/token/admin" ``` - 2. [Check the status](#section/Server-information) of the InfluxDB server. + 2. Check the status of the InfluxDB server. ```bash curl "http://localhost:8181/health" \ --header "Authorization: Bearer ADMIN_TOKEN" ``` - 3. [Write data](#operation/PostWriteLP) to InfluxDB. + 3. Write data to InfluxDB. ```bash curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" @@ -232,7 +165,7 @@ tags: If all data is written, the response is `204 No Content`. - 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + 4. Query data from InfluxDB. ```bash curl -G "http://localhost:8181/api/v3/query_sql" \ @@ -254,11 +187,16 @@ tags: description: Retrieve server metrics, status, and version information - name: Table description: Manage table schemas and data - - name: Token + - name: Auth token description: Manage tokens for authentication and authorization - name: Write data + x-related: + - title: Write data using HTTP APIs + href: /influxdb3/core/write-data/http-api/ + - title: Line protocol reference + href: /influxdb3/core/reference/syntax/line-protocol/ description: | - Write data to InfluxDB 3 using line protocol format. + Write data to InfluxDB 3 Core using line protocol format. #### Timestamp precision across write APIs @@ -272,79 +210,153 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: | - Checks the status of the service. - - Returns `OK` if the service is running. 
This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - responses: - "200": - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /api/v2/write: + /write: post: - operationId: PostV2Write + operationId: PostV1Write + summary: Write line protocol (v1-compatible) + x-compatibility-version: v1 + description: | + Writes line protocol to the specified database. + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb3/core/write-data/http-api/compatibility-apis/ + parameters: + - $ref: '#/components/parameters/dbWriteParam' + - $ref: '#/components/parameters/compatibilityPrecisionParam' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: '#/components/schemas/LineProtocol' + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' responses: - "204": + '204': description: Success ("No Content"). All data in the batch is written and queryable. headers: cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": + $ref: '#/components/headers/ClusterUUID' + '400': + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + '401': + $ref: '#/components/responses/Unauthorized' + '403': description: Access denied. - "413": + '413': description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] + tags: + - Write data + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ + /api/v2/write: + post: + operationId: PostV2Write summary: Write line protocol (v2-compatible) - description: > + x-compatibility-version: v2 + description: | Writes line protocol to the specified database. - - - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x - client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to - InfluxDB. - - Use query parameters to specify options for writing data. - - - #### Related - - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. 
+ externalDocs: + description: Use compatibility APIs to write data + url: /influxdb3/core/write-data/http-api/compatibility-apis/ parameters: - name: Content-Type in: header description: | The content type of the request payload. schema: - $ref: "#/components/schemas/LineProtocol" + $ref: '#/components/schemas/LineProtocol' required: false - description: | The compression applied to the line protocol in the request payload. @@ -379,680 +391,752 @@ paths: enum: - application/json type: string - - name: bucket + - name: db in: query required: true schema: type: string - description: |- + description: | A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. - - This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. - name: accept_partial in: query required: false schema: - $ref: "#/components/schemas/AcceptPartial" - - $ref: "#/components/parameters/compatibilityPrecisionParam" + $ref: '#/components/schemas/AcceptPartial' + - $ref: '#/components/parameters/compatibilityPrecisionParam' requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" + $ref: '#/components/requestBodies/lineProtocolRequestBody' + responses: + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] tags: - - Compatibility endpoints - Write data - /api/v3/configure/database: - delete: - operationId: DeleteConfigureDatabase + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/core/write-data/http-api/compatibility-apis/ + /api/v3/write_lp: + post: + operationId: PostWriteLP + summary: Write line protocol + description: | + Writes line protocol to the specified database. + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + Use query parameters to specify options for writing data. + + #### Features + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + #### Auto precision detection + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + the timestamp precision based on the magnitude of the timestamp value: + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + - Larger timestamps → Nanosecond precision (no conversion needed) + + #### Related + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) parameters: - - $ref: "#/components/parameters/db" - - name: data_only + - $ref: '#/components/parameters/dbWriteParam' + - $ref: 
'#/components/parameters/accept_partial' + - $ref: '#/components/parameters/precisionParam' + - name: no_sync in: query - required: false schema: - type: boolean - default: false + $ref: '#/components/schemas/NoSync' + - name: Content-Type + in: header description: | - Delete only data while preserving the database schema and all associated resources - (tokens, triggers, last value caches, distinct value caches, processing engine configurations). - When `false` (default), the entire database is deleted. - - name: remove_tables - in: query - required: false + The content type of the request payload. schema: - type: boolean - default: false - description: | - Used with `data_only=true` to remove table resources (caches) while preserving - database-level resources (tokens, triggers, processing engine configurations). - Has no effect when `data_only=false`. - - name: hard_delete_at - in: query + $ref: '#/components/schemas/LineProtocol' required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. schema: type: string - format: date-time - description: |- - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. 
- - - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' responses: - "200": - description: Success. Database deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the database schema and resources. + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. + '422': + description: Unprocessable entity. tags: - - Database + - Write data + /api/v3/query_sql: get: - operationId: GetConfigureDatabase + operationId: GetExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/db' + - $ref: '#/components/parameters/querySqlParam' + - $ref: '#/components/parameters/format' + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' responses: - "200": - description: Success. The response body contains the list of databases. + '200': + description: Success. The response body contains query results. 
content: application/json: schema: - $ref: "#/components/schemas/ShowDatabasesResponse" - "400": + $ref: '#/components/schemas/QueryResponse' + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - '2024-02-02T12:00:00Z' + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': description: Database not found. - summary: List databases - description: Retrieves a list of databases. - parameters: - - $ref: "#/components/parameters/formatRequired" - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Database + - Query data post: - operationId: PostConfigureDatabase - responses: - "200": - description: Success. Database created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: Database already exists. - summary: Create a database - description: Creates a new database in the system. + operationId: PostExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateDatabaseRequest" - tags: - - Database - put: - operationId: update_database + $ref: '#/components/requestBodies/queryRequestBody' responses: - "200": - description: Success. 
The database has been updated. - "400": + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Update a database - description: | - Updates database configuration, such as retention period. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/UpdateDatabaseRequest" - tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. - parameters: - - $ref: "#/components/parameters/db" - responses: - "204": - description: Success. The database retention period has been removed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Database - /api/v3/configure/distinct_cache: - delete: - operationId: DeleteConfigureDistinctCache - responses: - "200": - description: Success. The distinct cache has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete distinct cache - description: Deletes a distinct cache. 
+ - Query data + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. parameters: - - $ref: "#/components/parameters/db" - - name: table + - $ref: '#/components/parameters/dbQueryParam' + - name: q in: query required: true schema: type: string - description: The name of the table containing the distinct cache. - - name: name + - name: format in: query - required: true + required: false schema: type: string - description: The name of the distinct cache to delete. + - $ref: '#/components/parameters/AcceptQueryHeader' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Cache data - - Table + - Query data post: - operationId: PostConfigureDistinctCache - responses: - "201": - description: Success. The distinct cache has been created. - "400": - description: > - Bad request. - - - The server responds with status `400` if the request would overwrite an existing cache with a different - configuration. - "409": - description: Conflict. A distinct cache with this configuration already exists. - summary: Create distinct cache - description: Creates a distinct cache for a table. + operationId: PostExecuteQueryInfluxQL + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. 
+ parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/DistinctCacheCreateRequest" - tags: - - Cache data - - Table - /api/v3/configure/last_cache: - delete: - operationId: DeleteConfigureLastCache + $ref: '#/components/requestBodies/queryRequestBody' responses: - "200": - description: Success. The last cache has been deleted. - "400": + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete last cache - description: Deletes a last cache. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + tags: + - Query data + /query: + get: + operationId: GetV1ExecuteQuery + summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 + description: | + Executes an InfluxQL query to retrieve data from the specified database. + Compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true + - name: Accept + in: header schema: type: string - description: The name of the table containing the last cache. 
- - name: name - in: query - required: true + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. schema: type: string - description: The name of the last cache to delete. - tags: - - Cache data - - Table - post: - operationId: PostConfigureLastCache - responses: - "201": - description: Success. Last cache created. - "400": - description: Bad request. A cache with this name already exists or the request is malformed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LastCacheCreateRequest" + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. 
+ required: true + schema: + type: string + - name: epoch + description: | + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + in: query + schema: + $ref: '#/components/schemas/EpochCompatibility' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + responses: + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - - Cache data - - Table - /api/v3/configure/plugin_environment/install_packages: + - Query data + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ post: - operationId: PostInstallPluginPackages - summary: Install plugin packages - description: |- - Installs the specified Python packages into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the packages are installed. - parameters: - - $ref: "#/components/parameters/ContentType" + operationId: PostExecuteV1Query + summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 + description: Executes an InfluxQL query to retrieve data from the specified database. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ requestBody: - required: true content: application/json: schema: type: object properties: - packages: - type: array - items: - type: string + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: description: | - A list of Python package names to install. - Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - responses: - "200": - description: Success. The packages are installed. - "400": - description: Bad request. 
- "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements - description: > - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing - engine plugin environment. - - - This endpoint is synchronous and blocks until the requirements are installed. - - - ### Related - + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - requirements_location: + Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h type: string + pretty: description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean required: - - requirements_location - example: - requirements_location: requirements.txt + - q + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. responses: - "200": - description: Success. The requirements have been installed. - "400": + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - - Processing engine - /api/v3/configure/processing_engine_trigger: - post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger - description: Creates a processing engine trigger with the specified plugin file and trigger specification. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ProcessingEngineTriggerRequest" - examples: - schedule_cron: - summary: Schedule trigger using cron - description: > - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + - Query data + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ + /health: + get: + operationId: GetHealth + summary: Health check + description: | + Checks the status of the service. - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to - Friday). - value: - db: DATABASE_NAME - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. - value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. 
- value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log + Returns `OK` if the service is running. This endpoint does not return version information. + Use the `/ping` endpoint to retrieve version details. responses: - "200": - description: Success. Processing engine trigger created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '200': + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + '500': + description: Service is unavailable. tags: - - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. - parameters: - - $ref: "#/components/parameters/db" - - name: trigger_name - in: query - required: true - schema: - type: string - - name: force - in: query - required: false - schema: - type: boolean - default: false + - Server information + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1-compatible) + x-compatibility-version: v1 + description: Checks the status of the service. + responses: + '200': + description: Service is running. + '500': + description: Service is unavailable. + tags: + - Server information + /ping: + get: + operationId: GetPing + tags: + - Server information + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + responses: + '200': + description: Success. 
The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: '3.8.0' + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). + schema: + type: string + example: Core + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: '3.8.0' + revision: + type: string + description: The git revision hash for the build. + example: '5276213d5b' + process_id: + type: string + description: A unique identifier for the server process. + example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' + '404': description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + /metrics: + get: + operationId: GetMetrics + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. responses: - "200": - description: Success. The processing engine trigger has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '200': + description: Success. The response body contains Prometheus-compatible server metrics. tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/disable: - post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. + - Server information + /api/v3/configure/database: + get: + operationId: GetConfigureDatabase + summary: List databases + description: Retrieves a list of databases. parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. 
- - name: trigger_name - in: query - required: true - schema: - type: string - description: The name of the trigger. + - $ref: '#/components/parameters/formatRequired' responses: - "200": - description: Success. The processing engine trigger has been disabled. - "400": + '200': + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: '#/components/schemas/ShowDatabasesResponse' + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/enable: + - Database post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. - parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: trigger_name - in: query - required: true - schema: - type: string - description: The name of the trigger. + operationId: PostConfigureDatabase + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateDatabaseRequest' responses: - "200": - description: Success. The processing engine trigger is enabled. - "400": + '201': + description: Success. Database created. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: Database already exists. 
tags: - - Processing engine - /api/v3/configure/table: + - Database delete: - operationId: DeleteConfigureTable + operationId: DeleteConfigureDatabase + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - - name: data_only - in: query - required: false - schema: - type: boolean - default: false - description: | - Delete only data while preserving the table schema and all associated resources - (last value caches, distinct value caches). - When `false` (default), the entire table is deleted. + - $ref: '#/components/parameters/db' - name: hard_delete_at in: query required: false schema: type: string format: date-time - description: |- - Schedule the table for hard deletion at the specified time. - If not provided, the table will be soft deleted. - Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + #### Deleting a database cannot be undone - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: - "200": - description: Success (no content). The table has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Table not found. - summary: Delete a table + '200': + description: Success. Database deleted. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the table schema and resources. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. + Removes the retention period from a database, setting it to infinite retention. + Data in the database will not expire based on time. + parameters: + - $ref: '#/components/parameters/db' + responses: + '200': + description: Success. Retention period removed from database. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. tags: - - Table + - Database + /api/v3/configure/table: post: operationId: PostConfigureTable - responses: - "200": - description: Success. The table has been created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. summary: Create a table description: Creates a new table within a database. requestBody: @@ -1060,1267 +1144,720 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/CreateTableRequest" + $ref: '#/components/schemas/CreateTableRequest' + responses: + '201': + description: Success. The table has been created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. 
tags: - Table - /api/v3/configure/token: delete: - operationId: DeleteToken + operationId: DeleteConfigureTable + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - - name: token_name + - $ref: '#/components/parameters/db' + - name: table in: query required: true schema: type: string - description: The name of the token to delete. - responses: - "200": - description: Success. The token has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Token not found. - summary: Delete token - description: | - Deletes a token. - tags: - - Authentication - - Token - /api/v3/configure/token/admin: - post: - operationId: PostCreateAdminToken - responses: - "201": + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. 
+ Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). responses: - "201": - description: Success. The admin token has been regenerated. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" + '200': + description: Success (no content). The table has been deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: + - Table + /api/v3/configure/distinct_cache: post: - operationId: PostCreateNamedAdminToken - responses: - "201": - description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: A token with this name already exists. - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. + operationId: PostConfigureDistinctCache + summary: Create distinct cache + description: Creates a distinct cache for a table. tags: - - Authentication - - Token + - Cache distinct values requestBody: required: true content: application/json: schema: - type: object - properties: - token_name: - type: string - description: The name for the admin token. - expiry_secs: - type: integer - description: Optional expiration time in seconds. If not provided, the token does not expire. - nullable: true - required: - - token_name - /api/v3/engine/{request_path}: - get: - operationId: GetProcessingEnginePluginRequest - responses: - "200": - description: Success. The plugin request has been executed. - "400": - description: Malformed request. 
- "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. - summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - - - The response depends on the plugin implementation. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest + $ref: '#/components/schemas/DistinctCacheCreateRequest' responses: - "200": - description: Success. The plugin request has been executed. - "400": - description: Malformed request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. - summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - + '201': + description: Success. The distinct cache has been created. + '204': + description: Not created. A distinct cache with this configuration already exists. + '400': + description: | + Bad request. - The response depends on the plugin implementation. + The server responds with status `400` if the request would overwrite an existing cache with a different configuration. 
+ delete: + operationId: DeleteConfigureDistinctCache + summary: Delete distinct cache + description: Deletes a distinct cache. parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: false - content: - application/json: - schema: - type: object - additionalProperties: true - tags: - - Processing engine - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. - - For example, if you define a trigger with the following: - - ```json - trigger_specification: "request:hello-world" - ``` - - then, the HTTP API exposes the following plugin endpoint: - - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - /api/v3/plugin_test/schedule: - post: - operationId: PostTestSchedulingPlugin + - $ref: '#/components/parameters/db' + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. responses: - "200": - description: Success. The plugin test has been executed. - "400": + '200': + description: Success. The distinct cache has been deleted. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/SchedulePluginTestRequest" + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. tags: - - Processing engine - /api/v3/plugin_test/wal: + - Cache distinct values + /api/v3/configure/last_cache: post: - operationId: PostTestWALPlugin - responses: - "200": - description: Success. The plugin test has been executed. 
- "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. + operationId: PostConfigureLastCache + summary: Create last cache + description: Creates a last cache for a table. requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/WALPluginTestRequest" + $ref: '#/components/schemas/LastCacheCreateRequest' + responses: + '201': + description: Success. Last cache created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. + '409': + description: Cache already exists. tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginDirectoryRequest" - responses: - "200": - description: Success. The plugin directory has been updated. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - summary: Update a multi-file plugin directory - description: | - Replaces all files in a multi-file plugin directory. The - `plugin_name` must match a registered trigger name. Each entry in - the `files` array specifies a `relative_path` and `content`—the - server writes them into the trigger's plugin directory. - - Use this endpoint to update multi-file plugins (directories with - `__init__.py` and supporting modules). For single-file plugins, - use `PUT /api/v3/plugins/files` instead. 
- tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/plugins/files: - post: - operationId: create_plugin_file - summary: Create a plugin file - description: | - Creates a single plugin file in the plugin directory. Writes the - `content` to a file named after `plugin_name`. Does not require an - existing trigger—use this to upload plugin files before creating - triggers that reference them. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been created. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - tags: - - Processing engine - x-security-note: Requires an admin token - put: - operationId: PutPluginFile - summary: Update a plugin file - description: | - Updates a single plugin file for an existing trigger. The - `plugin_name` must match a registered trigger name—the server - resolves the trigger's `plugin_filename` and overwrites that file - with the provided `content`. - - To upload a new plugin file before creating a trigger, use - `POST /api/v3/plugins/files` instead. To update a multi-file - plugin directory, use `PUT /api/v3/plugins/directory`. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been updated. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - responses: - "200": - description: Success. The response body contains query results. 
- content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. + - Cache last value + delete: + operationId: DeleteConfigureLastCache + summary: Delete last cache + description: Deletes a last cache. parameters: - - $ref: "#/components/parameters/dbQueryParam" - - name: q + - $ref: '#/components/parameters/db' + - name: table in: query required: true schema: type: string - - name: format - in: query - required: false - schema: - type: string - - $ref: "#/components/parameters/AcceptQueryHeader" - - name: params - in: query - required: false - schema: - type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. - tags: - - Query data - post: - operationId: PostExecuteQueryInfluxQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. 
- summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" - tags: - - Query data - /api/v3/query_sql: - get: - operationId: GetExecuteQuerySQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - "2024-02-02T12:00:00Z" - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/db" - - $ref: "#/components/parameters/querySqlParam" - - $ref: "#/components/parameters/format" - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - - name: params + description: The name of the table containing the last cache. + - name: name in: query - required: false + required: true schema: type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. - tags: - - Query data - post: - operationId: PostExecuteQuerySQL + description: The name of the last cache to delete. responses: - "200": - description: Success. 
The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": + '200': + description: Success. The last cache has been deleted. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. tags: - - Query data - /api/v3/write_lp: + - Cache last value + /api/v3/configure/processing_engine_trigger: post: - operationId: PostWriteLP - parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/accept_partial" - - $ref: "#/components/parameters/precisionParam" - - name: no_sync - in: query - schema: - $ref: "#/components/schemas/NoSync" - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. 
- schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" - responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - "422": - description: Unprocessable entity. - summary: Write line protocol - description: > - Writes line protocol to the specified database. - - - This is the native InfluxDB 3 Core write endpoint that provides enhanced control - - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to - InfluxDB. - - Use query parameters to specify options for writing data. 
- - - #### Features - - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response - times but sacrificing durability guarantees - - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - - #### Auto precision detection - - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - - the timestamp precision based on the magnitude of the timestamp value: - - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - - Larger timestamps → Nanosecond precision (no conversion needed) - - - #### Related + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: | + Creates a processing engine trigger with the specified plugin file and trigger specification. 
+ ### Related guides - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + - [Processing engine and Python plugins](/influxdb3/core/plugins/) requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" - tags: - - Write data - x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: > - curl --request POST - "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" - /health: - get: - operationId: GetHealth - responses: - "200": - description: Service is running. Returns `OK`. 
- content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - summary: Health check - description: | - Checks the status of the service. - - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - tags: - - Server information - /metrics: - get: - operationId: GetMetrics - responses: - "200": - description: Success - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. - tags: - - Server information - /ping: - get: - operationId: GetPing - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - x-client-method: ping - summary: Ping the server - description: | - Returns version information for the server. - - **Important**: Use a GET request. HEAD requests return `404 Not Found`. 
- - The response includes version information in both headers and the JSON body: - - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - tags: - - Server information - post: - operationId: ping - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - summary: Ping the server - description: Returns version information for the server. Accepts POST in addition to GET. - tags: - - Server information - /query: - get: - operationId: GetV1ExecuteQuery - responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. - - Default is `application/json`. 
- - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. - - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - - Use query parameters to specify the database and the InfluxQL query. - - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + examples: + schedule_cron: + summary: Schedule trigger using cron + description: | + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. 
+ value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. 
+ value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + '200': + description: Success. Processing engine trigger created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. - - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. - - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. + - $ref: '#/components/parameters/db' + - name: trigger_name + in: query required: true schema: type: string - - name: epoch - description: > - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the - specified precision - - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond - precision. - in: query - schema: - $ref: "#/components/schemas/EpochCompatibility" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp + - name: force in: query required: false schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + type: boolean + default: false + responses: + '200': + description: Success. The processing engine trigger has been deleted. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. 
tags: - - Query data - - Compatibility endpoints + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: post: - operationId: PostExecuteV1Query + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. - - Default is `application/json`. - - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": + '200': + description: Success. The processing engine trigger has been disabled. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. - - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. 
+ tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. - + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + responses: + '200': + description: Success. The processing engine trigger is enabled. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: | + Installs the specified Python packages into the processing engine plugin environment. - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. + This endpoint is synchronous and blocks until the packages are installed. + ### Related guides - Returns an error if the format is invalid or non-UTF8. + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + parameters: + - $ref: '#/components/parameters/ContentType' requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. - - - - `h` for hours - - - `m` for minutes - - - `s` for seconds - - - `ms` for milliseconds - - - `u` or `µ` for microseconds - - - `ns` for nanoseconds - - - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) - with the specified precision - - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: + packages: + type: array + items: + type: string description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests required: - - q - application/x-www-form-urlencoded: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + '200': + description: Success. The packages are installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: | + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the requirements are installed. 
+ + ### Related + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. + requirements_location: type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. - type: boolean - chunk_size: description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + '200': + description: Success. The requirements have been installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. + tags: + - Processing engine + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. 
+ responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. + tags: + - Processing engine + /api/v3/engine/{request_path}: + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + For example, if you define a trigger with the following: - - `h` for hours + ```json + trigger_specification: "request:hello-world" + ``` - - `m` for minutes + then, the HTTP API exposes the following plugin endpoint: - - `s` for seconds + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + get: + operationId: GetProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. - - `ms` for milliseconds + An On Request plugin implements the following signature: - - `u` or `µ` for microseconds + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` - - `ns` for nanoseconds + The response depends on the plugin implementation. + responses: + '200': + description: Success. The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. + '500': + description: Processing failure. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. 
+ The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + An On Request plugin implements the following signature: - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) - with the specified precision + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: - description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean - required: - - q - application/vnd.influxql: + The response depends on the plugin implementation. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: false + content: + application/json: schema: - type: string - description: InfluxQL query string sent as the request body. + type: object + additionalProperties: true + responses: + '200': + description: Success. The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. + '500': + description: Processing failure. tags: - - Query data - - Compatibility endpoints - /write: + - Processing engine + /api/v3/configure/token/admin: post: - operationId: PostV1Write + operationId: PostCreateAdminToken + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": + '201': description: | - Bad request. 
Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + Success. The admin token has been created. + The response body contains the token string and metadata. content: application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - summary: Write line protocol (v1-compatible) - description: > - Writes line protocol to the specified database. - - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x - client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - - Use this endpoint to send data in [line - protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - - Use query parameters to specify options for writing data. 
- - - #### Related - - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + security: + - BearerAuthentication: [] + - {} # No auth required for initial token creation + tags: + - Auth token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + '201': + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Auth token + /api/v3/configure/token: + delete: + operationId: DeleteToken + summary: Delete token + description: | + Deletes a token. parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/compatibilityPrecisionParam" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp + - name: id in: query - required: false + required: true schema: type: string description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency + The ID of the token to delete. + responses: + '204': + description: Success. The token has been deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Token not found. + tags: + - Authentication + - Auth token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is an admin token with a specific name identifier. 
+ parameters: + - name: name in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false + required: true schema: type: string description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header + The name for the admin token. + responses: + '201': description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" - requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: A token with this name already exists. tags: - - Compatibility endpoints - - Write data + - Authentication + - Auth token + /api/v3/plugins/files: + put: + operationId: PutPluginFile + summary: Update plugin file + description: | + Updates a plugin file in the plugin directory. + x-security-note: Requires an admin token + responses: + '204': + description: Success. The plugin file has been updated. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + summary: Update plugin directory + description: | + Updates the plugin directory configuration. + x-security-note: Requires an admin token + responses: + '204': + description: Success. The plugin directory has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. + tags: + - Processing engine components: parameters: AcceptQueryHeader: @@ -2344,7 +1881,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. schema: - $ref: "#/components/schemas/ContentEncoding" + $ref: '#/components/schemas/ContentEncoding' required: false ContentLength: name: Content-Length @@ -2352,7 +1889,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: "#/components/schemas/ContentLength" + $ref: '#/components/schemas/ContentLength' ContentType: name: Content-Type description: | @@ -2396,20 +1933,20 @@ components: in: query required: false schema: - $ref: "#/components/schemas/AcceptPartial" + $ref: '#/components/schemas/AcceptPartial' compatibilityPrecisionParam: name: precision in: query - required: false + required: true schema: - $ref: "#/components/schemas/PrecisionWriteCompatibility" + $ref: '#/components/schemas/PrecisionWriteCompatibility' description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: false + required: true schema: - $ref: "#/components/schemas/PrecisionWrite" + $ref: '#/components/schemas/PrecisionWrite' description: The precision for unix timestamps in the line protocol batch. 
querySqlParam: name: q @@ -2425,24 +1962,22 @@ components: in: query required: false schema: - $ref: "#/components/schemas/Format" + $ref: '#/components/schemas/Format' formatRequired: name: format in: query required: true schema: - $ref: "#/components/schemas/Format" + $ref: '#/components/schemas/Format' v1UsernameParam: name: u in: query required: false schema: type: string - description: > + description: | Username for v1 compatibility authentication. - - When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any - arbitrary string for compatibility with InfluxDB 1.x clients. + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. v1PasswordParam: name: p in: query @@ -2451,7 +1986,7 @@ components: type: string description: | Password for v1 compatibility authentication. - For query string authentication, pass a database token with write permissions as this parameter. + For query string authentication, pass an admin token. InfluxDB 3 checks that the `p` value is an authorized token. requestBodies: lineProtocolRequestBody: @@ -2460,22 +1995,29 @@ components: text/plain: schema: type: string + description: | + Line protocol data. Each line represents a point with a measurement name, + optional tag set, field set, and optional timestamp. + + Format: `[,=...] =[,=...] []` examples: - line: - summary: Example line protocol - value: measurement,tag=value field=1 1234567890 - multiline: - summary: Example line protocol with UTF-8 characters + single-point: + summary: Write a single point + description: Write one point with tags and fields to a table. + value: cpu,host=server01 usage=85.2,load=0.75 1638360000000000000 + multiple-tables: + summary: Write to multiple tables + description: Write points to different tables (measurements) in a single request. 
value: | - measurement,tag=value field=1 1234567890 - measurement,tag=value field=2 1234567900 - measurement,tag=value field=3 1234568000 + cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000 queryRequestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/QueryRequestObject" + $ref: '#/components/schemas/QueryRequestObject' schemas: AdminTokenObject: type: object @@ -2498,31 +2040,23 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: "2025-04-18T14:02:45.331Z" + created_at: '2025-04-18T14:02:45.331Z' expiry: null ContentEncoding: type: string enum: - gzip - identity - description: > + description: | Content coding. - Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - #### Multi-member gzip support - - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC - 1952](https://www.rfc-editor.org/rfc/rfc1952)). - + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). This allows you to: - - Concatenate multiple gzip files and send them in a single request - - Maintain compatibility with InfluxDB v1 and v2 write endpoints - - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2536,6 +2070,8 @@ components: ContentLength: type: integer description: The length in decimal number of octets. + Database: + type: string AcceptPartial: type: boolean default: true @@ -2546,12 +2082,9 @@ components: - json - csv - parquet - - json_lines - jsonl - - pretty - description: |- + description: | The format of data in the response body. - `json_lines` is the canonical name; `jsonl` is accepted as an alias. 
NoSync: type: boolean default: false @@ -2567,14 +2100,11 @@ components: - ms - s - us - - u - ns - - "n" type: string - description: |- + description: | The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. - Optional — defaults to nanosecond precision if omitted. + Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. PrecisionWrite: enum: - auto @@ -2610,7 +2140,6 @@ components: - json - csv - parquet - - json_lines - jsonl - pretty params: @@ -2632,13 +2161,9 @@ components: properties: db: type: string - pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ - description: |- - The database name. Database names cannot contain underscores (_). - Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. retention_period: type: string - description: |- + description: | The retention period for the database. Specifies how long data should be retained. Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d @@ -2673,12 +2198,6 @@ components: required: - name - type - retention_period: - type: string - description: |- - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d required: - db - table @@ -2773,93 +2292,56 @@ components: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: "#/components/schemas/TriggerSettings" + - $ref: '#/components/schemas/TriggerSettings' trigger_specification: - description: > + type: string + description: | Specifies when and how the processing engine trigger should be invoked. 
- ## Supported trigger specifications: - ### Cron-based scheduling - Format: `cron:CRON_EXPRESSION` - Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): - ``` - ┌───────────── second (0-59) - │ ┌───────────── minute (0-59) - │ │ ┌───────────── hour (0-23) - │ │ │ ┌───────────── day of month (1-31) - │ │ │ │ ┌───────────── month (1-12) - │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) - │ │ │ │ │ │ - * * * * * * - ``` - Examples: - - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM - - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM - - `cron:0 0 0 1 * *` - First day of every month at midnight - ### Interval-based scheduling - Format: `every:DURATION` - - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` - (years): - + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): - `every:30s` - Every 30 seconds - - `every:5m` - Every 5 minutes - - `every:1h` - Every hour - - `every:1d` - Every day - - `every:1w` - Every week - - `every:1M` - Every month - - `every:1y` - Every year - **Maximum interval**: 1 year - ### Table-based triggers - - `all_tables` - Triggers on write events to any table in the database - - `table:TABLE_NAME` - Triggers on write events to a specific table - ### On-demand triggers - Format: `request:REQUEST_PATH` - Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: - - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` - - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2905,116 +2387,6 @@ components: required: - run_async - error_behavior - WALPluginTestRequest: - type: object - description: | - Request body for testing a write-ahead logging (WAL) plugin. 
- properties: - filename: - type: string - description: | - The path and filename of the plugin to test. - database: - type: string - description: | - The database name to use for the test. - input_lp: - type: string - description: | - Line protocol data to use as input for the test. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - - input_lp - SchedulePluginTestRequest: - type: object - description: | - Request body for testing a scheduling plugin. - properties: - filename: - type: string - description: | - The path and filename of the plugin to test. - database: - type: string - description: | - The database name to use for the test. - schedule: - type: string - description: | - Optional schedule specification in cron or interval format. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - PluginFileRequest: - type: object - description: | - Request body for updating a plugin file. - properties: - plugin_name: - type: string - description: | - The name of the plugin file to update. - content: - type: string - description: | - The content of the plugin file. - required: - - plugin_name - - content - PluginDirectoryRequest: - type: object - description: | - Request body for updating plugin directory with multiple files. - properties: - plugin_name: - type: string - description: | - The name of the plugin directory to update. - files: - type: array - items: - $ref: "#/components/schemas/PluginFileEntry" - description: | - List of plugin files to include in the directory. 
- required: - - plugin_name - - files - PluginFileEntry: - type: object - description: | - Represents a single file in a plugin directory. - properties: - content: - type: string - description: | - The content of the file. - relative_path: - type: string - description: The relative path of the file within the plugin directory. - required: - - relative_path - - content ShowDatabasesResponse: type: object properties: @@ -3037,7 +2409,7 @@ components: - time - value values: - - - "2024-02-02T12:00:00Z" + - - '2024-02-02T12:00:00Z' - 42 ErrorMessage: type: object @@ -3047,6 +2419,38 @@ components: data: type: object nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. + readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code EpochCompatibility: description: | A unix timestamp precision. @@ -3075,13 +2479,62 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. 
+ Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: '2025-12-31T23:59:59Z' + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. + example: active + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' BadRequest: description: | Request failed. Possible reasons: @@ -3092,19 +2545,19 @@ components: content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' Forbidden: description: Access denied. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' NotFound: description: Resource not found. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' headers: ClusterUUID: description: | @@ -3121,126 +2574,88 @@ components: BasicAuthentication: type: http scheme: basic - description: >- + description: | Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + Works with v1-compatible `/write` and `/query` endpoints. 
- Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints - in InfluxDB 3. - - - When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an - authorized token - + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token and ignores the `username` part of the decoded credential. - - ### Syntax - - - ```http - - Authorization: Basic - - ``` - - ### Example - ```bash - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ --user "":"AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` - Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`AUTH_TOKEN`**: an admin token - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/core/admin/tokens/) QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: >- + description: | Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and - [`/query`](#operation/GetV1Query) endpoints. - + Querystring authentication works with v1-compatible `/write` and `/query` endpoints. When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token - and ignores the `u` (_username_) query parameter. 
- ### Syntax - ```http - - https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN - - https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN - + http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN + http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN ``` - ### Examples - ```bash - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` - Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Core database - - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - + - **`AUTH_TOKEN`**: an admin token ```bash - ####################################### - # Use an InfluxDB 1.x compatible username and password - # to query the InfluxDB v1 HTTP API - ####################################### - # Use authentication query parameters: - - # ?p=AUTH_TOKEN - + # ?p=DATABASE_TOKEN ####################################### - curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` - Replace the following: - - **`DATABASE_NAME`**: the database to query + - **`AUTH_TOKEN`**: an [admin token](/influxdb3/core/admin/tokens/) + + #### Related guides - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/core/admin/tokens/) BearerAuthentication: type: http scheme: bearer @@ -3248,12 +2663,13 @@ components: description: | Use the OAuth Bearer authentication - scheme to provide an authorization token to InfluxDB 3. + scheme to provide an authorization token to InfluxDB 3 Core. Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. - For the header value, provide the word `Bearer` followed by a space and a database token. 
+        For the header value, provide the word `Bearer` followed by a space and an admin token.
+

         ### Syntax

@@ -3268,10 +2684,10 @@ components:
           --header "Authorization: Bearer AUTH_TOKEN"
           ```
     TokenAuthentication:
-      description: |-
-        Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3.
+      description: |
+        Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3 Core.

-        The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3.
+        The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3 Core.

         In your API requests, send an `Authorization` header.
         For the header value, provide the word `Token` followed by a space and a database token.
@@ -3295,22 +2711,10 @@ components:
           --header "Authorization: Token AUTH_TOKEN" \
           --data-binary 'home,room=kitchen temp=72 1463683075'
           ```
+
+        ### Related guides
+
+        - [Manage tokens](/influxdb3/core/admin/tokens/)
       in: header
       name: Authorization
       type: apiKey
-x-tagGroups:
-  - name: Using the InfluxDB HTTP API
-    tags:
-      - Quick start
-      - Authentication
-      - Cache data
-      - Common parameters
-      - Response codes
-      - Compatibility endpoints
-      - Database
-      - Processing engine
-      - Server information
-      - Table
-      - Token
-      - Query data
-      - Write data
diff --git a/api-docs/influxdb3/enterprise/.config.yml b/api-docs/influxdb3/enterprise/.config.yml
index d39bc413c8..4b8210b97c 100644
--- a/api-docs/influxdb3/enterprise/.config.yml
+++ b/api-docs/influxdb3/enterprise/.config.yml
@@ -7,7 +7,7 @@ x-influxdata-product-name: InfluxDB 3 Enterprise
 
 apis:
   v3@3:
-    root: v3/ref.yml
+    root: v3/influxdb3-enterprise-openapi.yaml
     x-influxdata-docs-aliases:
       - /influxdb3/enterprise/api/
       - /influxdb3/enterprise/api/v1/
diff --git a/api-docs/influxdb3/enterprise/v3/content/info.yml b/api-docs/influxdb3/enterprise/v3/content/info.yml
index cd2e5acdf3..e4ec8ef609 100644
--- a/api-docs/influxdb3/enterprise/v3/content/info.yml
+++ 
b/api-docs/influxdb3/enterprise/v3/content/info.yml
@@ -21,7 +21,10 @@ description: |
   - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients
   - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients
 
+  [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml)
+
+
 license:
   name: MIT
   url: 'https://opensource.org/licenses/MIT'
diff --git a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml b/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml
deleted file mode 100644
index 364d5e7940..0000000000
--- a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-- name: Using the InfluxDB HTTP API
-  tags:
-    - Quick start
-    - Authentication
-    - Cache data
-    - Common parameters
-    - Response codes
-    - Compatibility endpoints
-    - Database
-    - Processing engine
-    - Server information
-    - Table
-    - Token
-    - Query data
-    - Write data
diff --git a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml
deleted file mode 100644
index 5ff481f9d0..0000000000
--- a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml
+++ /dev/null
@@ -1,3799 +0,0 @@
-openapi: 3.0.3
-info:
-  title: InfluxDB 3 Enterprise API Service
-  description: |
-    The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for
-    interacting with InfluxDB 3 Enterprise databases and resources.
- Use this API to: - - - Write data to InfluxDB 3 Enterprise databases - - Query data using SQL or InfluxQL - - Process data using Processing engine plugins - - Manage databases, tables, and Processing engine triggers - - Perform administrative tasks and access system information - - The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Enterprise native endpoints - - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) - version: v3.8.0 - license: - name: MIT - url: https://opensource.org/licenses/MIT - contact: - name: InfluxData - url: https://www.influxdata.com - email: support@influxdata.com - x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 -servers: - - url: https://{baseurl} - description: InfluxDB 3 Enterprise API URL - variables: - baseurl: - enum: - - localhost:8181 - default: localhost:8181 - description: InfluxDB 3 Enterprise URL -security: - - BearerAuthentication: [] - - TokenAuthentication: [] - - BasicAuthentication: [] - - QuerystringAuthentication: [] -tags: - - name: Authentication - description: | - Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: - - | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | - - x-traitTag: true - x-related: - - title: Authenticate v1 API requests - href: /influxdb3/enterprise/guides/api-compatibility/v1/ - - title: Manage tokens - 
href: /influxdb3/enterprise/admin/tokens/ - - name: Cache data - description: |- - Manage the in-memory cache. - - #### Distinct Value Cache - - The Distinct Value Cache (DVC) lets you cache distinct - values of one or more columns in a table, improving the performance of - queries that return distinct tag and field values. - - The DVC is an in-memory cache that stores distinct values for specific columns - in a table. When you create an DVC, you can specify what columns' distinct - values to cache, the maximum number of distinct value combinations to cache, and - the maximum age of cached values. A DVC is associated with a table, which can - have multiple DVCs. - - #### Last value cache - - The Last Value Cache (LVC) lets you cache the most recent - values for specific fields in a table, improving the performance of queries that - return the most recent value of a field for specific series or the last N values - of a field. - - The LVC is an in-memory cache that stores the last N number of values for - specific fields of series in a table. When you create an LVC, you can specify - what fields to cache, what tags to use to identify each series, and the - number of values to cache for each unique series. - An LVC is associated with a table, which can have multiple LVCs. - x-related: - - title: Manage the Distinct Value Cache - href: /influxdb3/enterprise/admin/distinct-value-cache/ - - title: Manage the Last Value Cache - href: /influxdb3/enterprise/admin/last-value-cache/ - - name: Compatibility endpoints - description: > - InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. - - - ### Write data using v1- or v2-compatible endpoints - - - - [`/api/v2/write` endpoint](#operation/PostV2Write) - for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. 
- - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 - write workloads to InfluxDB 3. - - - For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). - - - All endpoints accept the same line protocol format. - - - ### Query data - - - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads - using InfluxQL. - - - For new workloads, use one of the following: - - - - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using - InfluxQL. - - - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using - Flight APIs, see [InfluxDB 3 client - libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). - - - ### Server information - - - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x - clients. - x-related: - - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ - - name: Database - description: Manage databases - - description: > - Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. - - - ### Common parameters - - - The following table shows common parameters used by many InfluxDB API endpoints. - - Many endpoints may require other parameters in the query string or in the - - request body that perform functions specific to those endpoints. - - - | Query parameter | Value type | Description | - - |:------------------------ |:--------------------- |:-------------------------------------------| - - | `db` | string | The database name | - - - InfluxDB HTTP API endpoints use standard HTTP request and response headers. 
- - The following table shows common headers used by many InfluxDB API endpoints. - - Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the - request body. - - - | Header | Value type | Description | - - |:------------------------ |:--------------------- |:-------------------------------------------| - - | `Accept` | string | The content type that the client can understand. | - - | `Authorization` | string | The authorization scheme and credential. | - - | `Content-Length` | integer | The size of the entity-body, in bytes. | - - | `Content-Type` | string | The format of the data in the request body. | - name: Headers and parameters - x-traitTag: true - - name: Processing engine - description: > - Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - - - InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load - and trigger Python plugins in response to events in your database. - - Use Processing engine plugins and triggers to run code and perform tasks for different database events. - - - To get started with the processing engine, see the [Processing engine and Python - plugins](/influxdb3/enterprise/processing-engine/) guide. - x-related: - - title: Processing engine and Python plugins - href: /influxdb3/enterprise/plugins/ - - name: Query data - description: Query data using SQL or InfluxQL - x-related: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ - - name: Quick start - description: > - 1. [Create an admin token](#section/Authentication) to authorize API requests. - - ```bash - curl -X POST "http://localhost:8181/api/v3/configure/token/admin" - ``` - 2. 
[Check the status](#section/Server-information) of the InfluxDB server. - - ```bash - curl "http://localhost:8181/health" \ - --header "Authorization: Bearer ADMIN_TOKEN" - ``` - - 3. [Write data](#operation/PostWriteLP) to InfluxDB. - - ```bash - curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" - --header "Authorization: Bearer ADMIN_TOKEN" \ - --data-raw "home,room=Kitchen temp=72.0 - home,room=Living\ room temp=71.5" - ``` - - If all data is written, the response is `204 No Content`. - - 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. - - ```bash - curl -G "http://localhost:8181/api/v3/query_sql" \ - --header "Authorization: Bearer ADMIN_TOKEN" \ - --data-urlencode "db=sensors" \ - --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ - --data-urlencode "format=jsonl" - ``` - - Output: - - ```jsonl - {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} - ``` - - For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) - guide. - x-traitTag: true - - name: Server information - description: Retrieve server metrics, status, and version information - - name: Table - description: Manage table schemas and data - - name: Token - description: Manage tokens for authentication and authorization - - name: Write data - description: | - Write data to InfluxDB 3 using line protocol format. - - #### Timestamp precision across write APIs - - InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions. 
- The following table compares timestamp precision support across v1, v2, and v3 write APIs: - - | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | - |-----------|---------------|----------------------|-------------------------| - | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | - | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | - | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | - | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | - | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | - | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | - - All timestamps are stored internally as nanoseconds. -paths: - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) - description: | - Checks the status of the service. - - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - responses: - "200": - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /api/v2/write: - post: - operationId: PostV2Write - responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - summary: Write line protocol (v2-compatible) - description: > - Writes line protocol to the specified database. 
- - - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x - client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - - - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format - to InfluxDB. - - Use query parameters to specify options for writing data. - - - #### Related - - - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - parameters: - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - description: | - The compression applied to the line protocol in the request payload. - To send a gzip payload, pass `Content-Encoding: gzip` header. - in: header - name: Content-Encoding - schema: - default: identity - description: | - Content coding. - Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - enum: - - gzip - - identity - type: string - - description: | - The size of the entity-body, in bytes, sent to InfluxDB. - in: header - name: Content-Length - schema: - description: The length in decimal number of octets. - type: integer - - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - in: header - name: Accept - schema: - default: application/json - description: Error content type. - enum: - - application/json - type: string - - name: bucket - in: query - required: true - schema: - type: string - description: |- - A database name. - InfluxDB creates the database if it doesn't already exist, and then - writes all points in the batch to the database. - - This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. 
- - name: accept_partial - in: query - required: false - schema: - $ref: "#/components/schemas/AcceptPartial" - - $ref: "#/components/parameters/compatibilityPrecisionParam" - requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" - tags: - - Compatibility endpoints - - Write data - /api/v3/configure/database: - delete: - operationId: DeleteConfigureDatabase - parameters: - - $ref: "#/components/parameters/db" - - name: data_only - in: query - required: false - schema: - type: boolean - default: false - description: | - Delete only data while preserving the database schema and all associated resources - (tokens, triggers, last value caches, distinct value caches, processing engine configurations). - When `false` (default), the entire database is deleted. - - name: remove_tables - in: query - required: false - schema: - type: boolean - default: false - description: | - Used with `data_only=true` to remove table resources (caches) while preserving - database-level resources (tokens, triggers, processing engine configurations). - Has no effect when `data_only=false`. - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: |- - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. - - - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time - responses: - "200": - description: Success. Database deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. 
- summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the database schema and resources. - tags: - - Database - get: - operationId: GetConfigureDatabase - responses: - "200": - description: Success. The response body contains the list of databases. - content: - application/json: - schema: - $ref: "#/components/schemas/ShowDatabasesResponse" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: List databases - description: Retrieves a list of databases. - parameters: - - $ref: "#/components/parameters/formatRequired" - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. - tags: - - Database - post: - operationId: PostConfigureDatabase - responses: - "200": - description: Success. Database created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: Database already exists. - summary: Create a database - description: Creates a new database in the system. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateDatabaseRequest" - tags: - - Database - put: - operationId: update_database - responses: - "200": - description: Success. The database has been updated. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Update a database - description: | - Updates database configuration, such as retention period. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/UpdateDatabaseRequest" - tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. - parameters: - - $ref: "#/components/parameters/db" - responses: - "204": - description: Success. The database retention period has been removed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - tags: - - Database - /api/v3/configure/distinct_cache: - delete: - operationId: DeleteConfigureDistinctCache - responses: - "200": - description: Success. The distinct cache has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete distinct cache - description: Deletes a distinct cache. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the distinct cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the distinct cache to delete. - tags: - - Cache data - - Table - post: - operationId: PostConfigureDistinctCache - responses: - "201": - description: Success. The distinct cache has been created. - "400": - description: > - Bad request. - - - The server responds with status `400` if the request would overwrite an existing cache with a different - configuration. - "409": - description: Conflict. A distinct cache with this configuration already exists. - summary: Create distinct cache - description: Creates a distinct cache for a table. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/DistinctCacheCreateRequest" - tags: - - Cache data - - Table - /api/v3/configure/last_cache: - delete: - operationId: DeleteConfigureLastCache - responses: - "200": - description: Success. The last cache has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete last cache - description: Deletes a last cache. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true - schema: - type: string - description: The name of the last cache to delete. - tags: - - Cache data - - Table - post: - operationId: PostConfigureLastCache - responses: - "201": - description: Success. Last cache created. - "400": - description: Bad request. A cache with this name already exists or the request is malformed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LastCacheCreateRequest" - tags: - - Cache data - - Table - /api/v3/configure/plugin_environment/install_packages: - post: - operationId: PostInstallPluginPackages - summary: Install plugin packages - description: |- - Installs the specified Python packages into the processing engine plugin environment. - - This endpoint is synchronous and blocks until the packages are installed. 
- parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - packages: - type: array - items: - type: string - description: | - A list of Python package names to install. - Can include version specifiers (e.g., "scipy==1.9.0"). - example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - responses: - "200": - description: Success. The packages are installed. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Processing engine - /api/v3/configure/plugin_environment/install_requirements: - post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements - description: > - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing - engine plugin environment. - - - This endpoint is synchronous and blocks until the requirements are installed. - - - ### Related - - - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) - - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - requirements_location: - type: string - description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt - required: - - requirements_location - example: - requirements_location: requirements.txt - responses: - "200": - description: Success. The requirements have been installed. - "400": - description: Bad request. 
- "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Processing engine - /api/v3/configure/processing_engine_trigger: - post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger - description: Creates a processing engine trigger with the specified plugin file and trigger specification. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ProcessingEngineTriggerRequest" - examples: - schedule_cron: - summary: Schedule trigger using cron - description: > - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to - Friday). - value: - db: DATABASE_NAME - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. 
- value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. - value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. - value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. 
- value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - responses: - "200": - description: Success. Processing engine trigger created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. - tags: - - Processing engine - delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. - parameters: - - $ref: "#/components/parameters/db" - - name: trigger_name - in: query - required: true - schema: - type: string - - name: force - in: query - required: false - schema: - type: boolean - default: false - description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. - responses: - "200": - description: Success. The processing engine trigger has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/disable: - post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: trigger_name - in: query - required: true - schema: - type: string - description: The name of the trigger. - responses: - "200": - description: Success. The processing engine trigger has been disabled. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. 
- tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/enable: - post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. - parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: trigger_name - in: query - required: true - schema: - type: string - description: The name of the trigger. - responses: - "200": - description: Success. The processing engine trigger is enabled. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/table: - delete: - operationId: DeleteConfigureTable - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - - name: data_only - in: query - required: false - schema: - type: boolean - default: false - description: | - Delete only data while preserving the table schema and all associated resources - (last value caches, distinct value caches). - When `false` (default), the entire table is deleted. - - name: hard_delete_at - in: query - required: false - schema: - type: string - format: date-time - description: |- - Schedule the table for hard deletion at the specified time. - If not provided, the table will be soft deleted. - Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). - - - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time - responses: - "200": - description: Success (no content). The table has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Table not found. - summary: Delete a table - description: | - Soft deletes a table. 
- The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the table schema and resources. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. - tags: - - Table - post: - operationId: PostConfigureTable - responses: - "200": - description: Success. The table has been created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Create a table - description: Creates a new table within a database. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateTableRequest" - tags: - - Table - put: - operationId: PatchConfigureTable - responses: - "200": - description: Success. The table has been updated. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Table not found. - summary: Update a table - description: | - Updates table configuration, such as retention period. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/UpdateTableRequest" - tags: - - Table - x-enterprise-only: true - /api/v3/configure/token: - delete: - operationId: DeleteToken - parameters: - - name: token_name - in: query - required: true - schema: - type: string - description: The name of the token to delete. - responses: - "200": - description: Success. The token has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Token not found. - summary: Delete token - description: | - Deletes a token. - tags: - - Authentication - - Token - /api/v3/configure/token/admin: - post: - operationId: PostCreateAdminToken - responses: - "201": - description: | - Success. 
The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. - tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token - description: | - Regenerates an admin token and revokes the previous token with the same name. - parameters: [] - responses: - "201": - description: Success. The admin token has been regenerated. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: - post: - operationId: PostCreateNamedAdminToken - responses: - "201": - description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: A token with this name already exists. - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. - tags: - - Authentication - - Token - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - token_name: - type: string - description: The name for the admin token. - expiry_secs: - type: integer - description: Optional expiration time in seconds. If not provided, the token does not expire. 
- nullable: true - required: - - token_name - /api/v3/engine/{request_path}: - get: - operationId: GetProcessingEnginePluginRequest - responses: - "200": - description: Success. The plugin request has been executed. - "400": - description: Malformed request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. - summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - - - The response depends on the plugin implementation. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest - responses: - "200": - description: Success. The plugin request has been executed. - "400": - description: Malformed request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. - summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - - - The response depends on the plugin implementation. 
- parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: false - content: - application/json: - schema: - type: object - additionalProperties: true - tags: - - Processing engine - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. - - For example, if you define a trigger with the following: - - ```json - trigger_specification: "request:hello-world" - ``` - - then, the HTTP API exposes the following plugin endpoint: - - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - /api/v3/enterprise/configure/file_index: - post: - operationId: configure_file_index_create - summary: Create a file index - description: >- - Creates a file index for a database or table. - - - A file index improves query performance by indexing data files based on specified columns, enabling the query - engine to skip irrelevant files during query execution. - - - This endpoint is only available in InfluxDB 3 Enterprise. - x-enterprise-only: true - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/FileIndexCreateRequest" - responses: - "200": - description: Success. The file index has been created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database or table not found. - tags: - - Database - - Table - delete: - operationId: configure_file_index_delete - summary: Delete a file index - description: |- - Deletes a file index from a database or table. - - This endpoint is only available in InfluxDB 3 Enterprise. - x-enterprise-only: true - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/FileIndexDeleteRequest" - responses: - "200": - description: Success. The file index has been deleted. - "400": - description: Bad request. 
- "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database, table, or file index not found. - tags: - - Database - - Table - /api/v3/enterprise/configure/node/stop: - post: - operationId: stop_node - summary: Mark a node as stopped - description: >- - Marks a node as stopped in the catalog, freeing up the licensed cores it was using for other nodes. - - - Use this endpoint after you have already stopped the physical instance (for example, using `kill` or stopping - the container). This endpoint does not shut down the running process — you must stop the instance first. - - - When the node is marked as stopped: - - 1. Licensed cores from the stopped node are freed for reuse - - 2. Other nodes in the cluster see the update after their catalog sync interval - - - This endpoint is only available in InfluxDB 3 Enterprise. - - - #### Related - - - - [influxdb3 stop node](/influxdb3/enterprise/reference/cli/influxdb3/stop/node/) - x-enterprise-only: true - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/StopNodeRequest" - responses: - "200": - description: Success. The node has been marked as stopped. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Node not found. - tags: - - Server information - /api/v3/enterprise/configure/table/retention_period: - post: - operationId: create_or_update_retention_period_for_table - summary: Set table retention period - description: >- - Sets or updates the retention period for a specific table. - - - Use this endpoint to control how long data in a table is retained independently of the database-level retention - period. - - - This endpoint is only available in InfluxDB 3 Enterprise. 
- - - #### Related - - - - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) - x-enterprise-only: true - parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: table - in: query - required: true - schema: - type: string - description: The table name. - - name: duration - in: query - required: true - schema: - type: string - description: The retention period as a human-readable duration (for example, "30d", "24h", "1y"). - responses: - "204": - description: Success. The table retention period has been set. - "400": - description: Bad request. Invalid duration format. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database or table not found. - tags: - - Table - delete: - operationId: delete_retention_period_for_table - summary: Clear table retention period - description: >- - Removes the retention period from a specific table, reverting to the database-level retention period (or - infinite retention if no database-level retention is set). - - - This endpoint is only available in InfluxDB 3 Enterprise. - - - #### Related - - - - [influxdb3 update table](/influxdb3/enterprise/reference/cli/influxdb3/update/table/) - x-enterprise-only: true - parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: table - in: query - required: true - schema: - type: string - description: The table name. - responses: - "204": - description: Success. The table retention period has been cleared. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database or table not found. - tags: - - Table - /api/v3/enterprise/configure/token: - post: - operationId: PostCreateResourceToken - summary: Create a resource token - description: | - Creates a resource (fine-grained permissions) token. - A resource token is a token that has access to specific resources in the system. 
- - This endpoint is only available in InfluxDB 3 Enterprise. - responses: - "201": - description: | - Success. The resource token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Authentication - - Token - x-enterprise-only: true - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateTokenWithPermissionsRequest" - /api/v3/plugin_test/schedule: - post: - operationId: PostTestSchedulingPlugin - responses: - "200": - description: Success. The plugin test has been executed. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/SchedulePluginTestRequest" - tags: - - Processing engine - /api/v3/plugin_test/wal: - post: - operationId: PostTestWALPlugin - responses: - "200": - description: Success. The plugin test has been executed. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/WALPluginTestRequest" - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginDirectoryRequest" - responses: - "200": - description: Success. The plugin directory has been updated. 
- "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - summary: Update a multi-file plugin directory - description: | - Replaces all files in a multi-file plugin directory. The - `plugin_name` must match a registered trigger name. Each entry in - the `files` array specifies a `relative_path` and `content`—the - server writes them into the trigger's plugin directory. - - Use this endpoint to update multi-file plugins (directories with - `__init__.py` and supporting modules). For single-file plugins, - use `PUT /api/v3/plugins/files` instead. - tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/plugins/files: - post: - operationId: create_plugin_file - summary: Create a plugin file - description: | - Creates a single plugin file in the plugin directory. Writes the - `content` to a file named after `plugin_name`. Does not require an - existing trigger—use this to upload plugin files before creating - triggers that reference them. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been created. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - tags: - - Processing engine - x-security-note: Requires an admin token - put: - operationId: PutPluginFile - summary: Update a plugin file - description: | - Updates a single plugin file for an existing trigger. The - `plugin_name` must match a registered trigger name—the server - resolves the trigger's `plugin_filename` and overwrites that file - with the provided `content`. - - To upload a new plugin file before creating a trigger, use - `POST /api/v3/plugins/files` instead. 
To update a multi-file - plugin directory, use `PUT /api/v3/plugins/directory`. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been updated. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/dbQueryParam" - - name: q - in: query - required: true - schema: - type: string - - name: format - in: query - required: false - schema: - type: string - - $ref: "#/components/parameters/AcceptQueryHeader" - - name: params - in: query - required: false - schema: - type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. - tags: - - Query data - post: - operationId: PostExecuteQueryInfluxQL - responses: - "200": - description: Success. 
The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" - tags: - - Query data - /api/v3/query_sql: - get: - operationId: GetExecuteQuerySQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - "2024-02-02T12:00:00Z" - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. 
- parameters: - - $ref: "#/components/parameters/db" - - $ref: "#/components/parameters/querySqlParam" - - $ref: "#/components/parameters/format" - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - - name: params - in: query - required: false - schema: - type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. - tags: - - Query data - post: - operationId: PostExecuteQuerySQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" - tags: - - Query data - /api/v3/write_lp: - post: - operationId: PostWriteLP - parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/accept_partial" - - $ref: "#/components/parameters/precisionParam" - - name: no_sync - in: query - schema: - $ref: "#/components/schemas/NoSync" - - name: Content-Type - in: header - description: | - The content type of the request payload. 
- schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" - responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - "422": - description: Unprocessable entity. - summary: Write line protocol - description: > - Writes line protocol to the specified database. - - - This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control - - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format - to InfluxDB. - - Use query parameters to specify options for writing data. 
- - - #### Features - - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response - times but sacrificing durability guarantees - - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - - #### Auto precision detection - - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - - the timestamp precision based on the magnitude of the timestamp value: - - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - - Larger timestamps → Nanosecond precision (no conversion needed) - - - #### Related - - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) - requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" - tags: - - Write data - x-codeSamples: - - label: cURL - Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: > - curl --request POST - "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header 
"Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" - /health: - get: - operationId: GetHealth - responses: - "200": - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - summary: Health check - description: | - Checks the status of the service. - - Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - tags: - - Server information - /metrics: - get: - operationId: GetMetrics - responses: - "200": - description: Success - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. - tags: - - Server information - /ping: - get: - operationId: GetPing - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). 
- schema: - type: string - example: Enterprise - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - x-client-method: ping - summary: Ping the server - description: | - Returns version information for the server. - - **Important**: Use a GET request. HEAD requests return `404 Not Found`. - - The response includes version information in both headers and the JSON body: - - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. - tags: - - Server information - post: - operationId: ping - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Enterprise - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. 
Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - summary: Ping the server - description: Returns version information for the server. Accepts POST in addition to GET. - tags: - - Server information - /query: - get: - operationId: GetV1ExecuteQuery - responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. - - Default is `application/json`. - - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. - - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - - Use query parameters to specify the database and the InfluxQL query. - - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. 
- - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. - - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. - schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. - required: true - schema: - type: string - - name: epoch - description: > - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) - with the specified precision - - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond - precision. - in: query - schema: - $ref: "#/components/schemas/EpochCompatibility" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. 
- Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - tags: - - Query data - - Compatibility endpoints - post: - operationId: PostExecuteV1Query - responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. - - Default is `application/json`. - - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. - - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) - parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. - - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. - - - Returns an error if the format is invalid or non-UTF8. 
- requestBody: - content: - application/json: - schema: - type: object - properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. - type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. - - - - `h` for hours - - - `m` for minutes - - - `s` for seconds - - - `ms` for milliseconds - - - `u` or `µ` for microseconds - - - `ns` for nanoseconds - - - Formats timestamps as [unix (epoch) - timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: - description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean - required: - - q - application/x-www-form-urlencoded: - schema: - type: object - properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. - type: boolean - chunk_size: - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. 
- - - - `h` for hours - - - `m` for minutes - - - `s` for seconds - - - `ms` for milliseconds - - - `u` or `µ` for microseconds - - - `ns` for nanoseconds - - - Formats timestamps as [unix (epoch) - timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: - description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean - required: - - q - application/vnd.influxql: - schema: - type: string - description: InfluxQL query string sent as the request body. - tags: - - Query data - - Compatibility endpoints - /write: - post: - operationId: PostV1Write - responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
- content: - application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - summary: Write line protocol (v1-compatible) - description: > - Writes line protocol to the specified database. - - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x - client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - - Use this endpoint to send data in [line - protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - - Use query parameters to specify options for writing data. - - - #### Related - - - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/compatibilityPrecisionParam" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp - in: query - required: false - schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency - in: query - required: false - schema: - type: string - description: | - Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false - schema: - type: string - description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" - requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" - tags: - - Compatibility endpoints - - Write data -components: - parameters: - AcceptQueryHeader: - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/jsonl - - application/vnd.apache.parquet - - text/csv - required: false - description: | - The content type that the client can understand. - ContentEncoding: - name: Content-Encoding - in: header - description: | - The compression applied to the line protocol in the request payload. - To send a gzip payload, pass `Content-Encoding: gzip` header. - schema: - $ref: "#/components/schemas/ContentEncoding" - required: false - ContentLength: - name: Content-Length - in: header - description: | - The size of the entity-body, in bytes, sent to InfluxDB. 
- schema: - $ref: "#/components/schemas/ContentLength" - ContentType: - name: Content-Type - description: | - The format of the data in the request body. - in: header - schema: - type: string - enum: - - application/json - required: false - db: - name: db - in: query - required: true - schema: - type: string - description: | - The name of the database. - dbWriteParam: - name: db - in: query - required: true - schema: - type: string - description: | - The name of the database. - InfluxDB creates the database if it doesn't already exist, and then - writes all points in the batch to the database. - dbQueryParam: - name: db - in: query - required: false - schema: - type: string - description: | - The name of the database. - - If you provide a query that specifies the database, you can omit the 'db' parameter from your request. - accept_partial: - name: accept_partial - in: query - required: false - schema: - $ref: "#/components/schemas/AcceptPartial" - compatibilityPrecisionParam: - name: precision - in: query - required: false - schema: - $ref: "#/components/schemas/PrecisionWriteCompatibility" - description: The precision for unix timestamps in the line protocol batch. - precisionParam: - name: precision - in: query - required: false - schema: - $ref: "#/components/schemas/PrecisionWrite" - description: The precision for unix timestamps in the line protocol batch. - querySqlParam: - name: q - in: query - required: true - schema: - type: string - format: SQL - description: | - The query to execute. - format: - name: format - in: query - required: false - schema: - $ref: "#/components/schemas/Format" - formatRequired: - name: format - in: query - required: true - schema: - $ref: "#/components/schemas/Format" - v1UsernameParam: - name: u - in: query - required: false - schema: - type: string - description: > - Username for v1 compatibility authentication. 
- - When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any - arbitrary string for compatibility with InfluxDB 1.x clients. - v1PasswordParam: - name: p - in: query - required: false - schema: - type: string - description: | - Password for v1 compatibility authentication. - For query string authentication, pass a database token with write permissions as this parameter. - InfluxDB 3 checks that the `p` value is an authorized token. - requestBodies: - lineProtocolRequestBody: - required: true - content: - text/plain: - schema: - type: string - examples: - line: - summary: Example line protocol - value: measurement,tag=value field=1 1234567890 - multiline: - summary: Example line protocol with UTF-8 characters - value: | - measurement,tag=value field=1 1234567890 - measurement,tag=value field=2 1234567900 - measurement,tag=value field=3 1234568000 - queryRequestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/QueryRequestObject" - schemas: - AdminTokenObject: - type: object - properties: - id: - type: integer - name: - type: string - token: - type: string - hash: - type: string - created_at: - type: string - format: date-time - expiry: - format: date-time - example: - id: 0 - name: _admin - token: apiv3_00xx0Xx0xx00XX0x0 - hash: 00xx0Xx0xx00XX0x0 - created_at: "2025-04-18T14:02:45.331Z" - expiry: null - ResourceTokenObject: - type: object - properties: - token_name: - type: string - permissions: - type: array - items: - type: object - properties: - resource_type: - type: string - enum: - - system - - db - actions: - type: array - items: - type: string - enum: - - read - - write - resource_names: - type: array - items: - type: string - description: List of resource names. Use "*" for all resources. - expiry_secs: - type: integer - description: The expiration time in seconds. 
- example: - token_name: All system information - permissions: - - resource_type: system - actions: - - read - resource_names: - - "*" - expiry_secs: 300000 - ContentEncoding: - type: string - enum: - - gzip - - identity - description: > - Content coding. - - Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - - - #### Multi-member gzip support - - - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC - 1952](https://www.rfc-editor.org/rfc/rfc1952)). - - This allows you to: - - - Concatenate multiple gzip files and send them in a single request - - - Maintain compatibility with InfluxDB v1 and v2 write endpoints - - - Simplify batch operations using standard compression tools - default: identity - LineProtocol: - type: string - enum: - - text/plain - - text/plain; charset=utf-8 - description: | - `text/plain` is the content type for line protocol. `UTF-8` is the default character set. - default: text/plain; charset=utf-8 - ContentLength: - type: integer - description: The length in decimal number of octets. - Database: - type: string - AcceptPartial: - type: boolean - default: true - description: Accept partial writes. - Format: - type: string - enum: - - json - - csv - - parquet - - json_lines - - jsonl - - pretty - description: |- - The format of data in the response body. - `json_lines` is the canonical name; `jsonl` is accepted as an alias. - NoSync: - type: boolean - default: false - description: | - Acknowledges a successful write without waiting for WAL persistence. - - #### Related - - - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) - - [Data durability](/influxdb3/enterprise/reference/internals/durability/) - PrecisionWriteCompatibility: - enum: - - ms - - s - - us - - u - - ns - - "n" - type: string - description: |- - The precision for unix timestamps in the line protocol batch. 
- Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. - Optional — defaults to nanosecond precision if omitted. - PrecisionWrite: - enum: - - auto - - nanosecond - - microsecond - - millisecond - - second - type: string - description: | - The precision for unix timestamps in the line protocol batch. - - Supported values: - - `auto` (default): Automatically detects precision based on timestamp magnitude - - `nanosecond`: Nanoseconds - - `microsecond`: Microseconds - - `millisecond`: Milliseconds - - `second`: Seconds - QueryRequestObject: - type: object - properties: - db: - description: | - The name of the database to query. - Required if the query (`q`) doesn't specify the database. - type: string - q: - description: The query to execute. - type: string - format: - description: The format of the query results. - type: string - enum: - - json - - csv - - parquet - - json_lines - - jsonl - - pretty - params: - description: | - Additional parameters for the query. - Use this field to pass query parameters. - type: object - additionalProperties: true - required: - - db - - q - example: - db: mydb - q: SELECT * FROM mytable - format: json - params: {} - CreateDatabaseRequest: - type: object - properties: - db: - type: string - pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ - description: |- - The database name. Database names cannot contain underscores (_). - Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. - retention_period: - type: string - description: |- - The retention period for the database. Specifies how long data should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). 
- example: 7d - required: - - db - CreateTableRequest: - type: object - properties: - db: - type: string - table: - type: string - tags: - type: array - items: - type: string - fields: - type: array - items: - type: object - properties: - name: - type: string - type: - type: string - enum: - - utf8 - - int64 - - uint64 - - float64 - - bool - required: - - name - - type - retention_period: - type: string - description: |- - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - - tags - - fields - DistinctCacheCreateRequest: - type: object - properties: - db: - type: string - table: - type: string - node_spec: - $ref: "#/components/schemas/ApiNodeSpec" - name: - type: string - description: Optional cache name. - columns: - type: array - items: - type: string - max_cardinality: - type: integer - description: Optional maximum cardinality. - max_age: - type: integer - description: Optional maximum age in seconds. - required: - - db - - table - - columns - example: - db: mydb - table: mytable - columns: - - tag1 - - tag2 - max_cardinality: 1000 - max_age: 3600 - LastCacheCreateRequest: - type: object - properties: - db: - type: string - table: - type: string - node_spec: - $ref: "#/components/schemas/ApiNodeSpec" - name: - type: string - description: Optional cache name. - key_columns: - type: array - items: - type: string - description: Optional list of key columns. - value_columns: - type: array - items: - type: string - description: Optional list of value columns. - count: - type: integer - description: Optional count. - ttl: - type: integer - description: Optional time-to-live in seconds. 
- required: - - db - - table - example: - db: mydb - table: mytable - key_columns: - - tag1 - value_columns: - - field1 - count: 100 - ttl: 3600 - ProcessingEngineTriggerRequest: - type: object - properties: - db: - type: string - plugin_filename: - type: string - description: | - The path and filename of the plugin to execute--for example, - `schedule.py` or `endpoints/report.py`. - The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. - - The plugin file must implement the trigger interface associated with the trigger's specification. - node_spec: - $ref: "#/components/schemas/ApiNodeSpec" - trigger_name: - type: string - trigger_settings: - description: | - Configuration for trigger error handling and execution behavior. - allOf: - - $ref: "#/components/schemas/TriggerSettings" - trigger_specification: - description: > - Specifies when and how the processing engine trigger should be invoked. - - - ## Supported trigger specifications: - - - ### Cron-based scheduling - - Format: `cron:CRON_EXPRESSION` - - - Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): - - ``` - - ┌───────────── second (0-59) - - │ ┌───────────── minute (0-59) - - │ │ ┌───────────── hour (0-23) - - │ │ │ ┌───────────── day of month (1-31) - - │ │ │ │ ┌───────────── month (1-12) - - │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) - - │ │ │ │ │ │ - - * * * * * * - - ``` - - Examples: - - - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM - - - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM - - - `cron:0 0 0 1 * *` - First day of every month at midnight - - - ### Interval-based scheduling - - Format: `every:DURATION` - - - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` - (years): - - - `every:30s` - Every 30 seconds - - - `every:5m` - Every 5 minutes - - - `every:1h` - Every hour - - - `every:1d` - Every day - - - `every:1w` - Every week - - - 
`every:1M` - Every month - - - `every:1y` - Every year - - - **Maximum interval**: 1 year - - - ### Table-based triggers - - - `all_tables` - Triggers on write events to any table in the database - - - `table:TABLE_NAME` - Triggers on write events to a specific table - - - ### On-demand triggers - - Format: `request:REQUEST_PATH` - - - Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: - - - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` - - - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` - pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ - example: cron:0 0 6 * * 1-5 - trigger_arguments: - type: object - additionalProperties: true - description: Optional arguments passed to the plugin. - disabled: - type: boolean - default: false - description: Whether the trigger is disabled. - required: - - db - - plugin_filename - - trigger_name - - trigger_settings - - trigger_specification - - disabled - TriggerSettings: - type: object - description: | - Configuration settings for processing engine trigger error handling and execution behavior. - properties: - run_async: - type: boolean - default: false - description: | - Whether to run the trigger asynchronously. - When `true`, the trigger executes in the background without blocking. - When `false`, the trigger executes synchronously. - error_behavior: - type: string - enum: - - Log - - Retry - - Disable - description: | - Specifies how to handle errors that occur during trigger execution: - - `Log`: Log the error and continue (default) - - `Retry`: Retry the trigger execution - - `Disable`: Disable the trigger after an error - default: Log - required: - - run_async - - error_behavior - ApiNodeSpec: - x-enterprise-only: true - type: object - description: | - Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. 
- Use this to control which node(s) should handle the cache or trigger. - properties: - node_id: - type: string - description: | - The ID of a specific node in the cluster. - If specified, the cache or trigger will only be created on this node. - node_group: - type: string - description: | - The name of a node group in the cluster. - If specified, the cache or trigger will be created on all nodes in this group. - WALPluginTestRequest: - type: object - description: | - Request body for testing a write-ahead logging (WAL) plugin. - properties: - filename: - type: string - description: | - The path and filename of the plugin to test. - database: - type: string - description: | - The database name to use for the test. - input_lp: - type: string - description: | - Line protocol data to use as input for the test. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - - input_lp - SchedulePluginTestRequest: - type: object - description: | - Request body for testing a scheduling plugin. - properties: - filename: - type: string - description: | - The path and filename of the plugin to test. - database: - type: string - description: | - The database name to use for the test. - schedule: - type: string - description: | - Optional schedule specification in cron or interval format. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - PluginFileRequest: - type: object - description: | - Request body for updating a plugin file. 
- properties: - plugin_name: - type: string - description: | - The name of the plugin file to update. - content: - type: string - description: | - The content of the plugin file. - required: - - plugin_name - - content - PluginDirectoryRequest: - type: object - description: | - Request body for updating plugin directory with multiple files. - properties: - plugin_name: - type: string - description: | - The name of the plugin directory to update. - files: - type: array - items: - $ref: "#/components/schemas/PluginFileEntry" - description: | - List of plugin files to include in the directory. - required: - - plugin_name - - files - PluginFileEntry: - type: object - description: | - Represents a single file in a plugin directory. - properties: - content: - type: string - description: | - The content of the file. - relative_path: - type: string - description: The relative path of the file within the plugin directory. - required: - - relative_path - - content - ShowDatabasesResponse: - type: object - properties: - databases: - type: array - items: - type: string - QueryResponse: - type: object - properties: - results: - type: array - items: - type: object - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - "2024-02-02T12:00:00Z" - - 42 - ErrorMessage: - type: object - properties: - error: - type: string - data: - type: object - nullable: true - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - readOnly: true - type: string - err: - description: Stack of errors that occurred during processing of the request. Useful for debugging. - readOnly: true - type: string - line: - description: First line in the request body that contains malformed data. - format: int32 - readOnly: true - type: integer - message: - description: Human-readable message. 
- readOnly: true - type: string - op: - description: Describes the logical code operation when the error occurred. Useful for debugging. - readOnly: true - type: string - required: - - code - EpochCompatibility: - description: | - A unix timestamp precision. - - `h` for hours - - `m` for minutes - - `s` for seconds - - `ms` for milliseconds - - `u` or `µ` for microseconds - - `ns` for nanoseconds - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - UpdateDatabaseRequest: - type: object - properties: - retention_period: - type: string - description: | - The retention period for the database. Specifies how long data should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 7d - description: Request schema for updating database configuration. - UpdateTableRequest: - type: object - properties: - db: - type: string - description: The name of the database containing the table. - table: - type: string - description: The name of the table to update. - retention_period: - type: string - description: | - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d - required: - - db - - table - description: Request schema for updating table configuration. - LicenseResponse: - type: object - properties: - license_type: - type: string - description: The type of license (for example, "enterprise", "trial"). - example: enterprise - expires_at: - type: string - format: date-time - description: The expiration date of the license in ISO 8601 format. - example: "2025-12-31T23:59:59Z" - features: - type: array - items: - type: string - description: List of features enabled by the license. - example: - - clustering - - processing_engine - - advanced_auth - status: - type: string - enum: - - active - - expired - - invalid - description: The current status of the license. 
- example: active - description: Response schema for license information. - CreateTokenWithPermissionsRequest: - type: object - properties: - token_name: - type: string - description: The name for the resource token. - permissions: - type: array - items: - $ref: "#/components/schemas/PermissionDetailsApi" - description: List of permissions to grant to the token. - expiry_secs: - type: integer - description: Optional expiration time in seconds. - nullable: true - required: - - token_name - - permissions - PermissionDetailsApi: - type: object - properties: - resource_type: - type: string - enum: - - system - - db - description: The type of resource. - resource_names: - type: array - items: - type: string - description: List of resource names. Use "*" for all resources. - actions: - type: array - items: - type: string - enum: - - read - - write - description: List of actions to grant. - required: - - resource_type - - resource_names - - actions - FileIndexCreateRequest: - type: object - description: Request body for creating a file index. - properties: - db: - type: string - description: The database name. - table: - type: string - description: The table name. If omitted, the file index applies to the database. - nullable: true - columns: - type: array - items: - type: string - description: The columns to use for the file index. - required: - - db - - columns - example: - db: mydb - table: mytable - columns: - - tag1 - - tag2 - FileIndexDeleteRequest: - type: object - description: Request body for deleting a file index. - properties: - db: - type: string - description: The database name. - table: - type: string - description: The table name. If omitted, deletes the database-level file index. - nullable: true - required: - - db - example: - db: mydb - table: mytable - StopNodeRequest: - type: object - description: Request body for marking a node as stopped in the catalog. - properties: - node_id: - type: string - description: The ID of the node to mark as stopped. 
- required: - - node_id - example: - node_id: node-1 - responses: - Unauthorized: - description: Unauthorized access. - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorMessage" - BadRequest: - description: | - Request failed. Possible reasons: - - - Invalid database name - - Malformed request body - - Invalid timestamp precision - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorMessage" - Forbidden: - description: Access denied. - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorMessage" - NotFound: - description: Resource not found. - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorMessage" - headers: - ClusterUUID: - description: | - The catalog UUID of the InfluxDB instance. - This header is included in all HTTP API responses and enables you to: - - Identify which cluster instance handled the request - - Monitor deployments across multiple InfluxDB instances - - Debug and troubleshoot distributed systems - schema: - type: string - format: uuid - example: 01234567-89ab-cdef-0123-456789abcdef - securitySchemes: - BasicAuthentication: - type: http - scheme: basic - description: >- - Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - - - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints - in InfluxDB 3. - - - When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an - authorized token - - and ignores the `username` part of the decoded credential. 
- - - ### Syntax - - - ```http - - Authorization: Basic - - ``` - - - ### Example - - - ```bash - - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ - --user "":"AUTH_TOKEN" \ - --header "Content-type: text/plain; charset=utf-8" \ - --data-binary 'home,room=kitchen temp=72 1641024000' - ``` - - - Replace the following: - - - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - QuerystringAuthentication: - type: apiKey - in: query - name: u=&p= - description: >- - Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - - - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and - [`/query`](#operation/GetV1Query) endpoints. - - - When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token - - and ignores the `u` (_username_) query parameter. - - - ### Syntax - - - ```http - - https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN - - https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN - - ``` - - - ### Examples - - - ```bash - - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ - --header "Content-type: text/plain; charset=utf-8" \ - --data-binary 'home,room=kitchen temp=72 1641024000' - ``` - - - Replace the following: - - - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - - - ```bash - - ####################################### - - # Use an InfluxDB 1.x compatible username and password - - # to query the InfluxDB v1 HTTP API - - ####################################### - - # Use authentication query parameters: - - # ?p=AUTH_TOKEN - - ####################################### - - - curl --get "http://localhost:8181/query" \ - --data-urlencode "p=AUTH_TOKEN" \ - --data-urlencode "db=DATABASE_NAME" \ - 
--data-urlencode "q=SELECT * FROM MEASUREMENT" - ``` - - - Replace the following: - - - - **`DATABASE_NAME`**: the database to query - - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database - BearerAuthentication: - type: http - scheme: bearer - bearerFormat: JWT - description: | - - Use the OAuth Bearer authentication - scheme to provide an authorization token to InfluxDB 3. - - Bearer authentication works with all endpoints. - - In your API requests, send an `Authorization` header. - For the header value, provide the word `Bearer` followed by a space and a database token. - - ### Syntax - - ```http - Authorization: Bearer AUTH_TOKEN - ``` - - ### Example - - ```bash - curl http://localhost:8181/api/v3/query_influxql \ - --header "Authorization: Bearer AUTH_TOKEN" - ``` - TokenAuthentication: - description: |- - Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. - - The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. - - In your API requests, send an `Authorization` header. - For the header value, provide the word `Token` followed by a space and a database token. - The word `Token` is case-sensitive. - - ### Syntax - - ```http - Authorization: Token AUTH_TOKEN - ``` - - ### Example - - ```sh - ######################################################## - # Use the Token authentication scheme with /api/v2/write - # to write data. 
- ######################################################## - - curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ - --header "Authorization: Token AUTH_TOKEN" \ - --data-binary 'home,room=kitchen temp=72 1463683075' - ``` - in: header - name: Authorization - type: apiKey -x-tagGroups: - - name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Cache data - - Common parameters - - Response codes - - Compatibility endpoints - - Database - - Processing engine - - Server information - - Table - - Token - - Query data - - Write data diff --git a/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml b/api-docs/influxdb3/enterprise/v3/ref.yml similarity index 74% rename from api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml rename to api-docs/influxdb3/enterprise/v3/ref.yml index f413341474..7262b9e80f 100644 --- a/api-docs/influxdb3/core/v3/influxdb3-core-openapi.yaml +++ b/api-docs/influxdb3/enterprise/v3/ref.yml @@ -1,24 +1,27 @@ openapi: 3.0.3 info: - title: InfluxDB 3 Core API Service + title: InfluxDB 3 Enterprise API Service description: | - The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for - interacting with InfluxDB 3 Core databases and resources. + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. 
Use this API to: - - Write data to InfluxDB 3 Core databases + - Write data to InfluxDB 3 Enterprise databases - Query data using SQL or InfluxQL - Process data using Processing engine plugins - Manage databases, tables, and Processing engine triggers - Perform administrative tasks and access system information The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Core native endpoints - - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - [Download the OpenAPI specification](/openapi/influxdb3-core-openapi.yaml) - version: v3.8.0 + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/api/v2/write`: v2-compatible write endpoint + - `/write`, `/query`: v1-compatible endpoints + + + version: '3.7.0' license: name: MIT url: https://opensource.org/licenses/MIT @@ -26,16 +29,18 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com - x-source-hash: sha256:1259b96096eab6c8dbf3f76c974924f124e9b3e08eedc6b0c9a66d3108857c52 + x-related: + - title: Migrate from InfluxDB v1 or v2 + href: /influxdb3/enterprise/get-started/migrate-from-influxdb-v1-v2/ servers: - url: https://{baseurl} - description: InfluxDB 3 Core API URL + description: InfluxDB 3 Enterprise API URL variables: baseurl: enum: - localhost:8181 default: localhost:8181 - description: InfluxDB 3 Core URL + description: InfluxDB 3 Enterprise URL security: - BearerAuthentication: [] - TokenAuthentication: [] @@ -48,19 +53,14 @@ tags: | Authentication scheme | Works with | |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring 
authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + | Bearer authentication | All endpoints | + | Token authentication | v1, v2 endpoints | + | Basic authentication | v1 endpoints | + | Querystring authentication | v1 endpoints | x-traitTag: true - x-related: - - title: Authenticate v1 API requests - href: /influxdb3/core/guides/api-compatibility/v1/ - - title: Manage tokens - href: /influxdb3/core/admin/tokens/ - name: Cache data - description: |- + description: | Manage the in-memory cache. #### Distinct Value Cache @@ -87,141 +87,64 @@ tags: what fields to cache, what tags to use to identify each series, and the number of values to cache for each unique series. An LVC is associated with a table, which can have multiple LVCs. - x-related: - - title: Manage the Distinct Value Cache - href: /influxdb3/core/admin/distinct-value-cache/ - - title: Manage the Last Value Cache - href: /influxdb3/core/admin/last-value-cache/ - - name: Compatibility endpoints - description: > - InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. - - - ### Write data using v1- or v2-compatible endpoints - - - - [`/api/v2/write` endpoint](#operation/PostV2Write) - for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 - write workloads to InfluxDB 3. - - - For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). - - - All endpoints accept the same line protocol format. - - - ### Query data - - - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads - using InfluxQL. - - - For new workloads, use one of the following: + #### Related guides - - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. 
- - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using - InfluxQL. - - - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using - Flight APIs, see [InfluxDB 3 client - libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). - - - ### Server information - - - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x - clients. - x-related: - - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ + - [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/) + - [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/) - name: Database description: Manage databases - - description: > + - description: | Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. - ### Common parameters - The following table shows common parameters used by many InfluxDB API endpoints. - Many endpoints may require other parameters in the query string or in the - request body that perform functions specific to those endpoints. - | Query parameter | Value type | Description | - |:------------------------ |:--------------------- |:-------------------------------------------| - | `db` | string | The database name | - InfluxDB HTTP API endpoints use standard HTTP request and response headers. - The following table shows common headers used by many InfluxDB API endpoints. - Some endpoints may use other headers that perform functions more specific to those endpoints--for example, - - the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the - request body. - + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. 
| Header | Value type | Description | - |:------------------------ |:--------------------- |:-------------------------------------------| - | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | - | `Content-Length` | integer | The size of the entity-body, in bytes. | - | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true - name: Processing engine - description: > + description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - - InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and - trigger Python plugins in response to events in your database. - + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - - To get started with the processing engine, see the [Processing engine and Python - plugins](/influxdb3/core/processing-engine/) guide. - x-related: - - title: Processing engine and Python plugins - href: /influxdb3/core/plugins/ + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. - name: Query data description: Query data using SQL or InfluxQL - x-related: - - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ - name: Quick start description: | - 1. [Create an admin token](#section/Authentication) to authorize API requests. + 1. Create an admin token to authorize API requests. ```bash curl -X POST "http://localhost:8181/api/v3/configure/token/admin" ``` - 2. 
[Check the status](#section/Server-information) of the InfluxDB server. + 2. Check the status of the InfluxDB server. ```bash curl "http://localhost:8181/health" \ --header "Authorization: Bearer ADMIN_TOKEN" ``` - 3. [Write data](#operation/PostWriteLP) to InfluxDB. + 3. Write data to InfluxDB. ```bash curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" @@ -232,7 +155,7 @@ tags: If all data is written, the response is `204 No Content`. - 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + 4. Query data from InfluxDB. ```bash curl -G "http://localhost:8181/api/v3/query_sql" \ @@ -248,7 +171,7 @@ tags: {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} ``` - For more information about using InfluxDB 3 Core, see the [Get started](/influxdb3/core/get-started/) guide. + For more information, see the [Get started](/influxdb3/enterprise/get-started/) guide. x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information @@ -258,7 +181,7 @@ tags: description: Manage tokens for authentication and authorization - name: Write data description: | - Write data to InfluxDB 3 using line protocol format. + Write data to InfluxDB 3 Enterprise using line protocol format. #### Timestamp precision across write APIs @@ -272,79 +195,157 @@ tags: | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + | **Hours** | ✅ `h` | ❌ No | ❌ No | | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | All timestamps are stored internally as nanoseconds. paths: - /api/v1/health: - get: - operationId: GetHealthV1 - summary: Health check (v1) + /write: + post: + operationId: PostV1Write + summary: Write line protocol (v1-compatible) description: | - Checks the status of the service. + Writes line protocol to the specified database. 
- Returns `OK` if the service is running. This endpoint does not return version information. - Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - responses: - "200": - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - tags: - - Server information - - Compatibility endpoints - /api/v2/write: - post: - operationId: PostV2Write + Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + Use query parameters to specify options for writing data. + + #### Related + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + parameters: + - $ref: '#/components/parameters/dbWriteParam' + - $ref: '#/components/parameters/compatibilityPrecisionParam' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: '#/components/schemas/LineProtocol' + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' responses: - "204": + '204': description: Success ("No Content"). All data in the batch is written and queryable. headers: cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": + $ref: '#/components/headers/ClusterUUID' + '400': + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. 
+ content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "dquote> home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + '401': + $ref: '#/components/responses/Unauthorized' + '403': description: Access denied. - "413": + '413': description: Request entity too large. + tags: + + - Write data + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + /api/v2/write: + post: + operationId: PostV2Write summary: Write line protocol (v2-compatible) - description: > + description: | Writes line protocol to the specified database. + This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x - client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to - InfluxDB. - + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. 
- #### Related - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type in: header description: | The content type of the request payload. schema: - $ref: "#/components/schemas/LineProtocol" + $ref: '#/components/schemas/LineProtocol' required: false - description: | The compression applied to the line protocol in the request payload. @@ -379,618 +380,848 @@ paths: enum: - application/json type: string - - name: bucket + - name: db in: query required: true schema: type: string - description: |- + description: | A database name. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. - - This parameter is named `bucket` for compatibility with InfluxDB v2 client libraries. - name: accept_partial in: query required: false schema: - $ref: "#/components/schemas/AcceptPartial" - - $ref: "#/components/parameters/compatibilityPrecisionParam" + $ref: '#/components/schemas/AcceptPartial' + - $ref: '#/components/parameters/compatibilityPrecisionParam' requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" + $ref: '#/components/requestBodies/lineProtocolRequestBody' + responses: + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. 
tags: - - Compatibility endpoints + - Write data - /api/v3/configure/database: - delete: - operationId: DeleteConfigureDatabase + x-related: + - title: Use compatibility APIs to write data + href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + /api/v3/write_lp: + post: + operationId: PostWriteLP + summary: Write line protocol + description: | + Writes line protocol to the specified database. + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + Use query parameters to specify options for writing data. + + #### Features + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + #### Auto precision detection + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + the timestamp precision based on the magnitude of the timestamp value: + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + - Larger timestamps → Nanosecond precision (no conversion needed) + + #### Related + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) parameters: - - $ref: "#/components/parameters/db" - - name: data_only + - $ref: '#/components/parameters/dbWriteParam' + - $ref: '#/components/parameters/accept_partial' + - $ref: 
'#/components/parameters/precisionParam' + - name: no_sync in: query - required: false schema: - type: boolean - default: false + $ref: '#/components/schemas/NoSync' + - name: Content-Type + in: header description: | - Delete only data while preserving the database schema and all associated resources - (tokens, triggers, last value caches, distinct value caches, processing engine configurations). - When `false` (default), the entire database is deleted. - - name: remove_tables - in: query - required: false + The content type of the request payload. schema: - type: boolean - default: false - description: | - Used with `data_only=true` to remove table resources (caches) while preserving - database-level resources (tokens, triggers, processing engine configurations). - Has no effect when `data_only=false`. - - name: hard_delete_at - in: query + $ref: '#/components/schemas/LineProtocol' required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. schema: type: string - format: date-time - description: |- - Schedule the database for hard deletion at the specified time. - If not provided, the database will be soft deleted. - Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). - - #### Deleting a database cannot be undone - - Deleting a database is a destructive action. - Once a database is deleted, data stored in that database cannot be recovered. 
- - - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' responses: - "200": - description: Success. Database deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Delete a database - description: | - Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the database schema and resources. + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. + '422': + description: Unprocessable entity. 
+ x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" tags: - - Database + - Write data + /api/v3/query_sql: get: - operationId: GetConfigureDatabase + operationId: GetExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. 
+ parameters: + - $ref: '#/components/parameters/db' + - $ref: '#/components/parameters/querySqlParam' + - $ref: '#/components/parameters/format' + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' responses: - "200": - description: Success. The response body contains the list of databases. + '200': + description: Success. The response body contains query results. content: application/json: schema: - $ref: "#/components/schemas/ShowDatabasesResponse" - "400": + $ref: '#/components/schemas/QueryResponse' + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - '2024-02-02T12:00:00Z' + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': description: Database not found. - summary: List databases - description: Retrieves a list of databases. - parameters: - - $ref: "#/components/parameters/formatRequired" - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Database + - Query data post: - operationId: PostConfigureDatabase - responses: - "200": - description: Success. Database created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: Database already exists. - summary: Create a database - description: Creates a new database in the system. 
+ operationId: PostExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateDatabaseRequest" - tags: - - Database - put: - operationId: update_database + $ref: '#/components/requestBodies/queryRequestBody' responses: - "200": - description: Success. The database has been updated. - "400": + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Update a database - description: | - Updates database configuration, such as retention period. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/UpdateDatabaseRequest" - tags: - - Database - /api/v3/configure/database/retention_period: - delete: - operationId: DeleteDatabaseRetentionPeriod - summary: Remove database retention period - description: | - Removes the retention period from a database, setting it to infinite retention. - parameters: - - $ref: "#/components/parameters/db" - responses: - "204": - description: Success. The database retention period has been removed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. 
tags: - - Database - /api/v3/configure/distinct_cache: - delete: - operationId: DeleteConfigureDistinctCache - responses: - "200": - description: Success. The distinct cache has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete distinct cache - description: Deletes a distinct cache. + - Query data + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. parameters: - - $ref: "#/components/parameters/db" - - name: table + - $ref: '#/components/parameters/dbQueryParam' + - name: q in: query required: true schema: type: string - description: The name of the table containing the distinct cache. - - name: name + - name: format in: query - required: true + required: false schema: type: string - description: The name of the distinct cache to delete. + - $ref: '#/components/parameters/AcceptQueryHeader' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Cache data - - Table + - Query data post: - operationId: PostConfigureDistinctCache - responses: - "201": - description: Success. The distinct cache has been created. - "400": - description: > - Bad request. - - - The server responds with status `400` if the request would overwrite an existing cache with a different - configuration. 
- "409": - description: Conflict. A distinct cache with this configuration already exists. - summary: Create distinct cache - description: Creates a distinct cache for a table. + operationId: PostExecuteQueryInfluxQL + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/DistinctCacheCreateRequest" - tags: - - Cache data - - Table - /api/v3/configure/last_cache: - delete: - operationId: DeleteConfigureLastCache + $ref: '#/components/requestBodies/queryRequestBody' responses: - "200": - description: Success. The last cache has been deleted. - "400": + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Delete last cache - description: Deletes a last cache. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + tags: + - Query data + /query: + get: + operationId: GetV1ExecuteQuery + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + Use query parameters to specify the database and the InfluxQL query. 
+ + #### Related + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The name of the table containing the last cache. - - name: name - in: query - required: true + - name: Accept + in: header schema: type: string - description: The name of the last cache to delete. - tags: - - Cache data - - Table - post: - operationId: PostConfigureLastCache - responses: - "201": - description: Success. Last cache created. - "400": - description: Bad request. A cache with this name already exists or the request is malformed. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Cache not found. - summary: Create last cache - description: Creates a last cache for a table. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LastCacheCreateRequest" - tags: - - Cache data - - Table - /api/v3/configure/plugin_environment/install_packages: - post: - operationId: PostInstallPluginPackages - summary: Install plugin packages - description: |- - Installs the specified Python packages into the processing engine plugin environment. + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. - This endpoint is synchronous and blocks until the packages are installed. - parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - packages: - type: array - items: - type: string - description: | - A list of Python package names to install. - Can include version specifiers (e.g., "scipy==1.9.0"). 
- example: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests - required: - - packages - example: - packages: - - influxdb3-python - - scipy - - pandas==1.5.0 - - requests + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. + required: true + schema: + type: string + - name: epoch + description: | + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + in: query + schema: + $ref: '#/components/schemas/EpochCompatibility' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) responses: - "200": - description: Success. The packages are installed. - "400": + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Processing engine - /api/v3/configure/plugin_environment/install_requirements: + - Query data + + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ post: - operationId: PostInstallPluginRequirements - summary: Install plugin requirements - description: > - Installs requirements from a requirements file (also known as a "pip requirements file") into the processing - engine plugin environment. - - - This endpoint is synchronous and blocks until the requirements are installed. - - - ### Related - + operationId: PostExecuteV1Query + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. 
- - [Processing engine and Python plugins](/influxdb3/core/plugins/) + #### Related - - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) - parameters: - - $ref: "#/components/parameters/ContentType" + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) requestBody: - required: true content: application/json: schema: type: object properties: - requirements_location: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. type: string + chunked: description: | - The path to the requirements file containing Python packages to install. - Can be a relative path (relative to the plugin directory) or an absolute path. - example: requirements.txt + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + + Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean required: - - requirements_location - example: - requirements_location: requirements.txt + - q + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. responses: - "200": - description: Success. The requirements have been installed. - "400": + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. tags: - - Processing engine - /api/v3/configure/processing_engine_trigger: - post: - operationId: PostConfigureProcessingEngineTrigger - summary: Create processing engine trigger - description: Creates a processing engine trigger with the specified plugin file and trigger specification. 
- requestBody: + - Query data + + x-related: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + /health: + get: + operationId: GetHealth + summary: Health check + description: | + Checks the status of the service. + + Returns `OK` if the service is running. This endpoint does not return version information. + Use the `/ping` endpoint to retrieve version details. + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + '200': + description: Service is running. Returns `OK`. + content: + text/plain: + schema: + type: string + example: OK + '401': + description: Unauthorized. Authentication is required. + '500': + description: Service is unavailable. + tags: + - Server information + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1) + description: Checks the status of the service. + responses: + '200': + description: Service is running. + '500': + description: Service is unavailable. + tags: + - Server information + + /ping: + get: + operationId: GetPing + tags: + - Server information + summary: Ping the server + description: | + Returns version information for the server. + + **Important**: Use a GET request. HEAD requests return `404 Not Found`. + + The response includes version information in both headers and the JSON body: + + - **Headers**: `x-influxdb-version` and `x-influxdb-build` + - **Body**: JSON object with `version`, `revision`, and `process_id` + + > **Note**: This endpoint requires authentication by default in InfluxDB 3 Enterprise. + responses: + '200': + description: Success. The response body contains server information. + headers: + x-influxdb-version: + description: The InfluxDB version number (for example, `3.8.0`). + schema: + type: string + example: '3.8.0' + x-influxdb-build: + description: The InfluxDB build type (`Core` or `Enterprise`). 
+ schema: + type: string + example: Enterprise + content: + application/json: + schema: + type: object + properties: + version: + type: string + description: The InfluxDB version number. + example: '3.8.0' + revision: + type: string + description: The git revision hash for the build. + example: '83b589b883' + process_id: + type: string + description: A unique identifier for the server process. + example: 'b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7' + '401': + description: Unauthorized. Authentication is required. + '404': + description: | + Not Found. Returned for HEAD requests. + Use a GET request to retrieve version information. + /metrics: + get: + operationId: GetMetrics + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + responses: + '200': + description: Success. The response body contains Prometheus-compatible server metrics. + tags: + - Server information + /api/v3/configure/database: + get: + operationId: GetConfigureDatabase + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: '#/components/parameters/formatRequired' + - name: show_deleted + in: query + required: false + schema: + type: boolean + default: false + description: | + Include soft-deleted databases in the response. + By default, only active databases are returned. + responses: + '200': + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: '#/components/schemas/ShowDatabasesResponse' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + post: + operationId: PostConfigureDatabase + summary: Create a database + description: Creates a new database in the system. 
+ requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/ProcessingEngineTriggerRequest" - examples: - schedule_cron: - summary: Schedule trigger using cron - description: > - In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. - - The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to - Friday). - value: - db: DATABASE_NAME - plugin_filename: schedule.py - trigger_name: schedule_cron_trigger - trigger_specification: cron:0 0 6 * * 1-5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every: - summary: Schedule trigger using interval - description: | - In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. - The duration `1h` means the trigger will run every hour. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_trigger - trigger_specification: every:1h - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_seconds: - summary: Schedule trigger using seconds interval - description: | - Example of scheduling a trigger to run every 30 seconds. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_30s_trigger - trigger_specification: every:30s - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - schedule_every_minutes: - summary: Schedule trigger using minutes interval - description: | - Example of scheduling a trigger to run every 5 minutes. - value: - db: mydb - plugin_filename: schedule.py - trigger_name: schedule_every_5m_trigger - trigger_specification: every:5m - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - all_tables: - summary: All tables trigger example - description: | - Trigger that fires on write events to any table in the database. 
- value: - db: mydb - plugin_filename: all_tables.py - trigger_name: all_tables_trigger - trigger_specification: all_tables - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - table_specific: - summary: Table-specific trigger example - description: | - Trigger that fires on write events to a specific table. - value: - db: mydb - plugin_filename: table.py - trigger_name: table_trigger - trigger_specification: table:sensors - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - api_request: - summary: On-demand request trigger example - description: | - Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. - value: - db: mydb - plugin_filename: request.py - trigger_name: hello_world_trigger - trigger_specification: request:hello-world - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_friday_afternoon: - summary: Cron trigger for Friday afternoons - description: | - Example of a cron trigger that runs every Friday at 2:30 PM. - value: - db: reports - plugin_filename: weekly_report.py - trigger_name: friday_report_trigger - trigger_specification: cron:0 30 14 * * 5 - disabled: false - trigger_settings: - run_async: false - error_behavior: Log - cron_monthly: - summary: Cron trigger for monthly execution - description: | - Example of a cron trigger that runs on the first day of every month at midnight. - value: - db: monthly_data - plugin_filename: monthly_cleanup.py - trigger_name: monthly_cleanup_trigger - trigger_specification: cron:0 0 0 1 * * - disabled: false - trigger_settings: - run_async: false - error_behavior: Log + $ref: '#/components/schemas/CreateDatabaseRequest' responses: - "200": - description: Success. Processing engine trigger created. - "400": + '201': + description: Success. Database created. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: Database already exists. tags: - - Processing engine + - Database delete: - operationId: DeleteConfigureProcessingEngineTrigger - summary: Delete processing engine trigger - description: Deletes a processing engine trigger. + operationId: DeleteConfigureDatabase + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the database schema and resources. parameters: - - $ref: "#/components/parameters/db" - - name: trigger_name - in: query - required: true - schema: - type: string - - name: force + - $ref: '#/components/parameters/db' + - name: data_only in: query required: false schema: type: boolean default: false description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. - responses: - "200": - description: Success. The processing engine trigger has been deleted. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. - tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/disable: - post: - operationId: PostDisableProcessingEngineTrigger - summary: Disable processing engine trigger - description: Disables a processing engine trigger. - parameters: - - name: db + Delete only data while preserving the database schema and all associated resources + (tokens, triggers, last value caches, distinct value caches, processing engine configurations). + When `false` (default), the entire database is deleted. + - name: remove_tables in: query - required: true + required: false schema: - type: string - description: The database name. 
- - name: trigger_name + type: boolean + default: false + description: | + Used with `data_only=true` to remove table resources (caches) while preserving + database-level resources (tokens, triggers, processing engine configurations). + Has no effect when `data_only=false`. + - name: hard_delete_at in: query - required: true + required: false schema: type: string - description: The name of the trigger. + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: - "200": - description: Success. The processing engine trigger has been disabled. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '200': + description: Success. Database deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. tags: - - Processing engine - /api/v3/configure/processing_engine_trigger/enable: - post: - operationId: PostEnableProcessingEngineTrigger - summary: Enable processing engine trigger - description: Enables a processing engine trigger. + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. parameters: - - name: db - in: query - required: true - schema: - type: string - description: The database name. - - name: trigger_name - in: query - required: true - schema: - type: string - description: The name of the trigger. + - $ref: '#/components/parameters/db' responses: - "200": - description: Success. 
The processing engine trigger is enabled. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Trigger not found. + '204': + description: Success. The database retention period has been removed. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. tags: - - Processing engine + - Database /api/v3/configure/table: + post: + operationId: PostConfigureTable + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateTableRequest' + responses: + '201': + description: Success. The table has been created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Table delete: operationId: DeleteConfigureTable + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + Use the `data_only` parameter to delete data while preserving the table schema and resources. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - - $ref: "#/components/parameters/db" + - $ref: '#/components/parameters/db' - name: table in: query required: true @@ -1012,1315 +1243,804 @@ paths: schema: type: string format: date-time - description: |- + description: | Schedule the table for hard deletion at the specified time. If not provided, the table will be soft deleted. Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). 
- - - Also accepts special string values: - - `now` — hard delete immediately - - `never` — soft delete only (default behavior) - - `default` — use the system default hard deletion time responses: - "200": + '200': description: Success (no content). The table has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": + '401': + $ref: '#/components/responses/Unauthorized' + '404': description: Table not found. - summary: Delete a table - description: | - Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. - Use the `hard_delete_at` parameter to schedule a hard deletion. - Use the `data_only` parameter to delete data while preserving the table schema and resources. - - #### Deleting a table cannot be undone - - Deleting a table is a destructive action. - Once a table is deleted, data stored in that table cannot be recovered. tags: - Table - post: - operationId: PostConfigureTable - responses: - "200": - description: Success. The table has been created. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - summary: Create a table - description: Creates a new table within a database. + patch: + operationId: PatchConfigureTable + summary: Update a table + description: | + Updates table configuration, such as retention period. requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/CreateTableRequest" + $ref: '#/components/schemas/UpdateTableRequest' + responses: + '200': + description: Success. The table has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. 
tags: - Table - /api/v3/configure/token: - delete: - operationId: DeleteToken + /api/v3/configure/database/{db}: + patch: + operationId: PatchConfigureDatabase + summary: Update a database + description: | + Updates database configuration, such as retention period. parameters: - - name: token_name - in: query + - name: db + in: path required: true schema: type: string - description: The name of the token to delete. - responses: - "200": - description: Success. The token has been deleted. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Token not found. - summary: Delete token - description: | - Deletes a token. - tags: - - Authentication - - Token - /api/v3/configure/token/admin: - post: - operationId: PostCreateAdminToken + description: The name of the database to update. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateDatabaseRequest' responses: - "201": - description: | - Success. The admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - summary: Create admin token - description: | - Creates an admin token. - An admin token is a special type of token that has full access to all resources in the system. + '200': + description: Success. The database has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. tags: - - Authentication - - Token - /api/v3/configure/token/admin/regenerate: - post: - operationId: PostRegenerateAdminToken - summary: Regenerate admin token + - Database + /api/v3/show/license: + get: + operationId: GetShowLicense + summary: Show license information description: | - Regenerates an admin token and revokes the previous token with the same name. 
- parameters: [] + Retrieves information about the current InfluxDB 3 Enterprise license. responses: - "201": - description: Success. The admin token has been regenerated. + '200': + description: Success. The response body contains license information. content: application/json: schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" + $ref: '#/components/schemas/LicenseResponse' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. tags: - - Authentication - - Token - /api/v3/configure/token/named_admin: + - Server information + /api/v3/configure/distinct_cache: post: - operationId: PostCreateNamedAdminToken - responses: - "201": - description: | - Success. The named admin token has been created. - The response body contains the token string and metadata. - content: - application/json: - schema: - $ref: "#/components/schemas/AdminTokenObject" - "401": - $ref: "#/components/responses/Unauthorized" - "409": - description: A token with this name already exists. - summary: Create named admin token - description: | - Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. + operationId: PostConfigureDistinctCache + summary: Create distinct cache + description: Creates a distinct cache for a table. tags: - - Authentication - - Token + - Cache data + - Table requestBody: required: true content: application/json: schema: - type: object - properties: - token_name: - type: string - description: The name for the admin token. - expiry_secs: - type: integer - description: Optional expiration time in seconds. If not provided, the token does not expire. - nullable: true - required: - - token_name - /api/v3/engine/{request_path}: - get: - operationId: GetProcessingEnginePluginRequest + $ref: '#/components/schemas/DistinctCacheCreateRequest' responses: - "200": - description: Success. 
The plugin request has been executed. - "400": - description: Malformed request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. - summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - + '201': + description: Success. The distinct cache has been created. + '204': + description: Not created. A distinct cache with this configuration already exists. + '400': + description: | + Bad request. - The response depends on the plugin implementation. - tags: - - Processing engine - post: - operationId: PostProcessingEnginePluginRequest + The server responds with status `400` if the request would overwrite an existing cache with a different configuration. + delete: + operationId: DeleteConfigureDistinctCache + summary: Delete distinct cache + description: Deletes a distinct cache. + parameters: + - $ref: '#/components/parameters/db' + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. responses: - "200": - description: Success. The plugin request has been executed. - "400": - description: Malformed request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not found. - "500": - description: Processing failure. 
- summary: On Request processing engine plugin request - description: > - Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. - - The request can include request headers, query string parameters, and a request body, which InfluxDB passes to - the plugin. - - - An On Request plugin implements the following signature: - - - ```python - - def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) - - ``` - - - The response depends on the plugin implementation. - parameters: - - $ref: "#/components/parameters/ContentType" - requestBody: - required: false - content: - application/json: - schema: - type: object - additionalProperties: true - tags: - - Processing engine - parameters: - - name: request_path - description: | - The path configured in the request trigger specification for the plugin. - - For example, if you define a trigger with the following: - - ```json - trigger_specification: "request:hello-world" - ``` - - then, the HTTP API exposes the following plugin endpoint: - - ``` - /api/v3/engine/hello-world - ``` - in: path - required: true - schema: - type: string - /api/v3/plugin_test/schedule: - post: - operationId: PostTestSchedulingPlugin - responses: - "200": - description: Success. The plugin test has been executed. - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test scheduling plugin - description: Executes a test of a scheduling plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/SchedulePluginTestRequest" - tags: - - Processing engine - /api/v3/plugin_test/wal: - post: - operationId: PostTestWALPlugin - responses: - "200": - description: Success. The plugin test has been executed. - "400": + '200': + description: Success. The distinct cache has been deleted. + '400': description: Bad request. 
- "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Plugin not enabled. - summary: Test WAL plugin - description: Executes a test of a write-ahead logging (WAL) plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/WALPluginTestRequest" - tags: - - Processing engine - /api/v3/plugins/directory: - put: - operationId: PutPluginDirectory - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginDirectoryRequest" - responses: - "200": - description: Success. The plugin directory has been updated. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - summary: Update a multi-file plugin directory - description: | - Replaces all files in a multi-file plugin directory. The - `plugin_name` must match a registered trigger name. Each entry in - the `files` array specifies a `relative_path` and `content`—the - server writes them into the trigger's plugin directory. - - Use this endpoint to update multi-file plugins (directories with - `__init__.py` and supporting modules). For single-file plugins, - use `PUT /api/v3/plugins/files` instead. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/plugins/files: + - Cache data + - Table + /api/v3/configure/last_cache: post: - operationId: create_plugin_file - summary: Create a plugin file - description: | - Creates a single plugin file in the plugin directory. Writes the - `content` to a file named after `plugin_name`. Does not require an - existing trigger—use this to upload plugin files before creating - triggers that reference them. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been created. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - tags: - - Processing engine - x-security-note: Requires an admin token - put: - operationId: PutPluginFile - summary: Update a plugin file - description: | - Updates a single plugin file for an existing trigger. The - `plugin_name` must match a registered trigger name—the server - resolves the trigger's `plugin_filename` and overwrites that file - with the provided `content`. - - To upload a new plugin file before creating a trigger, use - `POST /api/v3/plugins/files` instead. To update a multi-file - plugin directory, use `PUT /api/v3/plugins/directory`. + operationId: PostConfigureLastCache + summary: Create last cache + description: Creates a last cache for a table. requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/PluginFileRequest" - responses: - "200": - description: Success. The plugin file has been updated. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Forbidden. Admin token required. - "500": - description: Plugin not found. The `plugin_name` does not match any registered trigger. - tags: - - Processing engine - x-security-note: Requires an admin token - /api/v3/query_influxql: - get: - operationId: GetExecuteInfluxQLQuery + $ref: '#/components/schemas/LastCacheCreateRequest' responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": + '201': + description: Success. Last cache created. 
+ '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. + '409': + description: Cache already exists. + tags: + - Cache data + - Table + delete: + operationId: DeleteConfigureLastCache + summary: Delete last cache + description: Deletes a last cache. parameters: - - $ref: "#/components/parameters/dbQueryParam" - - name: q + - $ref: '#/components/parameters/db' + - name: table in: query required: true schema: type: string - - name: format - in: query - required: false - schema: - type: string - - $ref: "#/components/parameters/AcceptQueryHeader" - - name: params + description: The name of the table containing the last cache. + - name: name in: query - required: false + required: true schema: type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. - tags: - - Query data - post: - operationId: PostExecuteQueryInfluxQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. 
- summary: Execute InfluxQL query - description: Executes an InfluxQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" - tags: - - Query data - /api/v3/query_sql: - get: - operationId: GetExecuteQuerySQL + description: The name of the last cache to delete. responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - example: - results: - - series: - - name: mytable - columns: - - time - - value - values: - - - "2024-02-02T12:00:00Z" - - 42 - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": + '200': + description: Success. The last cache has been deleted. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/db" - - $ref: "#/components/parameters/querySqlParam" - - $ref: "#/components/parameters/format" - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - - name: params - in: query - required: false - schema: - type: string - description: JSON-encoded query parameters. Use this to pass bind parameters to parameterized queries. - description: JSON-encoded query parameters for parameterized queries. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. 
tags: - - Query data + - Cache data + - Table + /api/v3/configure/processing_engine_trigger: post: - operationId: PostExecuteQuerySQL - responses: - "200": - description: Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - text/csv: - schema: - type: string - application/vnd.apache.parquet: - schema: - type: string - application/jsonl: - schema: - type: string - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute SQL query - description: Executes an SQL query to retrieve data from the specified database. - parameters: - - $ref: "#/components/parameters/AcceptQueryHeader" - - $ref: "#/components/parameters/ContentType" - requestBody: - $ref: "#/components/requestBodies/queryRequestBody" - tags: - - Query data - /api/v3/write_lp: - post: - operationId: PostWriteLP - parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/accept_partial" - - $ref: "#/components/parameters/precisionParam" - - name: no_sync - in: query - schema: - $ref: "#/components/schemas/NoSync" - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. - Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" - responses: - "204": - description: Success ("No Content"). 
All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": - description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - "422": - description: Unprocessable entity. - summary: Write line protocol - description: > - Writes line protocol to the specified database. - - - This is the native InfluxDB 3 Core write endpoint that provides enhanced control - - over write behavior with advanced parameters for high-performance and fault-tolerant operations. - - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to - InfluxDB. - - Use query parameters to specify options for writing data. - - - #### Features - - - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response - times but sacrificing durability guarantees - - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - - - #### Auto precision detection - - - When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects - - the timestamp precision based on the magnitude of the timestamp value: - - - - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) - - - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) - - - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) - - - Larger timestamps → Nanosecond precision (no conversion needed) - - - #### Related - - - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) - requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" - tags: - - Write data - x-codeSamples: - - label: cURL 
- Basic write - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" - - label: cURL - Write with millisecond precision - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 1638360000000" - - label: cURL - Asynchronous write with partial acceptance - lang: Shell - source: > - curl --request POST - "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01 usage=85.2 - memory,host=server01 used=4096" - - label: cURL - Multiple measurements with tags - lang: Shell - source: | - curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --header "Content-Type: text/plain" \ - --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 - memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 - disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" - /health: - get: - operationId: GetHealth - responses: - "200": - description: Service is running. Returns `OK`. - content: - text/plain: - schema: - type: string - example: OK - "401": - description: Unauthorized. Authentication is required. - "500": - description: Service is unavailable. - summary: Health check - description: | - Checks the status of the service. - - Returns `OK` if the service is running. This endpoint does not return version information. 
- Use the [`/ping`](#operation/GetPing) endpoint to retrieve version details. - - > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - tags: - - Server information - /metrics: - get: - operationId: GetMetrics - responses: - "200": - description: Success - summary: Metrics - description: Retrieves Prometheus-compatible server metrics. - tags: - - Server information - /ping: - get: - operationId: GetPing - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - x-client-method: ping - summary: Ping the server + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger description: | - Returns version information for the server. - - **Important**: Use a GET request. HEAD requests return `404 Not Found`. - - The response includes version information in both headers and the JSON body: - - - **Headers**: `x-influxdb-version` and `x-influxdb-build` - - **Body**: JSON object with `version`, `revision`, and `process_id` + Creates a processing engine trigger with the specified plugin file and trigger specification. 
- > **Note**: This endpoint requires authentication by default in InfluxDB 3 Core. - tags: - - Server information - post: - operationId: ping - responses: - "200": - description: Success. The response body contains server information. - headers: - x-influxdb-version: - description: The InfluxDB version number (for example, `3.8.0`). - schema: - type: string - example: 3.8.0 - x-influxdb-build: - description: The InfluxDB build type (`Core` or `Enterprise`). - schema: - type: string - example: Core - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: The InfluxDB version number. - example: 3.8.0 - revision: - type: string - description: The git revision hash for the build. - example: 83b589b883 - process_id: - type: string - description: A unique identifier for the server process. - example: b756d9e0-cecd-4f72-b6d0-19e2d4f8cbb7 - "401": - description: Unauthorized. Authentication is required. - "404": - description: | - Not Found. Returned for HEAD requests. - Use a GET request to retrieve version information. - summary: Ping the server - description: Returns version information for the server. Accepts POST in addition to GET. - tags: - - Server information - /query: - get: - operationId: GetV1ExecuteQuery - responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. + ### Related guides - Default is `application/json`. - - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. 
- schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + examples: + schedule_cron: + summary: Schedule trigger using cron + description: | + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). + value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. 
+ value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. 
+ value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + disabled: false + trigger_settings: + run_async: false + error_behavior: Log + responses: + '200': + description: Success. Processing engine trigger created. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. - - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - - Use query parameters to specify the database and the InfluxQL query. - - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. - - - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. - - - Returns an error if the format is invalid or non-UTF8. - - in: query - name: chunked - description: | - If true, the response is divided into chunks of size `chunk_size`. 
- schema: - type: boolean - default: false - - in: query - name: chunk_size - description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - schema: - type: integer - default: 10000 - - in: query - name: db - description: The database to query. If not provided, the InfluxQL query string must specify the database. - schema: - type: string - format: InfluxQL - - in: query - name: pretty - description: | - If true, the JSON response is formatted in a human-readable format. - schema: - type: boolean - default: false - - in: query - name: q - description: The InfluxQL query string. + - $ref: '#/components/parameters/db' + - name: trigger_name + in: query required: true schema: type: string - - name: epoch - description: > - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the - specified precision - - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond - precision. - in: query - schema: - $ref: "#/components/schemas/EpochCompatibility" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp + - name: force in: query required: false schema: - type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: Authorization - in: header - required: false - schema: - type: string + type: boolean + default: false description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) + Force deletion of the trigger even if it has active executions. + By default, deletion fails if the trigger is currently executing. + responses: + '200': + description: Success. The processing engine trigger has been deleted. 
+ '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. tags: - - Query data - - Compatibility endpoints + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: post: - operationId: PostExecuteV1Query + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' responses: - "200": - description: | - Success. The response body contains query results. - content: - application/json: - schema: - $ref: "#/components/schemas/QueryResponse" - application/csv: - schema: - type: string - headers: - Content-Type: - description: > - The content type of the response. - - Default is `application/json`. - - - If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is - `application/csv` - - and the response is formatted as CSV. - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - "400": + '200': + description: Success. The processing engine trigger has been disabled. + '400': description: Bad request. - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "404": - description: Database not found. - "405": - description: Method not allowed. - "422": - description: Unprocessable entity. - summary: Execute InfluxQL query (v1-compatible) - description: > - Executes an InfluxQL query to retrieve data from the specified database. 
- - - #### Related - - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query - data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. parameters: - - name: Accept - in: header - schema: - type: string - default: application/json - enum: - - application/json - - application/csv - - text/csv - required: false - description: > - The content type that the client can understand. - + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + responses: + '200': + description: Success. The processing engine trigger is enabled. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: | + Installs the specified Python packages into the processing engine plugin environment. - If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is - formatted as CSV. + This endpoint is synchronous and blocks until the packages are installed. + ### Related guides - Returns an error if the format is invalid or non-UTF8. + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + parameters: + - $ref: '#/components/parameters/ContentType' requestBody: + required: true content: application/json: schema: type: object properties: - db: - type: string - description: The database to query. 
If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. - type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. - type: boolean - chunk_size: + packages: + type: array + items: + type: string description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. - - - - `h` for hours - - - `m` for minutes - - - `s` for seconds - - - `ms` for milliseconds - - - `u` or `µ` for microseconds - - - `ns` for nanoseconds + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + '200': + description: Success. The packages are installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: | + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + This endpoint is synchronous and blocks until the requirements are installed. - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) - with the specified precision + ### Related - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: - description: | - If true, the JSON response is formatted in a human-readable format. 
- type: boolean - required: - - q - application/x-www-form-urlencoded: + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: schema: type: object properties: - db: - type: string - description: The database to query. If not provided, the InfluxQL query string must specify the database. - q: - description: The InfluxQL query string. + requirements_location: type: string - chunked: - description: | - If true, the response is divided into chunks of size `chunk_size`. - type: boolean - chunk_size: description: | - The number of records that will go into a chunk. - This parameter is only used if `chunked=true`. - type: integer - default: 10000 - epoch: - description: > - A unix timestamp precision. + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + '200': + description: Success. The requirements have been installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/WALPluginTestRequest' + responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. 
+ tags: + - Processing engine + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SchedulePluginTestRequest' + responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. + tags: + - Processing engine + /api/v3/engine/{request_path}: + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. + For example, if you define a trigger with the following: - - `h` for hours + ```json + trigger_specification: "request:hello-world" + ``` - - `m` for minutes + then, the HTTP API exposes the following plugin endpoint: - - `s` for seconds + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + get: + operationId: GetProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. - - `ms` for milliseconds + An On Request plugin implements the following signature: - - `u` or `µ` for microseconds + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` - - `ns` for nanoseconds + The response depends on the plugin implementation. + responses: + '200': + description: Success. The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. 
+ '500': + description: Processing failure. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + An On Request plugin implements the following signature: - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) - with the specified precision + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with - nanosecond precision. - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string - pretty: - description: | - If true, the JSON response is formatted in a human-readable format. - type: boolean - required: - - q - application/vnd.influxql: + The response depends on the plugin implementation. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: false + content: + application/json: schema: - type: string - description: InfluxQL query string sent as the request body. + type: object + additionalProperties: true + responses: + '200': + description: Success. The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. + '500': + description: Processing failure. tags: - - Query data - - Compatibility endpoints - /write: + - Processing engine + /api/v3/configure/enterprise/token: post: - operationId: PostV1Write + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. 
+ A resource token is a token that has access to specific resources in the system. + + This endpoint is only available in InfluxDB 3 Enterprise. responses: - "204": - description: Success ("No Content"). All data in the batch is written and queryable. - headers: - cluster-uuid: - $ref: "#/components/headers/ClusterUUID" - "400": + '201': description: | - Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. - If a partial write occurred, then some points from the batch are written and queryable. - - The response body: - - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + Success. The resource token has been created. + The response body contains the token string and metadata. content: application/json: - examples: - rejectedAllPoints: - summary: Rejected all points in the batch - value: | - { - "error": "write of line protocol failed", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - partialWriteErrorWithRejectedPoints: - summary: Partial write rejected some points in the batch - value: | - { - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "dquote> home,room=Kitchen temp=hi", - "line_number": 2, - "error_message": "No fields were provided" - } - ] - } - "401": - $ref: "#/components/responses/Unauthorized" - "403": - description: Access denied. - "413": - description: Request entity too large. - summary: Write line protocol (v1-compatible) - description: > - Writes line protocol to the specified database. - - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x - client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. 
- - - Use this endpoint to send data in [line - protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - - Use query parameters to specify options for writing data. - - - #### Related - - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + schema: + $ref: '#/components/schemas/ResourceTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + responses: + '201': + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + '201': + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token: + delete: + operationId: DeleteToken + summary: Delete token + description: | + Deletes a token. 
parameters: - - $ref: "#/components/parameters/dbWriteParam" - - $ref: "#/components/parameters/compatibilityPrecisionParam" - - $ref: "#/components/parameters/v1UsernameParam" - - $ref: "#/components/parameters/v1PasswordParam" - - name: rp + - name: id in: query - required: false + required: true schema: type: string - description: | - Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. - - name: consistency + description: The ID of the token to delete. + responses: + '204': + description: Success. The token has been deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Token not found. + tags: + - Authentication + - Token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is a special type of admin token with a custom name for identification and management. + parameters: + - name: name in: query - required: false - schema: - type: string - description: | - Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. - - name: Authorization - in: header - required: false + required: true schema: type: string + description: The name for the admin token. + responses: + '201': description: | - Authorization header for token-based authentication. - Supported schemes: - - `Bearer AUTH_TOKEN` - OAuth bearer token scheme - - `Token AUTH_TOKEN` - InfluxDB v2 token scheme - - `Basic ` - Basic authentication (username is ignored) - - name: Content-Type - in: header - description: | - The content type of the request payload. - schema: - $ref: "#/components/schemas/LineProtocol" - required: false - - name: Accept - in: header - description: | - The content type that the client can understand. 
- Writes only return a response body if they fail (partially or completely)--for example, - due to a syntax problem or type mismatch. - schema: - type: string - default: application/json - enum: - - application/json - required: false - - $ref: "#/components/parameters/ContentEncoding" - - $ref: "#/components/parameters/ContentLength" + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: A token with this name already exists. + tags: + - Authentication + - Token + /api/v3/plugins/files: + put: + operationId: PutPluginFile + summary: Update plugin file + description: | + Updates a plugin file in the plugin directory. + x-security-note: Requires an admin token requestBody: - $ref: "#/components/requestBodies/lineProtocolRequestBody" + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PluginFileRequest' + responses: + '204': + description: Success. The plugin file has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. tags: - - Compatibility endpoints - - Write data + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + summary: Update plugin directory + description: | + Updates the plugin directory configuration. + x-security-note: Requires an admin token + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PluginDirectoryRequest' + responses: + '204': + description: Success. The plugin directory has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. 
+ tags: + - Processing engine components: parameters: AcceptQueryHeader: @@ -2344,7 +2064,7 @@ components: The compression applied to the line protocol in the request payload. To send a gzip payload, pass `Content-Encoding: gzip` header. schema: - $ref: "#/components/schemas/ContentEncoding" + $ref: '#/components/schemas/ContentEncoding' required: false ContentLength: name: Content-Length @@ -2352,7 +2072,7 @@ components: description: | The size of the entity-body, in bytes, sent to InfluxDB. schema: - $ref: "#/components/schemas/ContentLength" + $ref: '#/components/schemas/ContentLength' ContentType: name: Content-Type description: | @@ -2396,20 +2116,20 @@ components: in: query required: false schema: - $ref: "#/components/schemas/AcceptPartial" + $ref: '#/components/schemas/AcceptPartial' compatibilityPrecisionParam: name: precision in: query - required: false + required: true schema: - $ref: "#/components/schemas/PrecisionWriteCompatibility" + $ref: '#/components/schemas/PrecisionWriteCompatibility' description: The precision for unix timestamps in the line protocol batch. precisionParam: name: precision in: query - required: false + required: true schema: - $ref: "#/components/schemas/PrecisionWrite" + $ref: '#/components/schemas/PrecisionWrite' description: The precision for unix timestamps in the line protocol batch. querySqlParam: name: q @@ -2425,24 +2145,22 @@ components: in: query required: false schema: - $ref: "#/components/schemas/Format" + $ref: '#/components/schemas/Format' formatRequired: name: format in: query required: true schema: - $ref: "#/components/schemas/Format" + $ref: '#/components/schemas/Format' v1UsernameParam: name: u in: query required: false schema: type: string - description: > + description: | Username for v1 compatibility authentication. - - When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any - arbitrary string for compatibility with InfluxDB 1.x clients. 
+ When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. v1PasswordParam: name: p in: query @@ -2475,7 +2193,7 @@ components: content: application/json: schema: - $ref: "#/components/schemas/QueryRequestObject" + $ref: '#/components/schemas/QueryRequestObject' schemas: AdminTokenObject: type: object @@ -2498,31 +2216,61 @@ components: name: _admin token: apiv3_00xx0Xx0xx00XX0x0 hash: 00xx0Xx0xx00XX0x0 - created_at: "2025-04-18T14:02:45.331Z" + created_at: '2025-04-18T14:02:45.331Z' expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + resource_identifier: + type: array + items: + type: string + actions: + type: array + items: + type: string + enum: + - read + - write + expiry_secs: + type: integer + description: The expiration time in seconds. + example: + token_name: All system information + permissions: + - resource_type: system + resource_identifier: + - '*' + actions: + - read + expiry_secs: 300000 ContentEncoding: type: string enum: - gzip - identity - description: > + description: | Content coding. - Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - #### Multi-member gzip support - - InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC - 1952](https://www.rfc-editor.org/rfc/rfc1952)). - + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). 
This allows you to: - - Concatenate multiple gzip files and send them in a single request - - Maintain compatibility with InfluxDB v1 and v2 write endpoints - - Simplify batch operations using standard compression tools default: identity LineProtocol: @@ -2536,6 +2284,8 @@ components: ContentLength: type: integer description: The length in decimal number of octets. + Database: + type: string AcceptPartial: type: boolean default: true @@ -2546,12 +2296,9 @@ components: - json - csv - parquet - - json_lines - jsonl - - pretty - description: |- + description: | The format of data in the response body. - `json_lines` is the canonical name; `jsonl` is accepted as an alias. NoSync: type: boolean default: false @@ -2560,21 +2307,18 @@ components: #### Related - - [Use the HTTP API and client libraries to write data](/influxdb3/core/write-data/api-client-libraries/) - - [Data durability](/influxdb3/core/reference/internals/durability/) + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms - s - us - - u - ns - - "n" type: string - description: |- + description: | The precision for unix timestamps in the line protocol batch. - Use `ms` for milliseconds, `s` for seconds, `us` or `u` for microseconds, or `ns` or `n` for nanoseconds. - Optional — defaults to nanosecond precision if omitted. + Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. PrecisionWrite: enum: - auto @@ -2610,7 +2354,6 @@ components: - json - csv - parquet - - json_lines - jsonl - pretty params: @@ -2691,6 +2434,8 @@ components: type: string table: type: string + node_spec: + $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. 
@@ -2723,6 +2468,8 @@ components: type: string table: type: string + node_spec: + $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. @@ -2767,99 +2514,63 @@ components: The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. The plugin file must implement the trigger interface associated with the trigger's specification. + node_spec: + $ref: '#/components/schemas/ApiNodeSpec' trigger_name: type: string trigger_settings: description: | Configuration for trigger error handling and execution behavior. allOf: - - $ref: "#/components/schemas/TriggerSettings" + - $ref: '#/components/schemas/TriggerSettings' trigger_specification: - description: > + description: | Specifies when and how the processing engine trigger should be invoked. - ## Supported trigger specifications: - ### Cron-based scheduling - Format: `cron:CRON_EXPRESSION` - Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): - ``` - ┌───────────── second (0-59) - │ ┌───────────── minute (0-59) - │ │ ┌───────────── hour (0-23) - │ │ │ ┌───────────── day of month (1-31) - │ │ │ │ ┌───────────── month (1-12) - │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) - │ │ │ │ │ │ - * * * * * * - ``` - Examples: - - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM - - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM - - `cron:0 0 0 1 * *` - First day of every month at midnight - ### Interval-based scheduling - Format: `every:DURATION` - - Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` - (years): - + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): - `every:30s` - Every 30 seconds - - `every:5m` - Every 5 minutes - - `every:1h` - Every hour - - `every:1d` - Every day - - `every:1w` - Every week - - `every:1M` - Every month - - `every:1y` - Every year - **Maximum 
interval**: 1 year - ### Table-based triggers - - `all_tables` - Triggers on write events to any table in the database - - `table:TABLE_NAME` - Triggers on write events to a specific table - ### On-demand triggers - Format: `request:REQUEST_PATH` - Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: - - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` - - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 @@ -2905,6 +2616,22 @@ components: required: - run_async - error_behavior + ApiNodeSpec: + type: object + description: | + Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. + Use this to control which node(s) should handle the cache or trigger. + properties: + node_id: + type: string + description: | + The ID of a specific node in the cluster. + If specified, the cache or trigger will only be created on this node. + node_group: + type: string + description: | + The name of a node group in the cluster. + If specified, the cache or trigger will be created on all nodes in this group. WALPluginTestRequest: type: object description: | @@ -2994,7 +2721,7 @@ components: files: type: array items: - $ref: "#/components/schemas/PluginFileEntry" + $ref: '#/components/schemas/PluginFileEntry' description: | List of plugin files to include in the directory. required: @@ -3005,15 +2732,16 @@ components: description: | Represents a single file in a plugin directory. properties: + filename: + type: string + description: | + The name of the file within the plugin directory. content: type: string description: | The content of the file. - relative_path: - type: string - description: The relative path of the file within the plugin directory. 
required: - - relative_path + - filename - content ShowDatabasesResponse: type: object @@ -3037,7 +2765,7 @@ components: - time - value values: - - - "2024-02-02T12:00:00Z" + - - '2024-02-02T12:00:00Z' - 42 ErrorMessage: type: object @@ -3047,6 +2775,38 @@ components: data: type: object nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. + readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. + readOnly: true + type: string + required: + - code EpochCompatibility: description: | A unix timestamp precision. @@ -3075,13 +2835,62 @@ components: Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). 
+ example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: '2025-12-31T23:59:59Z' + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. + example: active + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' BadRequest: description: | Request failed. Possible reasons: @@ -3092,19 +2901,19 @@ components: content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' Forbidden: description: Access denied. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' NotFound: description: Resource not found. content: application/json: schema: - $ref: "#/components/schemas/ErrorMessage" + $ref: '#/components/schemas/ErrorMessage' headers: ClusterUUID: description: | @@ -3121,126 +2930,94 @@ components: BasicAuthentication: type: http scheme: basic - description: >- + description: | Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + Works with v1-compatible `/write` and `/query` endpoints. - Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints - in InfluxDB 3. 
- - - When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an - authorized token - + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token and ignores the `username` part of the decoded credential. - ### Syntax - ```http - Authorization: Basic - ``` - ### Example - ```bash - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ --user "":"AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` - Replace the following: + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - - **`DATABASE_NAME`**: your InfluxDB 3 Core database + #### Related guides - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: >- + description: | Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - - Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and - [`/query`](#operation/GetV1Query) endpoints. - + Querystring authentication works with v1-compatible `/write` and `/query` endpoints. When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token - and ignores the `u` (_username_) query parameter. 
- ### Syntax - ```http - https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN - https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN - ``` - ### Examples - ```bash - curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ --data-binary 'home,room=kitchen temp=72 1641024000' ``` - Replace the following: - - - **`DATABASE_NAME`**: your InfluxDB 3 Core database - + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - **`AUTH_TOKEN`**: an admin token or database token authorized for the database - ```bash - ####################################### - # Use an InfluxDB 1.x compatible username and password - # to query the InfluxDB v1 HTTP API - ####################################### - # Use authentication query parameters: - # ?p=AUTH_TOKEN - ####################################### - curl --get "http://localhost:8181/query" \ --data-urlencode "p=AUTH_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" ``` - Replace the following: - - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) BearerAuthentication: type: http scheme: bearer @@ -3248,7 +3025,7 @@ components: description: | Use the OAuth Bearer authentication - scheme to provide an authorization token to InfluxDB 3. + scheme to provide an authorization token to InfluxDB 3 Enterprise. Bearer authentication works with all endpoints. @@ -3268,10 +3045,10 @@ components: --header "Authorization: Bearer AUTH_TOKEN" ``` TokenAuthentication: - description: |- - Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + description: | + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3 Enterprise. 
- The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3 Enterprise. In your API requests, send an `Authorization` header. For the header value, provide the word `Token` followed by a space and a database token. @@ -3295,6 +3072,10 @@ components: --header "Authorization: Token AUTH_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` + + ### Related guides + + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) in: header name: Authorization type: apiKey @@ -3306,7 +3087,7 @@ x-tagGroups: - Cache data - Common parameters - Response codes - - Compatibility endpoints + - Database - Processing engine - Server information diff --git a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs b/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs deleted file mode 100644 index 38a752859f..0000000000 --- a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs +++ /dev/null @@ -1,94 +0,0 @@ -module.exports = SetTagGroups; - -const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.cjs') -/** - * Returns an object that defines handler functions for: - * - Operation nodes - * - DefinitionRoot (the root openapi) node - * The order of the two functions is significant. - * The Operation handler collects tags from the - * operation ('get', 'post', etc.) in every path. - * The DefinitionRoot handler, executed when - * the parser is leaving the root node, - * adds custom `tagGroups` content to `x-tagGroups` - * and sets the value of `All Endpoints` to the collected tags. - */ -/** @type {import('@redocly/openapi-cli').OasDecorator} */ -function SetTagGroups(data) { - if(!Array.isArray(data)) { - data = []; - } - - const ALL_ENDPOINTS = 'All endpoints'; - /** Tag names used for ad-hoc grouping of Operations and not specific to a resource or path. 
- * For example, these might be useful for UI navigation and filtering, but shouldn't appear - * in a list of resource tags. - */ - const nonResourceTags = [ - 'Data I/O endpoints', - 'Security and access endpoints', - 'System information endpoints' - ]; - - const allEndpointsGroup = data.filter(customGroup => customGroup.name === ALL_ENDPOINTS).pop(); - - function addAllEndpointTags(tagGroups) { - tagGroups.map(grp => { - if(grp.name === ALL_ENDPOINTS && !grp.tags.length) { - grp.tags = endpointTags; - } - return grp; - }) - } - - let tags = []; - /** Collect tags for each operation and convert string tags to object tags. **/ - return { - DefinitionRoot: { - Operation: { - leave(op, ctx) { - let opTags = op.tags?.map( - function(t) { - return typeof t === 'string' ? { name: t } : t; - } - ) || []; - - const { parent, key } = ctx; - if(allEndpointsGroup?.tags.length) { - opTags.forEach( - function(t) { - if(!isPresent(allEndpointsGroup.tags, t) && !isPresent(nonResourceTags, t)) { - /** If a custom allEndpointsGroup is defined and the current Operation - * contains a tag not specified in allEndpointsGroup, - * then delete the Operation from the doc so that it doesn't appear in other tags. - */ - delete parent[key]; - return; - } - } - ) - } - - tags = collect(tags, opTags); - } - }, - leave(root) { - root.tags = root.tags || []; - root.tags = collect(root.tags, tags) - .sort((a, b) => sortName(a, b)); - - endpointTags = root.tags - .filter(t => !t['x-traitTag']) - .filter(t => !isPresent(nonResourceTags, t)) - .map(t => getName(t)); - - /** In Redoc, if x-tagGroups is present, a tag (and its paths) - * must be assigned to an x-tagGroup in order to display. 
*/ - if(data.length) { - addAllEndpointTags(data); - root['x-tagGroups'] = data; - } - } - } - } -}; diff --git a/api-docs/openapi/plugins/docs-content.cjs b/api-docs/openapi/plugins/docs-content.cjs index 289ee62152..31a093b6e9 100644 --- a/api-docs/openapi/plugins/docs-content.cjs +++ b/api-docs/openapi/plugins/docs-content.cjs @@ -15,10 +15,8 @@ function getVersioned(filename) { const info = () => getVersioned('info.yml'); const servers = () => getVersioned('servers.yml'); -const tagGroups = () => getVersioned('tag-groups.yml'); module.exports = { info, servers, - tagGroups, } diff --git a/api-docs/openapi/plugins/docs-plugin.cjs b/api-docs/openapi/plugins/docs-plugin.cjs index a075e8bc2b..59c51ecdbc 100644 --- a/api-docs/openapi/plugins/docs-plugin.cjs +++ b/api-docs/openapi/plugins/docs-plugin.cjs @@ -1,4 +1,4 @@ -const {info, servers, tagGroups} = require('./docs-content.cjs'); +const {info, servers} = require('./docs-content.cjs'); const ReportTags = require('./rules/report-tags.cjs'); const ValidateServersUrl = require('./rules/validate-servers-url.cjs'); const RemovePrivatePaths = require('./decorators/paths/remove-private-paths.cjs'); @@ -7,7 +7,6 @@ const ReplaceShortcodes = require('./decorators/replace-shortcodes.cjs'); const SetInfo = require('./decorators/set-info.cjs'); const DeleteServers = require('./decorators/servers/delete-servers.cjs'); const SetServers = require('./decorators/servers/set-servers.cjs'); -const SetTagGroups = require('./decorators/tags/set-tag-groups.cjs'); const StripVersionPrefix = require('./decorators/paths/strip-version-prefix.cjs'); const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash.cjs'); @@ -31,7 +30,6 @@ const decorators = { 'strip-version-prefix': StripVersionPrefix, 'strip-trailing-slash': StripTrailingSlash, 'set-info': () => SetInfo(info()), - 'set-tag-groups': () => SetTagGroups(tagGroups()), 'replace-docs-url-shortcode': ReplaceShortcodes().docsUrl } }; @@ -52,7 +50,6 @@ module.exports = 
{ 'docs/strip-version-prefix': 'error', 'docs/strip-trailing-slash': 'error', 'docs/set-info': 'error', - 'docs/set-tag-groups': 'error', 'docs/replace-docs-url-shortcode': 'error' }, }, diff --git a/api-docs/scripts/README.md b/api-docs/scripts/README.md new file mode 100644 index 0000000000..2ea5f440d1 --- /dev/null +++ b/api-docs/scripts/README.md @@ -0,0 +1,390 @@ +# API Documentation Generation Scripts + +TypeScript-based scripts for generating Hugo data files and content pages from OpenAPI specifications. + +## Overview + +These scripts convert OpenAPI v3 specifications into Hugo-compatible data files and content pages for all InfluxDB products. + +### What Gets Generated + +For each product, the scripts generate: + +1. **OpenAPI Spec Copies** (static directory): + - `influxdb-{product}.yml` - YAML version of the spec + - `influxdb-{product}.json` - JSON version of the spec + +2. **Path Group Fragments** (static/openapi/{product}/paths/): + - Separate YAML and JSON files for each API path group + - Example: `ref-api-v2-buckets.yaml` and `ref-api-v2-buckets.json` + +3. **Article Metadata** (data/article-data/influxdb/{product}/): + - `articles.yml` - Hugo data file with article metadata + - `articles.json` - JSON version for programmatic access + +4. 
**Hugo Content Pages** (content/{product}/api/): + - Markdown files generated from article data + - One page per API path group + +## Quick Start + +### Build Scripts + +Compile TypeScript to JavaScript (required before running): + +```bash +yarn build:api-scripts +``` + +### Generate API Pages + +**Generate all products:** + +```bash +yarn build:api-pages +``` + +**Generate specific product(s):** + +```bash +yarn build:api-pages:product cloud-v2 +yarn build:api-pages:product cloud-v2 oss-v2 +``` + +## Supported Products + +| Product ID | Description | Spec File | Content Path | +| ---------------------- | ------------------------- | ------------------------------------------------ | -------------------------------------------- | +| `cloud-v2` | InfluxDB Cloud (v2 API) | `api-docs/cloud/v2/ref.yml` | `content/influxdb/cloud/api/v2` | +| `oss-v2` | InfluxDB OSS v2 | `api-docs/v2/ref.yml` | `content/influxdb/v2/api/v2` | +| `influxdb3-core` | InfluxDB 3 Core | `api-docs/influxdb3/core/v3/ref.yml` | `content/influxdb3/core/reference/api` | +| `influxdb3-enterprise` | InfluxDB 3 Enterprise | `api-docs/influxdb3/enterprise/v3/ref.yml` | `content/influxdb3/enterprise/reference/api` | +| `cloud-dedicated` | InfluxDB Cloud Dedicated | `api-docs/influxdb3/cloud-dedicated/v2/ref.yml` | `content/influxdb/cloud-dedicated/api` | +| `cloud-serverless` | InfluxDB Cloud Serverless | `api-docs/influxdb3/cloud-serverless/v2/ref.yml` | `content/influxdb/cloud-serverless/api` | +| `clustered` | InfluxDB Clustered | `api-docs/influxdb3/clustered/v2/ref.yml` | `content/influxdb/clustered/api` | + +## Architecture + +### TypeScript Files + +``` +api-docs/scripts/ +├── tsconfig.json # TypeScript configuration +├── generate-openapi-articles.ts # Main orchestration script +└── openapi-paths-to-hugo-data/ + ├── index.ts # Core conversion logic + └── package.json # Module dependencies +``` + +### Compiled JavaScript + +After running `yarn build:api-scripts`, compiled files are in: + +``` 
+api-docs/scripts/dist/ +├── generate-openapi-articles.js +├── generate-openapi-articles.d.ts +└── openapi-paths-to-hugo-data/ + ├── index.js + └── index.d.ts +``` + +## Script Details + +### generate-openapi-articles.ts + +Main orchestration script that processes products. + +**For each product, it:** + +1. Runs `getswagger.sh` to fetch/bundle the OpenAPI spec +2. Copies spec to `static/openapi/influxdb-{product}.yml` +3. Generates JSON version at `static/openapi/influxdb-{product}.json` +4. Generates path group fragments (YAML and JSON) +5. Creates article metadata (YAML and JSON) +6. Generates Hugo content pages + +**Usage:** + +```bash +node api-docs/scripts/dist/generate-openapi-articles.js [product-ids...] + +# Examples: +node api-docs/scripts/dist/generate-openapi-articles.js # All products +node api-docs/scripts/dist/generate-openapi-articles.js cloud-v2 # Single product +node api-docs/scripts/dist/generate-openapi-articles.js cloud-v2 oss-v2 # Multiple products +``` + +**Output:** + +``` +📋 Processing all products... + +================================================================================ +Processing InfluxDB Cloud (v2 API) +================================================================================ + +Fetching OpenAPI spec for cloud-v2... +✓ Copied spec to static/openapi/influxdb-cloud-v2.yml +✓ Generated JSON spec at static/openapi/influxdb-cloud-v2.json + +Generating OpenAPI path files in static/openapi/influxdb-cloud-v2/paths.... +Generated: ref-api-v2-buckets.yaml and ref-api-v2-buckets.json +... + +Generating OpenAPI article data in data/article-data/influxdb/cloud-v2... +Generated 32 articles in data/article-data/influxdb/cloud-v2 + +✅ Successfully processed InfluxDB Cloud (v2 API) +``` + +### openapi-paths-to-hugo-data/index.ts + +Core conversion library that processes OpenAPI specs. 
+ +**Key Functions:** + +- `generateHugoData(options)` - Main entry point +- `writePathOpenapis()` - Groups paths and writes fragments +- `createArticleDataForPathGroup()` - Generates article metadata + +**Path Grouping Logic:** + +Paths are grouped by their base path (first 3-4 segments, excluding placeholders): + +``` +/api/v2/buckets → api-v2-buckets +/api/v2/buckets/{id} → api-v2-buckets (same group) +/api/v2/authorizations → api-v2-authorizations +``` + +**Output Formats:** + +- **YAML**: Hugo-compatible data files +- **JSON**: Programmatic access and tooling + +## Development + +### Prerequisites + +- Node.js >= 16.0.0 +- Yarn package manager +- TypeScript installed (via root package.json) + +### Setup + +```bash +# Install dependencies (from repo root) +yarn install + +# Or install in the openapi-paths-to-hugo-data module +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn install +``` + +### TypeScript Configuration + +The scripts use a dedicated `tsconfig.json` with CommonJS output: + +```json +{ + "compilerOptions": { + "target": "ES2021", + "module": "CommonJS", + "outDir": "./dist", + "strict": true, + ... + } +} +``` + +### Making Changes + +1. Edit TypeScript files in `api-docs/scripts/` +2. Compile: `yarn build:api-scripts` +3. Test: `yarn build:api-pages:product cloud-v2` + +### Watch Mode + +For active development: + +```bash +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn build:watch +``` + +## Testing + +### Unit Test Example + +```javascript +const converter = require('./api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js'); + +converter.generateHugoData({ + specFile: 'api-docs/influxdb/cloud/v2/ref.yml', + dataOutPath: './test-output/paths', + articleOutPath: './test-output/articles' +}); +``` + +### Verify Output + +After generation, check: + +1. **Path fragments exist:** + ```bash + ls -l static/openapi/influxdb-cloud-v2/paths/ + ``` + +2. 
**Both formats generated:** + ```bash + ls -l static/openapi/influxdb-cloud-v2/paths/*.{yaml,json} + ``` + +3. **Article data created:** + ```bash + cat data/article-data/influxdb/cloud-v2/articles.yml + cat data/article-data/influxdb/cloud-v2/articles.json + ``` + +4. **Hugo pages generated:** + ```bash + ls -l content/influxdb/cloud/api/v2/ + ``` + +## Troubleshooting + +### TypeScript Compilation Errors + +```bash +# Clean and rebuild +rm -rf api-docs/scripts/dist +yarn build:api-scripts +``` + +### Missing Type Definitions + +```bash +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn add --dev @types/js-yaml @types/node +``` + +### Spec File Not Found + +Make sure to run `getswagger.sh` first: + +```bash +cd api-docs +./getswagger.sh cloud-v2 -B +``` + +### Path Grouping Issues + +The script groups paths by their first 3-4 segments. If you need different grouping: + +1. Edit `writePathOpenapis()` in `openapi-paths-to-hugo-data/index.ts` +2. Modify the `key.slice(0, 4)` logic +3. Rebuild: `yarn build:api-scripts` + +## Migration from JavaScript + +The original JavaScript files are preserved for reference: + +- `api-docs/scripts/generate-openapi-articles.js` (original) +- `api-docs/scripts/openapi-paths-to-hugo-data/index.js` (original) + +### Key Improvements + +1. **TypeScript**: Full type safety and IDE support +2. **Dual Formats**: Generates both YAML and JSON +3. **All Products**: Includes all 7 InfluxDB products +4. **Better Errors**: Clear error messages with product validation +5. **CLI Arguments**: Support for processing specific products +6. 
**Comprehensive Logging**: Progress indicators and status messages + +## Related Documentation + +- **API Docs README**: `api-docs/README.md` - Complete API documentation workflow +- **OpenAPI Plugins**: `api-docs/openapi/plugins/` - Custom processing plugins +- **Hugo Data to Pages**: `hugo-data-to-pages/` - Page generation from data files + +## Examples + +### Generate Only Cloud Products + +```bash +yarn build:api-pages:product cloud-v2 cloud-dedicated cloud-serverless +``` + +### Generate Only InfluxDB 3 Products + +```bash +yarn build:api-pages:product influxdb3-core influxdb3-enterprise +``` + +### Process Single Product Manually + +```bash +# Compile first +yarn build:api-scripts + +# Run for specific product +node api-docs/scripts/dist/generate-openapi-articles.js oss-v2 +``` + +## API Reference + +### generateHugoData(options) + +Generate Hugo data files from an OpenAPI specification. + +**Parameters:** + +- `options.specFile` (string) - Path to the OpenAPI spec file +- `options.dataOutPath` (string) - Output path for OpenAPI path fragments +- `options.articleOutPath` (string) - Output path for article metadata + +**Example:** + +```javascript +const { generateHugoData } = require('./api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js'); + +generateHugoData({ + specFile: 'api-docs/influxdb/cloud/v2/ref.yml', + dataOutPath: 'static/openapi/influxdb-cloud-v2/paths', + articleOutPath: 'data/article-data/influxdb/cloud-v2' +}); +``` + +### productConfigs + +Map of product configurations exported from `generate-openapi-articles.ts`. 
+ +**Type:** + +```typescript +type ProductConfig = { + specFile: string; // Path to OpenAPI spec + pagesDir: string; // Hugo content directory + description?: string; // Product description +}; + +const productConfigs: Record; +``` + +**Usage:** + +```javascript +const { productConfigs } = require('./api-docs/scripts/dist/generate-openapi-articles.js'); + +console.log(productConfigs['cloud-v2']); +// { +// specFile: 'api-docs/cloud/v2/ref.yml', +// pagesDir: 'content/influxdb/cloud/api/v2', +// description: 'InfluxDB Cloud (v2 API)' +// } +``` + +## License + +Same as parent docs-v2 repository (MIT). diff --git a/api-docs/scripts/dist/generate-openapi-articles.js b/api-docs/scripts/dist/generate-openapi-articles.js new file mode 100644 index 0000000000..3547ac5a37 --- /dev/null +++ b/api-docs/scripts/dist/generate-openapi-articles.js @@ -0,0 +1,838 @@ +#!/usr/bin/env node +"use strict"; +/** + * Generate OpenAPI Articles Script + * + * Generates Hugo data files and content pages from OpenAPI specifications + * for all InfluxDB products. + * + * Products are auto-discovered by scanning api-docs/ for .config.yml files. + * Hugo paths, menu keys, and static file names are derived from directory + * structure and existing Hugo frontmatter. + * + * This script: + * 1. Discovers products from .config.yml files + * 2. Cleans output directories (unless --no-clean) + * 3. Transforms documentation links in specs + * 4. Copies specs to static directory for download + * 5. Generates tag-based data fragments (YAML and JSON) + * 6. 
Generates Hugo content pages from article data + * + * Usage: + * node generate-openapi-articles.js # Clean and generate all products + * node generate-openapi-articles.js influxdb3-core # Clean and generate single product + * node generate-openapi-articles.js --no-clean # Generate without cleaning + * node generate-openapi-articles.js --dry-run # Preview what would be cleaned + * node generate-openapi-articles.js --skip-fetch # Skip getswagger.sh fetch step + * node generate-openapi-articles.js --validate-links # Validate documentation links + * + * @module generate-openapi-articles + */ +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.LINK_PATTERN = exports.MARKDOWN_FIELDS = void 0; +exports.discoverProducts = discoverProducts; +exports.processProduct = processProduct; +exports.processApiSection = processApiSection; +exports.transformDocLinks = transformDocLinks; +exports.validateDocLinks = validateDocLinks; +exports.resolveContentPath = resolveContentPath; +exports.deriveStaticDirName = deriveStaticDirName; +exports.getSectionSlug = getSectionSlug; +exports.parseApiEntry = parseApiEntry; +exports.readMenuKey = readMenuKey; +const child_process_1 = require("child_process"); +const path = __importStar(require("path")); +const fs = __importStar(require("fs")); +// Import the OpenAPI to Hugo converter +const openapiPathsToHugo = require('./openapi-paths-to-hugo-data/index.js'); +// --------------------------------------------------------------------------- +// Constants and CLI flags +// --------------------------------------------------------------------------- +const DOCS_ROOT = '.'; +const API_DOCS_ROOT = 'api-docs'; +const validateLinks = process.argv.includes('--validate-links'); +const skipFetch = process.argv.includes('--skip-fetch'); +const noClean = process.argv.includes('--no-clean'); +const dryRun = 
process.argv.includes('--dry-run'); +// --------------------------------------------------------------------------- +// Utility functions +// --------------------------------------------------------------------------- +/** + * Load products with API paths from data/products.yml. + * Returns a map of alt_link_key to API path for alt_links generation. + */ +function loadApiProducts() { + const yaml = require('js-yaml'); + const productsFile = path.join(DOCS_ROOT, 'data/products.yml'); + if (!fs.existsSync(productsFile)) { + console.warn('⚠️ products.yml not found, skipping alt_links generation'); + return new Map(); + } + const productsContent = fs.readFileSync(productsFile, 'utf8'); + const products = yaml.load(productsContent); + const apiProducts = new Map(); + for (const [, product] of Object.entries(products)) { + if (product.api_path && product.alt_link_key) { + apiProducts.set(product.alt_link_key, product.api_path); + } + } + return apiProducts; +} +const apiProductsMap = loadApiProducts(); +/** Execute a shell command and handle errors */ +function execCommand(command, description) { + try { + if (description) { + console.log(`\n${description}...`); + } + console.log(`Executing: ${command}\n`); + (0, child_process_1.execSync)(command, { stdio: 'inherit' }); + } + catch (error) { + console.error(`\n❌ Error executing command: ${command}`); + if (error instanceof Error) { + console.error(error.message); + } + process.exit(1); + } +} +// --------------------------------------------------------------------------- +// Auto-discovery functions +// --------------------------------------------------------------------------- +/** + * Recursively find all .config.yml files under api-docs/. + * Excludes the root api-docs/.config.yml and internal directories. 
+ */ +function findConfigFiles(rootDir) { + const configs = []; + const skipDirs = new Set([ + 'node_modules', + 'dist', + '_build', + 'scripts', + 'openapi', + ]); + function scanDir(dir, depth) { + if (depth > 5) + return; + let entries; + try { + entries = fs.readdirSync(dir, { withFileTypes: true }); + } + catch { + return; + } + for (const entry of entries) { + if (skipDirs.has(entry.name)) + continue; + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + scanDir(fullPath, depth + 1); + } + else if (entry.name === '.config.yml' && dir !== rootDir) { + configs.push(fullPath); + } + } + } + scanDir(rootDir, 0); + return configs.sort(); +} +/** + * Parse an API entry key like 'v3@3' into apiKey and version. + */ +function parseApiEntry(entry) { + const atIdx = entry.indexOf('@'); + if (atIdx === -1) { + return { apiKey: entry, version: '0' }; + } + return { + apiKey: entry.substring(0, atIdx), + version: entry.substring(atIdx + 1), + }; +} +/** + * Determine Hugo section slug from API key. + * 'management' → 'management-api', everything else → 'api'. + */ +function getSectionSlug(apiKey) { + if (apiKey === 'management') + return 'management-api'; + return 'api'; +} +/** + * Derive a clean static directory name from a product directory path. + * Replaces path separators and underscores with hyphens. + * + * @example 'influxdb3/core' → 'influxdb3-core' + * @example 'enterprise_influxdb/v1' → 'enterprise-influxdb-v1' + */ +function deriveStaticDirName(productDir) { + return productDir.replace(/[/_]/g, '-'); +} +/** + * Read the cascade.product field from a product's _index.md frontmatter. + * This value serves as the Hugo menu key. 
/**
 * Read the cascade.product field from a product's _index.md frontmatter.
 * This value serves as the Hugo menu key.
 *
 * @param pagesDir - Content directory containing the product's _index.md
 * @returns The menu key, or '' when the file is missing, has no frontmatter,
 *   or the frontmatter fails to parse. Fallback order: `cascade.product`,
 *   then the first key of the `menu` map.
 */
function readMenuKey(pagesDir) {
    const yaml = require('js-yaml');
    const indexFile = path.join(pagesDir, '_index.md');
    if (!fs.existsSync(indexFile)) {
        console.warn(`⚠️ Product index not found: ${indexFile}`);
        return '';
    }
    const content = fs.readFileSync(indexFile, 'utf8');
    // Non-greedy match of the first `---` ... `---` frontmatter fence
    // anchored at the start of the file.
    const fmMatch = content.match(/^---\n([\s\S]*?)\n---/);
    if (!fmMatch)
        return '';
    try {
        const fm = yaml.load(fmMatch[1]);
        const cascade = fm.cascade;
        if (cascade?.product)
            return cascade.product;
        // Fallback: first key of the menu map
        if (fm.menu && typeof fm.menu === 'object') {
            const keys = Object.keys(fm.menu);
            if (keys.length > 0)
                return keys[0];
        }
    }
    catch {
        console.warn(`⚠️ Could not parse frontmatter in ${indexFile}`);
    }
    return '';
}
/**
 * Check whether a hand-maintained api/_index.md already has a menu entry.
 * If so, the generator should skip adding its own parent menu entry.
 *
 * @param pagesDir - Content directory for the product
 * @returns true only when api/_index.md exists and its frontmatter parses
 *   with a truthy `menu` field; any missing file or parse error yields false.
 */
function hasExistingApiMenu(pagesDir) {
    const yaml = require('js-yaml');
    const apiIndex = path.join(pagesDir, 'api', '_index.md');
    if (!fs.existsSync(apiIndex))
        return false;
    const content = fs.readFileSync(apiIndex, 'utf8');
    const fmMatch = content.match(/^---\n([\s\S]*?)\n---/);
    if (!fmMatch)
        return false;
    try {
        const fm = yaml.load(fmMatch[1]);
        return !!fm.menu;
    }
    catch {
        // Unparseable frontmatter is treated as "no existing menu".
        return false;
    }
}
/**
 * Discover all products by scanning api-docs/ for .config.yml files.
 * Derives Hugo paths from directory structure and existing frontmatter.
+ */ +function discoverProducts() { + const yaml = require('js-yaml'); + const products = []; + const configFiles = findConfigFiles(API_DOCS_ROOT); + for (const configPath of configFiles) { + const configDir = path.dirname(configPath); + const productDir = path.relative(API_DOCS_ROOT, configDir); + let config; + try { + const raw = fs.readFileSync(configPath, 'utf8'); + config = yaml.load(raw); + } + catch (err) { + console.warn(`⚠️ Could not parse ${configPath}: ${err}`); + continue; + } + if (!config.apis || Object.keys(config.apis).length === 0) { + continue; + } + const pagesDir = path.join(DOCS_ROOT, 'content', productDir); + const staticDirName = deriveStaticDirName(productDir); + const menuKey = readMenuKey(pagesDir); + const skipParentMenu = hasExistingApiMenu(pagesDir); + // Parse API entries, skipping compatibility specs + const apis = []; + for (const [entryKey, entry] of Object.entries(config.apis)) { + const { apiKey, version } = parseApiEntry(entryKey); + // Skip v1-compatibility entries (being removed in pipeline restructure) + if (apiKey.includes('compatibility')) + continue; + const specFile = path.join(configDir, entry.root); + const sectionSlug = getSectionSlug(apiKey); + apis.push({ apiKey, version, specFile, sectionSlug }); + } + if (apis.length === 0) + continue; + products.push({ + configDir, + productDir, + productName: config['x-influxdata-product-name'] || productDir, + pagesDir, + menuKey, + skipParentMenu, + staticDirName, + apis, + }); + } + return products; +} +// --------------------------------------------------------------------------- +// Cleanup functions +// --------------------------------------------------------------------------- +/** + * Get all paths that would be cleaned for a product. 
/**
 * Get all paths that would be cleaned for a product.
 *
 * Pure inspection: collects existing directories and files without removing
 * anything, so it can back both the real cleanup and the dry-run preview.
 *
 * @param product - The product to clean
 * @param allStaticDirNames - Names of all products (to avoid prefix collisions)
 * @returns { directories, files } — only entries that currently exist on disk
 */
function getCleanupPaths(product, allStaticDirNames) {
    const staticPath = path.join(DOCS_ROOT, 'static/openapi');
    const directories = [];
    const files = [];
    // Tag specs directory: static/openapi/{staticDirName}/
    const tagSpecsDir = path.join(staticPath, product.staticDirName);
    if (fs.existsSync(tagSpecsDir)) {
        directories.push(tagSpecsDir);
    }
    // Article data directory: data/article_data/influxdb/{staticDirName}/
    const articleDataDir = path.join(DOCS_ROOT, `data/article_data/influxdb/${product.staticDirName}`);
    if (fs.existsSync(articleDataDir)) {
        directories.push(articleDataDir);
    }
    // Content pages: content/{pagesDir}/{sectionSlug}/ for each API
    for (const api of product.apis) {
        const contentDir = path.join(product.pagesDir, api.sectionSlug);
        if (fs.existsSync(contentDir)) {
            directories.push(contentDir);
        }
    }
    // Root spec files: static/openapi/{staticDirName}*.yml and .json
    // Avoid matching files that belong to products with longer names
    // (e.g., 'influxdb-cloud' should not match 'influxdb-cloud-dedicated-*.yml')
    const longerPrefixes = allStaticDirNames.filter((n) => n !== product.staticDirName &&
        n.startsWith(product.staticDirName + '-'));
    if (fs.existsSync(staticPath)) {
        const staticFiles = fs.readdirSync(staticPath);
        staticFiles
            .filter((f) => {
            if (!f.startsWith(product.staticDirName))
                return false;
            // Exclude files belonging to a longer-named product
            for (const longer of longerPrefixes) {
                if (f.startsWith(longer))
                    return false;
            }
            return f.endsWith('.yml') || f.endsWith('.json');
        })
            .forEach((f) => {
            files.push(path.join(staticPath, f));
        });
    }
    return { directories, files };
}
/** Clean output directories for a product before regeneration.
/**
 * Clean output directories for a product before regeneration.
 *
 * Destructive: removes every directory (recursively) and file reported by
 * getCleanupPaths. Logs each removal and a summary when anything was deleted.
 *
 * @param product - The product whose generated outputs are removed
 * @param allStaticDirNames - All product names, for prefix-safe file matching
 */
function cleanProductOutputs(product, allStaticDirNames) {
    const { directories, files } = getCleanupPaths(product, allStaticDirNames);
    for (const dir of directories) {
        console.log(`🧹 Removing directory: ${dir}`);
        // force: true suppresses errors if a path vanished since the scan.
        fs.rmSync(dir, { recursive: true, force: true });
    }
    for (const file of files) {
        console.log(`🧹 Removing file: ${file}`);
        fs.unlinkSync(file);
    }
    const total = directories.length + files.length;
    if (total > 0) {
        console.log(`✓ Cleaned ${directories.length} directories, ${files.length} files for ${product.staticDirName}`);
    }
}
/**
 * Display dry-run preview of what would be cleaned.
 * Read-only counterpart of cleanProductOutputs: prints the same path set
 * without deleting anything.
 */
function showDryRunPreview(product, allStaticDirNames) {
    const { directories, files } = getCleanupPaths(product, allStaticDirNames);
    console.log(`\nDRY RUN: Would clean the following for ${product.staticDirName}:\n`);
    if (directories.length > 0) {
        console.log('Directories to remove:');
        directories.forEach((dir) => console.log(` - ${dir}`));
    }
    if (files.length > 0) {
        console.log('\nFiles to remove:');
        files.forEach((file) => console.log(` - ${file}`));
    }
    if (directories.length === 0 && files.length === 0) {
        console.log(' (no files to clean)');
    }
    console.log(`\nSummary: ${directories.length} directories, ${files.length} files would be removed`);
}
// ---------------------------------------------------------------------------
// Link transformation
// ---------------------------------------------------------------------------
/** Fields that can contain markdown with links */
const MARKDOWN_FIELDS = new Set(['description', 'summary']);
exports.MARKDOWN_FIELDS = MARKDOWN_FIELDS;
/** Link placeholder pattern replaced with the concrete product path */
const LINK_PATTERN = /\/influxdb\/version\//g;
exports.LINK_PATTERN = LINK_PATTERN;
/**
 * Transform documentation links in OpenAPI spec markdown fields.
 * Replaces `/influxdb/version/` with the actual product path.
+ */ +function transformDocLinks(spec, productPath) { + function transformValue(value) { + if (typeof value === 'string') { + return value.replace(LINK_PATTERN, `${productPath}/`); + } + if (Array.isArray(value)) { + return value.map(transformValue); + } + if (value !== null && typeof value === 'object') { + return transformObject(value); + } + return value; + } + function transformObject(obj) { + const result = {}; + for (const [key, value] of Object.entries(obj)) { + if (MARKDOWN_FIELDS.has(key) && typeof value === 'string') { + result[key] = value.replace(LINK_PATTERN, `${productPath}/`); + } + else if (value !== null && typeof value === 'object') { + result[key] = transformValue(value); + } + else { + result[key] = value; + } + } + return result; + } + return transformObject(spec); +} +/** + * Resolve a URL path to a content file path. + * + * @example '/influxdb3/core/api/auth/' → 'content/influxdb3/core/api/auth/_index.md' + */ +function resolveContentPath(urlPath, contentDir) { + const normalized = urlPath.replace(/\/$/, ''); + const indexPath = path.join(contentDir, normalized, '_index.md'); + const directPath = path.join(contentDir, normalized + '.md'); + if (fs.existsSync(indexPath)) + return indexPath; + if (fs.existsSync(directPath)) + return directPath; + return indexPath; +} +/** + * Validate that transformed links point to existing content. 
+ */ +function validateDocLinks(spec, contentDir) { + const errors = []; + const linkPattern = /\[([^\]]+)\]\(([^)]+)\)/g; + function extractLinks(value, jsonPath) { + if (typeof value === 'string') { + let match; + while ((match = linkPattern.exec(value)) !== null) { + const [, linkText, linkUrl] = match; + if (linkUrl.startsWith('/') && !linkUrl.startsWith('//')) { + const contentPath = resolveContentPath(linkUrl, contentDir); + if (!fs.existsSync(contentPath)) { + errors.push(`Broken link at ${jsonPath}: [${linkText}](${linkUrl})`); + } + } + } + linkPattern.lastIndex = 0; + } + else if (Array.isArray(value)) { + value.forEach((item, index) => extractLinks(item, `${jsonPath}[${index}]`)); + } + else if (value !== null && typeof value === 'object') { + for (const [key, val] of Object.entries(value)) { + extractLinks(val, `${jsonPath}.${key}`); + } + } + } + extractLinks(spec, 'spec'); + return errors; +} +/** + * Generate Hugo content pages from tag-based article data. + * + * Creates markdown files with frontmatter from article metadata. + * Each article becomes a page with type: api that renders via Hugo-native + * templates. Includes operation metadata for TOC generation. 
/**
 * Generate Hugo content pages from tag-based article data.
 *
 * Creates markdown files with frontmatter from article metadata.
 * Each article becomes a page with type: api that renders via Hugo-native
 * templates. Includes operation metadata for TOC generation.
 *
 * Pages written (each only as described below):
 *  - {contentPath}/{sectionSlug}/_index.md       (only if it does not exist)
 *  - {contentPath}/{sectionSlug}/all-endpoints/_index.md (always overwritten)
 *  - {contentPath}/{article.path}/_index.md      (one per article, overwritten)
 *
 * @param options - articlesPath, contentPath, sectionSlug, menuKey,
 *   menuParent, productDescription, skipParentMenu, specDownloadPath,
 *   articleDataKey, articleSection
 */
function generateTagPagesFromArticleData(options) {
    const { articlesPath, contentPath, sectionSlug, menuKey, menuParent, productDescription, skipParentMenu, specDownloadPath, articleDataKey, articleSection, } = options;
    const yaml = require('js-yaml');
    // FIX (DRY): this alt_links construction was duplicated verbatim three
    // times below; build it through one helper instead. Each call returns a
    // fresh object, matching the original per-page construction.
    const applyAltLinks = (frontmatter) => {
        if (apiProductsMap.size > 0) {
            frontmatter.alt_links = Object.fromEntries(apiProductsMap);
        }
    };
    const articlesFile = path.join(articlesPath, 'articles.yml');
    if (!fs.existsSync(articlesFile)) {
        console.warn(`⚠️ Articles file not found: ${articlesFile}`);
        return;
    }
    const articlesContent = fs.readFileSync(articlesFile, 'utf8');
    const data = yaml.load(articlesContent);
    if (!data.articles || !Array.isArray(data.articles)) {
        console.warn(`⚠️ No articles found in ${articlesFile}`);
        return;
    }
    if (!fs.existsSync(contentPath)) {
        fs.mkdirSync(contentPath, { recursive: true });
    }
    // Generate parent _index.md for the section (never overwrites an
    // existing, possibly hand-edited, parent index).
    const sectionDir = path.join(contentPath, sectionSlug);
    const parentIndexFile = path.join(sectionDir, '_index.md');
    if (!fs.existsSync(sectionDir)) {
        fs.mkdirSync(sectionDir, { recursive: true });
    }
    if (!fs.existsSync(parentIndexFile)) {
        const apiDescription = productDescription ||
            `Use the InfluxDB HTTP API to write data, query data, and manage databases, tables, and tokens.`;
        const parentFrontmatter = {
            title: menuParent || 'InfluxDB HTTP API',
            description: apiDescription,
            weight: 104,
            type: 'api',
            articleDataKey,
            articleSection,
        };
        if (menuKey && !skipParentMenu) {
            parentFrontmatter.menu = {
                [menuKey]: {
                    name: menuParent || 'InfluxDB HTTP API',
                    parent: 'Reference',
                },
            };
        }
        applyAltLinks(parentFrontmatter);
        // NOTE(review): String.replace with a string pattern replaces only the
        // FIRST 'InfluxDB' occurrence — confirm this is the intended shortcode
        // substitution for multi-mention descriptions.
        const introText = apiDescription.replace('InfluxDB', '{{% product-name %}}');
        const parentContent = `---
${yaml.dump(parentFrontmatter)}---

${introText}

{{< children >}}
`;
        fs.writeFileSync(parentIndexFile, parentContent);
        console.log(`✓ Generated parent index at ${parentIndexFile}`);
    }
    // Generate "All endpoints" page
    const allEndpointsDir = path.join(sectionDir, 'all-endpoints');
    const allEndpointsFile = path.join(allEndpointsDir, '_index.md');
    if (!fs.existsSync(allEndpointsDir)) {
        fs.mkdirSync(allEndpointsDir, { recursive: true });
    }
    const allEndpointsFrontmatter = {
        title: 'All endpoints',
        description: `View all API endpoints sorted by path.`,
        type: 'api',
        layout: 'all-endpoints',
        weight: 999,
        isAllEndpoints: true,
        articleDataKey,
        articleSection,
    };
    if (menuKey) {
        allEndpointsFrontmatter.menu = {
            [menuKey]: {
                name: 'All endpoints',
                parent: menuParent || 'InfluxDB HTTP API',
            },
        };
    }
    applyAltLinks(allEndpointsFrontmatter);
    const allEndpointsContent = `---
${yaml.dump(allEndpointsFrontmatter)}---

All {{% product-name %}} API endpoints, sorted by path.
`;
    fs.writeFileSync(allEndpointsFile, allEndpointsContent);
    console.log(`✓ Generated all-endpoints page at ${allEndpointsFile}`);
    // Generate a page for each article (tag)
    for (const article of data.articles) {
        const pagePath = path.join(contentPath, article.path);
        const pageFile = path.join(pagePath, '_index.md');
        if (!fs.existsSync(pagePath)) {
            fs.mkdirSync(pagePath, { recursive: true });
        }
        const title = article.fields.title || article.fields.name || article.path;
        const isConceptual = article.fields.isConceptual === true;
        const weight = article.fields.weight ?? 100;
        const frontmatter = {
            title,
            description: article.fields.description || `API reference for ${title}`,
            type: 'api',
            layout: isConceptual ? 'single' : 'list',
            staticFilePath: article.fields.staticFilePath,
            weight,
            tag: article.fields.tag,
            isConceptual,
            menuGroup: article.fields.menuGroup,
            specDownloadPath,
            articleDataKey,
            articleSection,
        };
        // Operation metadata drives in-page TOC generation for tag pages.
        if (!isConceptual &&
            article.fields.operations &&
            article.fields.operations.length > 0) {
            frontmatter.operations = article.fields.operations;
        }
        if (isConceptual && article.fields.tagDescription) {
            frontmatter.tagDescription = article.fields.tagDescription;
        }
        if (article.fields.showSecuritySchemes) {
            frontmatter.showSecuritySchemes = true;
        }
        // Add related links if present
        if (article.fields.related &&
            Array.isArray(article.fields.related) &&
            article.fields.related.length > 0) {
            frontmatter.related = article.fields.related;
        }
        // Add client library related link for InfluxDB 3 products
        if (contentPath.includes('influxdb3/') && !isConceptual) {
            const influxdb3Match = contentPath.match(/influxdb3\/([^/]+)/);
            if (influxdb3Match) {
                const productSegment = influxdb3Match[1];
                const clientLibLink = {
                    title: 'InfluxDB 3 API client libraries',
                    href: `/influxdb3/${productSegment}/reference/client-libraries/v3/`,
                };
                const existing = frontmatter.related || [];
                const alreadyHas = existing.some((r) => typeof r === 'object' && r.href === clientLibLink.href);
                if (!alreadyHas) {
                    frontmatter.related = [...existing, clientLibLink];
                }
            }
        }
        applyAltLinks(frontmatter);
        const pageContent = `---
${yaml.dump(frontmatter)}---
`;
        fs.writeFileSync(pageFile, pageContent);
    }
    console.log(`✓ Generated ${data.articles.length} tag-based content pages in ${contentPath}`);
}
// ---------------------------------------------------------------------------
// Spec processing
// ---------------------------------------------------------------------------
/**
 * Process a single API section: transform links, write static spec,
 * generate tag data, and create Hugo content pages.
 *
 * @param product - Discovered product (see discoverProducts)
 * @param api - One entry of product.apis ({ apiKey, version, specFile, sectionSlug })
 * @param staticBasePath - Root of static spec output (static/openapi)
 */
function processApiSection(product, api, staticBasePath) {
    const yaml = require('js-yaml');
    // "Dual API" products (e.g. data + management API) get per-section
    // suffixes on spec filenames and nested tag/path directories.
    const isDualApi = product.apis.length > 1;
    console.log(`\n📄 Processing ${api.sectionSlug} section (${api.apiKey})`);
    // --- 1. Determine paths ---
    // Root spec download: single → {dir}.yml, dual → {dir}-{section}.yml
    const specSuffix = isDualApi ? `-${api.sectionSlug}` : '';
    const staticSpecPath = path.join(staticBasePath, `${product.staticDirName}${specSuffix}.yml`);
    // NOTE(review): replaces only the FIRST '.yml' — would misfire if
    // staticDirName itself ever contained '.yml'; confirm acceptable.
    const staticJsonSpecPath = staticSpecPath.replace('.yml', '.json');
    // Tag specs directory
    const tagSpecsBase = isDualApi
        ? path.join(staticBasePath, product.staticDirName, api.sectionSlug)
        : path.join(staticBasePath, product.staticDirName);
    // Article data
    const articlesPath = path.join(DOCS_ROOT, 'data/article_data/influxdb', product.staticDirName, api.sectionSlug);
    // Download path for frontmatter
    const specDownloadPath = `/openapi/${product.staticDirName}${specSuffix}.yml`;
    // Path spec files for per-operation rendering
    const pathSpecsDir = isDualApi
        ? path.join(staticBasePath, product.staticDirName, api.sectionSlug, 'paths')
        : path.join(staticBasePath, product.staticDirName, 'paths');
    // --- 2. Read and transform spec ---
    if (!fs.existsSync(api.specFile)) {
        console.warn(`⚠️ Spec file not found: ${api.specFile}`);
        return;
    }
    const specContent = fs.readFileSync(api.specFile, 'utf8');
    const specObject = yaml.load(specContent);
    const productPath = `/${product.productDir}`;
    // Rewrites '/influxdb/version/' placeholders to the product's URL prefix.
    const transformedSpec = transformDocLinks(specObject, productPath);
    console.log(`✓ Transformed documentation links for ${api.apiKey} to ${productPath}`);
    // Validate links if enabled (--validate-links); warnings only, non-fatal.
    if (validateLinks) {
        const contentDir = path.join(DOCS_ROOT, 'content');
        const linkErrors = validateDocLinks(transformedSpec, contentDir);
        if (linkErrors.length > 0) {
            console.warn(`\n⚠️ Link validation warnings for ${api.specFile}:`);
            linkErrors.forEach((err) => console.warn(` ${err}`));
        }
    }
    // --- 3. Write transformed spec to static folder (YAML + JSON twins) ---
    if (!fs.existsSync(staticBasePath)) {
        fs.mkdirSync(staticBasePath, { recursive: true });
    }
    fs.writeFileSync(staticSpecPath, yaml.dump(transformedSpec));
    console.log(`✓ Wrote transformed spec to ${staticSpecPath}`);
    fs.writeFileSync(staticJsonSpecPath, JSON.stringify(transformedSpec, null, 2));
    console.log(`✓ Generated JSON spec at ${staticJsonSpecPath}`);
    // --- 4. Generate tag-based data (delegated to the converter module) ---
    console.log(`\n📋 Generating tag-based data for ${api.apiKey} in ${tagSpecsBase}...`);
    openapiPathsToHugo.generateHugoDataByTag({
        specFile: staticSpecPath,
        dataOutPath: tagSpecsBase,
        articleOutPath: articlesPath,
        includePaths: true,
    });
    // Generate path-specific specs
    openapiPathsToHugo.generatePathSpecificSpecs(staticSpecPath, pathSpecsDir);
    // --- 5. Generate Hugo content pages ---
    // productDescription is intentionally omitted here, so the parent index
    // falls back to the generic description inside the generator.
    generateTagPagesFromArticleData({
        articlesPath,
        contentPath: product.pagesDir,
        sectionSlug: api.sectionSlug,
        menuKey: product.menuKey,
        menuParent: 'InfluxDB HTTP API',
        skipParentMenu: product.skipParentMenu,
        specDownloadPath,
        articleDataKey: product.staticDirName,
        articleSection: api.sectionSlug,
    });
}
/**
 * Process a single product: clean outputs and process each API section.
 *
 * Honors the --no-clean, --dry-run, and --skip-fetch CLI flags.
 *
 * @param product - Discovered product
 * @param allStaticDirNames - All product names, for prefix-safe cleanup
 */
function processProduct(product, allStaticDirNames) {
    console.log('\n' + '='.repeat(80));
    console.log(`Processing ${product.productName}`);
    console.log('='.repeat(80));
    // Clean output directories before regeneration
    if (!noClean && !dryRun) {
        cleanProductOutputs(product, allStaticDirNames);
    }
    const staticBasePath = path.join(DOCS_ROOT, 'static/openapi');
    // Fetch specs if needed
    if (!skipFetch) {
        const getswaggerScript = path.join(API_DOCS_ROOT, 'getswagger.sh');
        if (fs.existsSync(getswaggerScript)) {
            // The build function in generate-api-docs.sh handles per-product
            // fetching. When called standalone, use product directory name.
            execCommand(`cd ${API_DOCS_ROOT} && ./getswagger.sh ${product.productDir} -B`, `Fetching OpenAPI spec for ${product.productName}`);
        }
        else {
            console.log(`⚠️ getswagger.sh not found, skipping fetch step`);
        }
    }
    else {
        console.log(`⏭️ Skipping getswagger.sh (--skip-fetch flag set)`);
    }
    // Process each API section independently
    for (const api of product.apis) {
        processApiSection(product, api, staticBasePath);
    }
    console.log(`\n✅ Successfully processed ${product.productName}\n`);
}
// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------
/**
 * CLI entry point: discover products, select those named in positional
 * args (all when none given), then clean/fetch/process each.
 */
function main() {
    // Positional args are product identifiers; '--' flags are read at module
    // scope (validateLinks, skipFetch, noClean, dryRun).
    const args = process.argv.slice(2).filter((arg) => !arg.startsWith('--'));
    // Discover all products from .config.yml files
    const allProducts = discoverProducts();
    if (allProducts.length === 0) {
        console.error('❌ No products discovered. Ensure .config.yml files exist under api-docs/.');
        process.exit(1);
    }
    // Determine which products to process
    let productsToProcess;
    if (args.length === 0) {
        productsToProcess = allProducts;
        console.log(`\n📋 Discovered ${allProducts.length} products, processing all...\n`);
    }
    else {
        // Match by staticDirName or productDir (slash or hyphen form)
        productsToProcess = [];
        const invalid = [];
        for (const arg of args) {
            const found = allProducts.find((p) => p.staticDirName === arg ||
                p.productDir === arg ||
                p.productDir.replace(/\//g, '-') === arg);
            if (found) {
                productsToProcess.push(found);
            }
            else {
                invalid.push(arg);
            }
        }
        if (invalid.length > 0) {
            console.error(`\n❌ Unknown product identifier(s): ${invalid.join(', ')}`);
            console.error('\nDiscovered products:');
            allProducts.forEach((p) => {
                console.error(` - ${p.staticDirName} (${p.productName}) [${p.productDir}]`);
            });
            process.exit(1);
        }
        console.log(`\n📋 Processing specified products: ${productsToProcess.map((p) => p.staticDirName).join(', ')}\n`);
    }
// Collect all staticDirNames for prefix-safe cleanup + const allStaticDirNames = allProducts.map((p) => p.staticDirName); + // Handle dry-run mode + if (dryRun) { + console.log('\n📋 DRY RUN MODE - No files will be modified\n'); + productsToProcess.forEach((p) => showDryRunPreview(p, allStaticDirNames)); + console.log('\nDry run complete. No files were modified.'); + return; + } + // Process each product + productsToProcess.forEach((product) => { + processProduct(product, allStaticDirNames); + }); + console.log('\n' + '='.repeat(80)); + console.log('✅ All products processed successfully!'); + console.log('='.repeat(80) + '\n'); +} +// Execute if run directly +if (require.main === module) { + main(); +} +//# sourceMappingURL=generate-openapi-articles.js.map \ No newline at end of file diff --git a/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js b/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js new file mode 100644 index 0000000000..4874d1000c --- /dev/null +++ b/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js @@ -0,0 +1,920 @@ +"use strict"; +/** + * OpenAPI to Hugo Data Converter + * + * Converts OpenAPI v3 specifications into Hugo-compatible data files. + * Generates both YAML and JSON versions of spec fragments grouped by path. + * + * @module openapi-paths-to-hugo-data + */ +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.writePathSpecificSpecs = writePathSpecificSpecs; +exports.generateHugoDataByTag = generateHugoDataByTag; +exports.generateHugoData = generateHugoData; +exports.generatePathSpecificSpecs = generatePathSpecificSpecs; +const yaml = __importStar(require("js-yaml")); +const fs = __importStar(require("fs")); +const path = __importStar(require("path")); +/** + * Read a YAML file and parse it + * + * @param filepath - Path to the YAML file + * @param encoding - File encoding (default: 'utf8') + * @returns Parsed YAML content + */ +function readFile(filepath, encoding = 'utf8') { + const content = fs.readFileSync(filepath, encoding); + return yaml.load(content); +} +/** + * Write data to a YAML file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeDataFile(data, outputTo) { + fs.writeFileSync(outputTo, yaml.dump(data)); +} +/** + * Write data to a JSON file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeJsonFile(data, outputTo) { + fs.writeFileSync(outputTo, JSON.stringify(data, null, 2)); +} +/** + * OpenAPI utility functions + */ +const openapiUtils = { + /** + * Check if a path fragment is a placeholder 
(e.g., {id}) + * + * @param str - Path fragment to check + * @returns True if the fragment is a placeholder + */ + isPlaceholderFragment(str) { + const placeholderRegex = /^\{.*\}$/; + return placeholderRegex.test(str); + }, +}; +/** + * Convert tag name to URL-friendly slug + * + * @param tagName - Tag name (e.g., "Write data", "Processing engine") + * @returns URL-friendly slug (e.g., "write-data", "processing-engine") + */ +function slugifyTag(tagName) { + return tagName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); +} +/** + * Menu group mappings for tag-based navigation + * Maps OpenAPI tags to sidebar groups + */ +const TAG_MENU_GROUPS = { + // Concepts group + 'Quick start': 'Concepts', + Authentication: 'Concepts', + 'Headers and parameters': 'Concepts', + 'Response codes': 'Concepts', + // Data Operations group + 'Write data': 'Data Operations', + 'Query data': 'Data Operations', + 'Cache data': 'Data Operations', + // Administration group + Database: 'Administration', + Table: 'Administration', + Token: 'Administration', + // Processing Engine group + 'Processing engine': 'Processing Engine', + // Server group + 'Server information': 'Server', + // Compatibility group + 'Compatibility endpoints': 'Compatibility', +}; +/** + * Get menu group for a tag + * + * @param tagName - Tag name + * @returns Menu group name or 'Other' if not mapped + */ +function getMenuGroupForTag(tagName) { + return TAG_MENU_GROUPS[tagName] || 'Other'; +} +/** + * HTTP methods to check for operations + */ +const HTTP_METHODS = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', +]; +/** + * Extract all operations from an OpenAPI document grouped by tag + * + * @param openapi - OpenAPI document + * @returns Map of tag name to operations with that tag + */ +function extractOperationsByTag(openapi) { + const tagOperations = new Map(); + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + 
HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + const opMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + // Extract x-influxdatadocs-related if present + if (operation['x-influxdatadocs-related'] && + Array.isArray(operation['x-influxdatadocs-related'])) { + opMeta.related = operation['x-influxdatadocs-related']; + } + // Extract x-related (title/href objects) if present + if (operation['x-related'] && Array.isArray(operation['x-related'])) { + opMeta.relatedLinks = operation['x-related']; + } + // Add operation to each of its tags + (operation.tags || []).forEach((tag) => { + if (!tagOperations.has(tag)) { + tagOperations.set(tag, []); + } + tagOperations.get(tag).push(opMeta); + }); + } + }); + }); + return tagOperations; +} +/** + * Write OpenAPI specs grouped by tag to separate files + * Generates both YAML and JSON versions per tag + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writeTagOpenapis(openapi, prefix, outPath) { + const tagOperations = extractOperationsByTag(openapi); + // Process each tag + tagOperations.forEach((operations, tagName) => { + // Deep copy openapi + const doc = JSON.parse(JSON.stringify(openapi)); + // Filter paths to only include those with operations for this tag + const filteredPaths = {}; + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + const filteredPathItem = {}; + let 
hasOperations = false; + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation?.tags?.includes(tagName)) { + // Clone the operation and restrict tags to only this tag + // This prevents the operation from being rendered multiple times + // (once per tag) when an operation belongs to multiple tags + const filteredOperation = { ...operation, tags: [tagName] }; + filteredPathItem[method] = filteredOperation; + hasOperations = true; + } + }); + // Include path-level parameters if we have operations + if (hasOperations) { + if (pathItem.parameters) { + filteredPathItem.parameters = pathItem.parameters; + } + filteredPaths[pathKey] = filteredPathItem; + } + }); + doc.paths = filteredPaths; + // Filter tags to only include this tag (and trait tags for context) + if (doc.tags) { + doc.tags = doc.tags.filter((tag) => tag.name === tagName || tag['x-traitTag']); + } + // Update info + const tagSlug = slugifyTag(tagName); + doc.info.title = tagName; + doc.info.description = `API reference for ${tagName}`; + doc['x-tagGroup'] = tagName; + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log(`Generated tag spec: ${baseFilename}.yaml (${Object.keys(filteredPaths).length} paths, ${operations.length} operations)`); + } + catch (err) { + console.error(`Error writing tag group ${tagName}:`, err); + } + }); + // Also create specs for conceptual tags (x-traitTag) without operations + (openapi.tags || []).forEach((tag) => { + if (tag['x-traitTag'] && !tagOperations.has(tag.name)) { + const doc = JSON.parse(JSON.stringify(openapi)); + doc.paths = {}; + doc.tags = [tag]; + doc.info.title = tag.name; + doc.info.description = tag.description || `API reference for 
${tag.name}`; + doc['x-tagGroup'] = tag.name; + const tagSlug = slugifyTag(tag.name); + try { + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log(`Generated conceptual tag spec: ${baseFilename}.yaml`); + } + catch (err) { + console.error(`Error writing conceptual tag ${tag.name}:`, err); + } + } + }); +} +/** + * Convert API path to filename-safe slug + * + * @param apiPath - API path (e.g., "/api/v3/configure/token/admin") + * @returns Filename-safe slug (e.g., "api-v3-configure-token-admin") + */ +function pathToFileSlug(apiPath) { + return apiPath + .replace(/^\//, '') // Remove leading slash + .replace(/\//g, '-') // Replace slashes with dashes + .replace(/[{}]/g, '') // Remove curly braces from path params + .replace(/-+/g, '-') // Collapse multiple dashes + .replace(/-$/, ''); // Remove trailing dash +} +/** + * Write path-specific OpenAPI specs (one file per exact API path) + * + * Each file contains all HTTP methods for a single path, enabling + * operation pages to filter by method only (no path prefix conflicts). 
+ * + * @param openapi - OpenAPI document + * @param outPath - Output directory path (e.g., "static/openapi/{product}/paths") + * @returns Map of API path to spec file path (for use in frontmatter) + */ +function writePathSpecificSpecs(openapi, outPath) { + const pathSpecFiles = new Map(); + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + Object.entries(openapi.paths).forEach(([apiPath, pathItem]) => { + // Deep clone pathItem to avoid mutating original + const clonedPathItem = JSON.parse(JSON.stringify(pathItem)); + // Limit each operation to a single tag to prevent duplicate rendering + // Operations with multiple tags would be rendered once per tag + const usedTags = new Set(); + HTTP_METHODS.forEach((method) => { + const operation = clonedPathItem[method]; + if (operation?.tags && operation.tags.length > 0) { + // Select the most specific tag to avoid duplicate rendering + // Prefer "Auth token" over "Authentication" for token-related operations + let primaryTag = operation.tags[0]; + if (operation.tags.includes('Auth token')) { + primaryTag = 'Auth token'; + } + operation.tags = [primaryTag]; + usedTags.add(primaryTag); + } + }); + // Create spec with just this path (all its methods) + // Include global security requirements so auth info displays correctly + const pathSpec = { + openapi: openapi.openapi, + info: { + ...openapi.info, + title: apiPath, + description: `API reference for ${apiPath}`, + }, + paths: { [apiPath]: clonedPathItem }, + components: openapi.components, // Include for $ref resolution + servers: openapi.servers, + security: openapi.security, // Global security requirements + }; + // Filter spec-level tags to only include those used by operations + if (openapi.tags) { + pathSpec.tags = openapi.tags.filter((tag) => usedTags.has(tag.name) && !tag['x-traitTag']); + } + // Write files + const slug = pathToFileSlug(apiPath); + const yamlPath = path.resolve(outPath, `${slug}.yaml`); + const jsonPath = 
path.resolve(outPath, `${slug}.json`); + writeDataFile(pathSpec, yamlPath); + writeJsonFile(pathSpec, jsonPath); + // Store the web-accessible path (without "static/" prefix) + // Hugo serves files from static/ at the root, so we extract the path after 'static/' + const staticMatch = yamlPath.match(/static\/(.+)$/); + const webPath = staticMatch ? `/${staticMatch[1]}` : yamlPath; + pathSpecFiles.set(apiPath, webPath); + }); + console.log(`Generated ${pathSpecFiles.size} path-specific specs in ${outPath}`); + return pathSpecFiles; +} +/** + * Write OpenAPI specs grouped by path to separate files + * Generates both YAML and JSON versions + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writePathOpenapis(openapi, prefix, outPath) { + const pathGroups = {}; + // Group paths by their base path (first 3-4 segments, excluding placeholders) + Object.keys(openapi.paths) + .sort() + .forEach((p) => { + const delimiter = '/'; + let key = p.split(delimiter); + // Check if this is an item path (ends with a placeholder) + let isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + // Take first 4 segments + key = key.slice(0, 4); + // Check if the last segment is still a placeholder + isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + const groupKey = key.join('/'); + pathGroups[groupKey] = pathGroups[groupKey] || {}; + pathGroups[groupKey][p] = openapi.paths[p]; + }); + // Write each path group to separate YAML and JSON files + Object.keys(pathGroups).forEach((pg) => { + // Deep copy openapi + const doc = JSON.parse(JSON.stringify(openapi)); + doc.paths = pathGroups[pg]; + // Collect tags used by operations in this path group + const usedTags = new Set(); + Object.values(doc.paths).forEach((pathItem) => { + const httpMethods = [ + 'get', + 
'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + httpMethods.forEach((method) => { + const operation = pathItem[method]; + if (operation?.tags) { + operation.tags.forEach((tag) => usedTags.add(tag)); + } + }); + }); + // Filter tags to only include those used by operations in this path group + // Exclude x-traitTag tags (supplementary documentation tags) + if (doc.tags) { + doc.tags = doc.tags.filter((tag) => usedTags.has(tag.name) && !tag['x-traitTag']); + } + // Simplify info for path-specific docs + doc.info.title = pg; + doc.info.description = `API reference for ${pg}`; + doc['x-pathGroup'] = pg; + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + const baseFilename = `${prefix}${pg.replaceAll('/', '-').replace(/^-/, '')}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + // Write both YAML and JSON versions + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log(`Generated: ${baseFilename}.yaml and ${baseFilename}.json`); + } + catch (err) { + console.error(`Error writing path group ${pg}:`, err); + } + }); +} +/** + * Create article metadata for a path group + * + * @param openapi - OpenAPI document with x-pathGroup + * @returns Article metadata object + */ +function createArticleDataForPathGroup(openapi) { + const article = { + path: '', + fields: { + name: openapi['x-pathGroup'] || '', + describes: Object.keys(openapi.paths), + }, + }; + /** + * Convert OpenAPI path to Hugo-friendly article path + * Legacy endpoints (without /api/ prefix) go under api/ directly + * Versioned endpoints (with /api/vN/) keep their structure + * + * @param p - Path to convert (e.g., '/health', '/api/v3/query_sql') + * @returns Path suitable for Hugo content directory (e.g., 'api/health', 'api/v3/query_sql') + */ + const toHugoPath = (p) => { + if (!p) { + return ''; + } + // If path doesn't start with 
/api/, it's a legacy endpoint + // Place it directly under api/ to avoid collision with /api/v1/* paths + if (!p.startsWith('/api/')) { + // /health -> api/health + // /write -> api/write + return `api${p}`; + } + // /api/v1/health -> api/v1/health + // /api/v2/write -> api/v2/write + // /api/v3/query_sql -> api/v3/query_sql + return p.replace(/^\//, ''); + }; + /** + * Convert path to tag-friendly format (dashes instead of slashes) + * + * @param p - Path to convert + * @returns Tag-friendly path + */ + const toTagPath = (p) => { + if (!p) { + return ''; + } + return p.replace(/^\//, '').replaceAll('/', '-'); + }; + const pathGroup = openapi['x-pathGroup'] || ''; + article.path = toHugoPath(pathGroup); + // Store original path for menu display (shows actual endpoint path) + article.fields.menuName = pathGroup; + article.fields.title = openapi.info?.title; + article.fields.description = openapi.description; + const pathGroupFrags = path.parse(openapi['x-pathGroup'] || ''); + article.fields.tags = [pathGroupFrags?.dir, pathGroupFrags?.name] + .filter(Boolean) + .map((t) => toTagPath(t)); + // Extract x-relatedLinks and OpenAPI tags from path items or operations + const relatedLinks = []; + const apiTags = []; + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + Object.values(openapi.paths).forEach((pathItem) => { + // Check path-level x-relatedLinks + if (pathItem['x-relatedLinks'] && + Array.isArray(pathItem['x-relatedLinks'])) { + relatedLinks.push(...pathItem['x-relatedLinks'].filter((link) => !relatedLinks.includes(link))); + } + // Check operation-level x-relatedLinks and tags + httpMethods.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + // Extract x-relatedLinks + if (operation['x-relatedLinks'] && + Array.isArray(operation['x-relatedLinks'])) { + relatedLinks.push(...operation['x-relatedLinks'].filter((link) => !relatedLinks.includes(link))); + } + // Extract 
OpenAPI tags from operation + if (operation.tags && Array.isArray(operation.tags)) { + operation.tags.forEach((tag) => { + if (!apiTags.includes(tag)) { + apiTags.push(tag); + } + }); + } + } + }); + }); + // Only add related if there are links + if (relatedLinks.length > 0) { + article.fields.related = relatedLinks; + } + // Add OpenAPI tags from operations (for Hugo frontmatter) + if (apiTags.length > 0) { + article.fields.apiTags = apiTags; + } + return article; +} +/** + * Write OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing OpenAPI fragment files + * @param targetPath - Output path for article data + * @param opts - Options including file pattern filter + */ +function writeOpenapiArticleData(sourcePath, targetPath, opts) { + /** + * Check if path is a file + */ + const isFile = (filePath) => { + return fs.lstatSync(filePath).isFile(); + }; + /** + * Check if filename matches pattern + */ + const matchesPattern = (filePath) => { + return opts.filePattern + ? 
path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter((filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml')) // Only process YAML files + .map((filePath) => { + const openapi = readFile(filePath); + const article = createArticleDataForPathGroup(openapi); + article.fields.source = filePath; + // Hugo omits "/static" from the URI when serving files stored in "./static" + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + const articleCollection = { articles }; + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + console.log(`Generated ${articles.length} articles in ${targetPath}`); + } + catch (e) { + console.error('Error writing article data:', e); + } +} +/** + * Sanitize markdown description by removing fragment links and ReDoc directives + * + * Handles three cases: + * 1. OpenAPI fragment links: [text](#section/...) -> text (removes the link entirely) + * 2. Relative links with fragments: [text](/path/#anchor) -> [text](/path/) (keeps link, removes fragment) + * 3. ReDoc injection directives: (removes entirely) + * + * This sanitization is necessary because fragment links don't work when article + * descriptions are rendered via the {{< children >}} shortcode on parent pages. 
+ * + * @param description - Markdown description that may contain fragment links + * @returns Sanitized description suitable for children shortcode rendering + */ +function sanitizeDescription(description) { + if (!description) { + return ''; + } + let sanitized = description; + // Remove ReDoc injection directives (e.g., ) + sanitized = sanitized.replace(//g, ''); + // Handle markdown links: + // 1. OpenAPI fragment links (#section/..., #operation/..., #tag/...) -> replace with just the text + // 2. Relative links with fragments (/path/#anchor) -> keep link but remove fragment + sanitized = sanitized.replace(/\[([^\]]+)\]\(([^)]+)\)/g, (match, text, url) => { + // Case 1: OpenAPI fragment links (starts with #section/, #operation/, #tag/) + if (url.match(/^#(section|operation|tag)\//)) { + return text; // Just return the link text, no markdown link + } + // Case 2: Relative link with fragment (starts with /, contains #) + if (url.startsWith('/') && url.includes('#')) { + const urlWithoutFragment = url.split('#')[0]; + if (urlWithoutFragment === '/' || urlWithoutFragment === '') { + return text; + } + return `[${text}](${urlWithoutFragment})`; + } + // Case 3: Keep other links as-is (external links, non-fragment links) + return match; + }); + // Clean up extra whitespace left by directive removals + sanitized = sanitized.replace(/\n\n\n+/g, '\n\n').trim(); + return sanitized; +} +/** + * Create article data for a tag-based grouping + * + * @param openapi - OpenAPI document with x-tagGroup + * @param operations - Operations for this tag + * @param tagMeta - Tag metadata from OpenAPI spec + * @returns Article metadata object + */ +function createArticleDataForTag(openapi, operations, tagMeta) { + const tagName = openapi['x-tagGroup'] || ''; + const tagSlug = slugifyTag(tagName); + const isConceptual = tagMeta?.['x-traitTag'] === true; + const article = { + path: `api/${tagSlug}`, + fields: { + name: tagName, + describes: Object.keys(openapi.paths), + title: tagName, 
+ description: sanitizeDescription(tagMeta?.description || + openapi.info?.description || + `API reference for ${tagName}`), + tag: tagName, + isConceptual, + menuGroup: getMenuGroupForTag(tagName), + operations: operations.map((op) => ({ + operationId: op.operationId, + method: op.method, + path: op.path, + summary: op.summary, + tags: op.tags, + ...(op.compatVersion && { compatVersion: op.compatVersion }), + ...(op.externalDocs && { externalDocs: op.externalDocs }), + })), + }, + }; + // Add tag description for conceptual pages (sanitized for children shortcode) + if (tagMeta?.description) { + article.fields.tagDescription = sanitizeDescription(tagMeta.description); + } + // Show security schemes section on Authentication pages + if (tagName === 'Authentication') { + article.fields.showSecuritySchemes = true; + } + // Set custom weight for Quick start to appear first in nav + if (tagName === 'Quick start') { + article.fields.weight = 1; + } + // Set default weight for consistent sorting (articles without explicit weight) + if (article.fields.weight === undefined) { + article.fields.weight = 100; + } + // Aggregate related links from multiple sources into article-level related + // This populates Hugo frontmatter `related` field for "Related content" links + // Supports both plain URL strings and {title, href} objects + const relatedItems = []; + const seenHrefs = new Set(); + // Helper to add a link, deduplicating by href + const addRelated = (item) => { + const href = typeof item === 'string' ? 
item : item.href; + if (!seenHrefs.has(href)) { + seenHrefs.add(href); + relatedItems.push(item); + } + }; + // Tag-level x-related ({title, href} objects) + if (tagMeta?.['x-related']) { + tagMeta['x-related'].forEach(addRelated); + } + // Tag-level x-influxdatadocs-related (plain URLs) + if (tagMeta?.['x-influxdatadocs-related']) { + tagMeta['x-influxdatadocs-related'].forEach(addRelated); + } + // Tag-level externalDocs (legacy single link) + if (tagMeta?.externalDocs?.url) { + addRelated(tagMeta.externalDocs.url); + } + // Operation-level related links + operations.forEach((op) => { + if (op.relatedLinks) { + op.relatedLinks.forEach(addRelated); + } + if (op.related) { + op.related.forEach(addRelated); + } + if (op.externalDocs?.url) { + addRelated(op.externalDocs.url); + } + }); + if (relatedItems.length > 0) { + article.fields.related = relatedItems; + } + return article; +} +/** + * Write tag-based OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing tag-based OpenAPI fragment files + * @param targetPath - Output path for article data + * @param openapi - Original OpenAPI document (for tag metadata) + * @param opts - Options including file pattern filter + */ +function writeOpenapiTagArticleData(sourcePath, targetPath, openapi, opts) { + const isFile = (filePath) => { + return fs.lstatSync(filePath).isFile(); + }; + const matchesPattern = (filePath) => { + return opts.filePattern + ? 
path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + // Create tag metadata lookup + const tagMetaMap = new Map(); + (openapi.tags || []).forEach((tag) => { + tagMetaMap.set(tag.name, tag); + }); + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter((filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml')) + .map((filePath) => { + const tagOpenapi = readFile(filePath); + const tagName = tagOpenapi['x-tagGroup'] || tagOpenapi.info?.title || ''; + const tagMeta = tagMetaMap.get(tagName); + // Extract operations from the tag-filtered spec + const operations = []; + Object.entries(tagOpenapi.paths).forEach(([pathKey, pathItem]) => { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + const opMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + // Extract x-influxdatadocs-related if present + if (operation['x-influxdatadocs-related'] && + Array.isArray(operation['x-influxdatadocs-related'])) { + opMeta.related = operation['x-influxdatadocs-related']; + } + // Extract x-related (title/href objects) + if (operation['x-related'] && + Array.isArray(operation['x-related'])) { + opMeta.relatedLinks = operation['x-related']; + } + operations.push(opMeta); + } + }); + }); + const article = createArticleDataForTag(tagOpenapi, operations, tagMeta); + article.fields.source = filePath; + 
article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + const articleCollection = { articles }; + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + console.log(`Generated ${articles.length} tag-based articles in ${targetPath}`); + } + catch (e) { + console.error('Error writing tag article data:', e); + } +} +/** + * Generate Hugo data files from an OpenAPI specification grouped by tag + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups operations by their OpenAPI tags + * 3. Writes each tag group to separate YAML and JSON files + * 4. Generates tag-based article metadata for Hugo + * + * @param options - Generation options + */ +function generateHugoDataByTag(options) { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + const sourceFile = readFile(options.specFile, 'utf8'); + // Optionally generate path-based files for backwards compatibility + if (options.includePaths) { + console.log(`\nGenerating OpenAPI path files in ${options.dataOutPath}....`); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + } + // Generate tag-based files + const tagOutPath = options.includePaths + ? 
path.join(options.dataOutPath, 'tags') + : options.dataOutPath; + console.log(`\nGenerating OpenAPI tag files in ${tagOutPath}....`); + writeTagOpenapis(sourceFile, filenamePrefix, tagOutPath); + console.log(`\nGenerating OpenAPI tag article data in ${options.articleOutPath}...`); + writeOpenapiTagArticleData(tagOutPath, options.articleOutPath, sourceFile, { + filePattern: filenamePrefix, + }); + console.log('\nTag-based generation complete!\n'); +} +/** + * Generate Hugo data files from an OpenAPI specification + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups paths by their base path + * 3. Writes each group to separate YAML and JSON files + * 4. Generates article metadata for Hugo + * + * @param options - Generation options + */ +function generateHugoData(options) { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + const sourceFile = readFile(options.specFile, 'utf8'); + console.log(`\nGenerating OpenAPI path files in ${options.dataOutPath}....`); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + console.log(`\nGenerating OpenAPI article data in ${options.articleOutPath}...`); + writeOpenapiArticleData(options.dataOutPath, options.articleOutPath, { + filePattern: filenamePrefix, + }); + console.log('\nGeneration complete!\n'); +} +/** + * Generate path-specific OpenAPI specs from a spec file + * + * Convenience wrapper that reads the spec file and generates path-specific specs. 
+ * + * @param specFile - Path to OpenAPI spec file + * @param outPath - Output directory for path-specific specs + * @returns Map of API path to spec file web path (for use in frontmatter) + */ +function generatePathSpecificSpecs(specFile, outPath) { + const openapi = readFile(specFile, 'utf8'); + return writePathSpecificSpecs(openapi, outPath); +} +// CommonJS export for backward compatibility +module.exports = { + generateHugoData, + generateHugoDataByTag, + generatePathSpecificSpecs, + writePathSpecificSpecs, +}; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/api-docs/scripts/generate-openapi-articles.ts b/api-docs/scripts/generate-openapi-articles.ts new file mode 100644 index 0000000000..7b1ca6707f --- /dev/null +++ b/api-docs/scripts/generate-openapi-articles.ts @@ -0,0 +1,1145 @@ +#!/usr/bin/env node +/** + * Generate OpenAPI Articles Script + * + * Generates Hugo data files and content pages from OpenAPI specifications + * for all InfluxDB products. + * + * Products are auto-discovered by scanning api-docs/ for .config.yml files. + * Hugo paths, menu keys, and static file names are derived from directory + * structure and existing Hugo frontmatter. + * + * This script: + * 1. Discovers products from .config.yml files + * 2. Cleans output directories (unless --no-clean) + * 3. Transforms documentation links in specs + * 4. Copies specs to static directory for download + * 5. Generates tag-based data fragments (YAML and JSON) + * 6. 
Generates Hugo content pages from article data + * + * Usage: + * node generate-openapi-articles.js # Clean and generate all products + * node generate-openapi-articles.js influxdb3-core # Clean and generate single product + * node generate-openapi-articles.js --no-clean # Generate without cleaning + * node generate-openapi-articles.js --dry-run # Preview what would be cleaned + * node generate-openapi-articles.js --skip-fetch # Skip getswagger.sh fetch step + * node generate-openapi-articles.js --validate-links # Validate documentation links + * + * @module generate-openapi-articles + */ + +import { execSync } from 'child_process'; +import * as path from 'path'; +import * as fs from 'fs'; + +// Import the OpenAPI to Hugo converter +const openapiPathsToHugo = require('./openapi-paths-to-hugo-data/index.js'); + +// --------------------------------------------------------------------------- +// Interfaces +// --------------------------------------------------------------------------- + +/** Operation metadata structure from tag-based articles */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; + compatVersion?: string; + externalDocs?: { + description: string; + url: string; + }; +} + +/** Article data structure from articles.yml */ +interface ArticleData { + articles: Array<{ + path: string; + fields: { + name?: string; + title?: string; + description?: string; + tag?: string; + isConceptual?: boolean; + showSecuritySchemes?: boolean; + tagDescription?: string; + menuGroup?: string; + staticFilePath?: string; + operations?: OperationMeta[]; + related?: (string | { title: string; href: string })[]; + source?: string; + weight?: number; + }; + }>; +} + +/** Single API entry from .config.yml */ +interface ApiConfigEntry { + root: string; + 'x-influxdata-docs-aliases'?: string[]; +} + +/** Parsed .config.yml file */ +interface DotConfig { + 'x-influxdata-product-name'?: string; + apis?: Record; +} + 
+/** A resolved API section within a product */ +interface DiscoveredApi { + /** API key from .config.yml (e.g., 'v3', 'management', 'data') */ + apiKey: string; + /** Version number from the @-suffix (e.g., '3', '0', '2') */ + version: string; + /** Resolved full path to the spec file */ + specFile: string; + /** Hugo section slug: 'api' or 'management-api' */ + sectionSlug: string; +} + +/** A fully resolved product discovered from .config.yml */ +interface DiscoveredProduct { + /** Directory containing .config.yml */ + configDir: string; + /** Product directory relative to api-docs/ (e.g., 'influxdb3/core') */ + productDir: string; + /** Human-readable name from x-influxdata-product-name */ + productName: string; + /** Hugo content directory (e.g., 'content/influxdb3/core') */ + pagesDir: string; + /** Hugo menu key from cascade.product (e.g., 'influxdb3_core') */ + menuKey: string; + /** True if hand-maintained api/_index.md has its own menu entry */ + skipParentMenu: boolean; + /** Static file directory name (e.g., 'influxdb3-core') */ + staticDirName: string; + /** Resolved API sections */ + apis: DiscoveredApi[]; +} + +/** Product data from products.yml with api_path */ +interface ProductData { + name: string; + api_path?: string; + alt_link_key?: string; +} + +// --------------------------------------------------------------------------- +// Constants and CLI flags +// --------------------------------------------------------------------------- + +const DOCS_ROOT = '.'; +const API_DOCS_ROOT = 'api-docs'; + +const validateLinks = process.argv.includes('--validate-links'); +const skipFetch = process.argv.includes('--skip-fetch'); +const noClean = process.argv.includes('--no-clean'); +const dryRun = process.argv.includes('--dry-run'); + +// --------------------------------------------------------------------------- +// Utility functions +// --------------------------------------------------------------------------- + +/** + * Load products with API paths from 
data/products.yml. + * Returns a map of alt_link_key to API path for alt_links generation. + */ +function loadApiProducts(): Map { + const yaml = require('js-yaml'); + const productsFile = path.join(DOCS_ROOT, 'data/products.yml'); + + if (!fs.existsSync(productsFile)) { + console.warn('⚠️ products.yml not found, skipping alt_links generation'); + return new Map(); + } + + const productsContent = fs.readFileSync(productsFile, 'utf8'); + const products = yaml.load(productsContent) as Record; + const apiProducts = new Map(); + + for (const [, product] of Object.entries(products)) { + if (product.api_path && product.alt_link_key) { + apiProducts.set(product.alt_link_key, product.api_path); + } + } + + return apiProducts; +} + +const apiProductsMap = loadApiProducts(); + +/** Execute a shell command and handle errors */ +function execCommand(command: string, description?: string): void { + try { + if (description) { + console.log(`\n${description}...`); + } + console.log(`Executing: ${command}\n`); + execSync(command, { stdio: 'inherit' }); + } catch (error) { + console.error(`\n❌ Error executing command: ${command}`); + if (error instanceof Error) { + console.error(error.message); + } + process.exit(1); + } +} + +// --------------------------------------------------------------------------- +// Auto-discovery functions +// --------------------------------------------------------------------------- + +/** + * Recursively find all .config.yml files under api-docs/. + * Excludes the root api-docs/.config.yml and internal directories. 
+ */ +function findConfigFiles(rootDir: string): string[] { + const configs: string[] = []; + const skipDirs = new Set([ + 'node_modules', + 'dist', + '_build', + 'scripts', + 'openapi', + ]); + + function scanDir(dir: string, depth: number): void { + if (depth > 5) return; + let entries: fs.Dirent[]; + try { + entries = fs.readdirSync(dir, { withFileTypes: true }); + } catch { + return; + } + for (const entry of entries) { + if (skipDirs.has(entry.name)) continue; + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + scanDir(fullPath, depth + 1); + } else if (entry.name === '.config.yml' && dir !== rootDir) { + configs.push(fullPath); + } + } + } + + scanDir(rootDir, 0); + return configs.sort(); +} + +/** + * Parse an API entry key like 'v3@3' into apiKey and version. + */ +function parseApiEntry(entry: string): { apiKey: string; version: string } { + const atIdx = entry.indexOf('@'); + if (atIdx === -1) { + return { apiKey: entry, version: '0' }; + } + return { + apiKey: entry.substring(0, atIdx), + version: entry.substring(atIdx + 1), + }; +} + +/** + * Determine Hugo section slug from API key. + * 'management' → 'management-api', everything else → 'api'. + */ +function getSectionSlug(apiKey: string): string { + if (apiKey === 'management') return 'management-api'; + return 'api'; +} + +/** + * Derive a clean static directory name from a product directory path. + * Replaces path separators and underscores with hyphens. + * + * @example 'influxdb3/core' → 'influxdb3-core' + * @example 'enterprise_influxdb/v1' → 'enterprise-influxdb-v1' + */ +function deriveStaticDirName(productDir: string): string { + return productDir.replace(/[/_]/g, '-'); +} + +/** + * Read the cascade.product field from a product's _index.md frontmatter. + * This value serves as the Hugo menu key. 
+ */ +function readMenuKey(pagesDir: string): string { + const yaml = require('js-yaml'); + const indexFile = path.join(pagesDir, '_index.md'); + + if (!fs.existsSync(indexFile)) { + console.warn(`⚠️ Product index not found: ${indexFile}`); + return ''; + } + + const content = fs.readFileSync(indexFile, 'utf8'); + const fmMatch = content.match(/^---\n([\s\S]*?)\n---/); + if (!fmMatch) return ''; + + try { + const fm = yaml.load(fmMatch[1]) as Record; + const cascade = fm.cascade as Record | undefined; + if (cascade?.product) return cascade.product; + + // Fallback: first key of the menu map + if (fm.menu && typeof fm.menu === 'object') { + const keys = Object.keys(fm.menu as Record); + if (keys.length > 0) return keys[0]; + } + } catch { + console.warn(`⚠️ Could not parse frontmatter in ${indexFile}`); + } + + return ''; +} + +/** + * Check whether a hand-maintained api/_index.md already has a menu entry. + * If so, the generator should skip adding its own parent menu entry. + */ +function hasExistingApiMenu(pagesDir: string): boolean { + const yaml = require('js-yaml'); + const apiIndex = path.join(pagesDir, 'api', '_index.md'); + + if (!fs.existsSync(apiIndex)) return false; + + const content = fs.readFileSync(apiIndex, 'utf8'); + const fmMatch = content.match(/^---\n([\s\S]*?)\n---/); + if (!fmMatch) return false; + + try { + const fm = yaml.load(fmMatch[1]) as Record; + return !!fm.menu; + } catch { + return false; + } +} + +/** + * Discover all products by scanning api-docs/ for .config.yml files. + * Derives Hugo paths from directory structure and existing frontmatter. 
+ */ +function discoverProducts(): DiscoveredProduct[] { + const yaml = require('js-yaml'); + const products: DiscoveredProduct[] = []; + const configFiles = findConfigFiles(API_DOCS_ROOT); + + for (const configPath of configFiles) { + const configDir = path.dirname(configPath); + const productDir = path.relative(API_DOCS_ROOT, configDir); + + let config: DotConfig; + try { + const raw = fs.readFileSync(configPath, 'utf8'); + config = yaml.load(raw) as DotConfig; + } catch (err) { + console.warn(`⚠️ Could not parse ${configPath}: ${err}`); + continue; + } + + if (!config.apis || Object.keys(config.apis).length === 0) { + continue; + } + + const pagesDir = path.join(DOCS_ROOT, 'content', productDir); + const staticDirName = deriveStaticDirName(productDir); + const menuKey = readMenuKey(pagesDir); + const skipParentMenu = hasExistingApiMenu(pagesDir); + + // Parse API entries, skipping compatibility specs + const apis: DiscoveredApi[] = []; + for (const [entryKey, entry] of Object.entries(config.apis)) { + const { apiKey, version } = parseApiEntry(entryKey); + + // Skip v1-compatibility entries (being removed in pipeline restructure) + if (apiKey.includes('compatibility')) continue; + + const specFile = path.join(configDir, entry.root); + const sectionSlug = getSectionSlug(apiKey); + + apis.push({ apiKey, version, specFile, sectionSlug }); + } + + if (apis.length === 0) continue; + + products.push({ + configDir, + productDir, + productName: config['x-influxdata-product-name'] || productDir, + pagesDir, + menuKey, + skipParentMenu, + staticDirName, + apis, + }); + } + + return products; +} + +// --------------------------------------------------------------------------- +// Cleanup functions +// --------------------------------------------------------------------------- + +/** + * Get all paths that would be cleaned for a product. 
+ * + * @param product - The product to clean + * @param allStaticDirNames - Names of all products (to avoid prefix collisions) + */ +function getCleanupPaths( + product: DiscoveredProduct, + allStaticDirNames: string[] +): { + directories: string[]; + files: string[]; +} { + const staticPath = path.join(DOCS_ROOT, 'static/openapi'); + const directories: string[] = []; + const files: string[] = []; + + // Tag specs directory: static/openapi/{staticDirName}/ + const tagSpecsDir = path.join(staticPath, product.staticDirName); + if (fs.existsSync(tagSpecsDir)) { + directories.push(tagSpecsDir); + } + + // Article data directory: data/article_data/influxdb/{staticDirName}/ + const articleDataDir = path.join( + DOCS_ROOT, + `data/article_data/influxdb/${product.staticDirName}` + ); + if (fs.existsSync(articleDataDir)) { + directories.push(articleDataDir); + } + + // Content pages: content/{pagesDir}/{sectionSlug}/ for each API + for (const api of product.apis) { + const contentDir = path.join(product.pagesDir, api.sectionSlug); + if (fs.existsSync(contentDir)) { + directories.push(contentDir); + } + } + + // Root spec files: static/openapi/{staticDirName}*.yml and .json + // Avoid matching files that belong to products with longer names + // (e.g., 'influxdb-cloud' should not match 'influxdb-cloud-dedicated-*.yml') + const longerPrefixes = allStaticDirNames.filter( + (n) => + n !== product.staticDirName && + n.startsWith(product.staticDirName + '-') + ); + + if (fs.existsSync(staticPath)) { + const staticFiles = fs.readdirSync(staticPath); + staticFiles + .filter((f) => { + if (!f.startsWith(product.staticDirName)) return false; + // Exclude files belonging to a longer-named product + for (const longer of longerPrefixes) { + if (f.startsWith(longer)) return false; + } + return f.endsWith('.yml') || f.endsWith('.json'); + }) + .forEach((f) => { + files.push(path.join(staticPath, f)); + }); + } + + return { directories, files }; +} + +/** Clean output directories for a 
product before regeneration. */ +function cleanProductOutputs( + product: DiscoveredProduct, + allStaticDirNames: string[] +): void { + const { directories, files } = getCleanupPaths(product, allStaticDirNames); + + for (const dir of directories) { + console.log(`🧹 Removing directory: ${dir}`); + fs.rmSync(dir, { recursive: true, force: true }); + } + + for (const file of files) { + console.log(`🧹 Removing file: ${file}`); + fs.unlinkSync(file); + } + + const total = directories.length + files.length; + if (total > 0) { + console.log( + `✓ Cleaned ${directories.length} directories, ${files.length} files for ${product.staticDirName}` + ); + } +} + +/** Display dry-run preview of what would be cleaned. */ +function showDryRunPreview( + product: DiscoveredProduct, + allStaticDirNames: string[] +): void { + const { directories, files } = getCleanupPaths(product, allStaticDirNames); + + console.log( + `\nDRY RUN: Would clean the following for ${product.staticDirName}:\n` + ); + + if (directories.length > 0) { + console.log('Directories to remove:'); + directories.forEach((dir) => console.log(` - ${dir}`)); + } + + if (files.length > 0) { + console.log('\nFiles to remove:'); + files.forEach((file) => console.log(` - ${file}`)); + } + + if (directories.length === 0 && files.length === 0) { + console.log(' (no files to clean)'); + } + + console.log( + `\nSummary: ${directories.length} directories, ${files.length} files would be removed` + ); +} + +// --------------------------------------------------------------------------- +// Link transformation +// --------------------------------------------------------------------------- + +/** Fields that can contain markdown with links */ +const MARKDOWN_FIELDS = new Set(['description', 'summary']); + +/** Link placeholder pattern */ +const LINK_PATTERN = /\/influxdb\/version\//g; + +/** + * Transform documentation links in OpenAPI spec markdown fields. + * Replaces `/influxdb/version/` with the actual product path. 
+ */ +function transformDocLinks( + spec: Record, + productPath: string +): Record { + function transformValue(value: unknown): unknown { + if (typeof value === 'string') { + return value.replace(LINK_PATTERN, `${productPath}/`); + } + if (Array.isArray(value)) { + return value.map(transformValue); + } + if (value !== null && typeof value === 'object') { + return transformObject(value as Record); + } + return value; + } + + function transformObject( + obj: Record + ): Record { + const result: Record = {}; + for (const [key, value] of Object.entries(obj)) { + if (MARKDOWN_FIELDS.has(key) && typeof value === 'string') { + result[key] = value.replace(LINK_PATTERN, `${productPath}/`); + } else if (value !== null && typeof value === 'object') { + result[key] = transformValue(value); + } else { + result[key] = value; + } + } + return result; + } + + return transformObject(spec); +} + +/** + * Resolve a URL path to a content file path. + * + * @example '/influxdb3/core/api/auth/' → 'content/influxdb3/core/api/auth/_index.md' + */ +function resolveContentPath(urlPath: string, contentDir: string): string { + const normalized = urlPath.replace(/\/$/, ''); + const indexPath = path.join(contentDir, normalized, '_index.md'); + const directPath = path.join(contentDir, normalized + '.md'); + + if (fs.existsSync(indexPath)) return indexPath; + if (fs.existsSync(directPath)) return directPath; + return indexPath; +} + +/** + * Validate that transformed links point to existing content. 
+ */ +function validateDocLinks( + spec: Record, + contentDir: string +): string[] { + const errors: string[] = []; + const linkPattern = /\[([^\]]+)\]\(([^)]+)\)/g; + + function extractLinks(value: unknown, jsonPath: string): void { + if (typeof value === 'string') { + let match; + while ((match = linkPattern.exec(value)) !== null) { + const [, linkText, linkUrl] = match; + if (linkUrl.startsWith('/') && !linkUrl.startsWith('//')) { + const contentPath = resolveContentPath(linkUrl, contentDir); + if (!fs.existsSync(contentPath)) { + errors.push( + `Broken link at ${jsonPath}: [${linkText}](${linkUrl})` + ); + } + } + } + linkPattern.lastIndex = 0; + } else if (Array.isArray(value)) { + value.forEach((item, index) => + extractLinks(item, `${jsonPath}[${index}]`) + ); + } else if (value !== null && typeof value === 'object') { + for (const [key, val] of Object.entries( + value as Record + )) { + extractLinks(val, `${jsonPath}.${key}`); + } + } + } + + extractLinks(spec, 'spec'); + return errors; +} + +// --------------------------------------------------------------------------- +// Page generation +// --------------------------------------------------------------------------- + +/** + * Options for generating tag-based pages from article data + */ +interface GenerateTagPagesOptions { + articlesPath: string; + contentPath: string; + sectionSlug: string; + menuKey?: string; + menuParent?: string; + productDescription?: string; + skipParentMenu?: boolean; + specDownloadPath: string; + articleDataKey: string; + articleSection: string; + pathSpecFiles?: Map; +} + +/** + * Generate Hugo content pages from tag-based article data. + * + * Creates markdown files with frontmatter from article metadata. + * Each article becomes a page with type: api that renders via Hugo-native + * templates. Includes operation metadata for TOC generation. 
+ */ +function generateTagPagesFromArticleData( + options: GenerateTagPagesOptions +): void { + const { + articlesPath, + contentPath, + sectionSlug, + menuKey, + menuParent, + productDescription, + skipParentMenu, + specDownloadPath, + articleDataKey, + articleSection, + } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent) as ArticleData; + + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + + if (!fs.existsSync(contentPath)) { + fs.mkdirSync(contentPath, { recursive: true }); + } + + // Generate parent _index.md for the section + const sectionDir = path.join(contentPath, sectionSlug); + const parentIndexFile = path.join(sectionDir, '_index.md'); + + if (!fs.existsSync(sectionDir)) { + fs.mkdirSync(sectionDir, { recursive: true }); + } + + if (!fs.existsSync(parentIndexFile)) { + const apiDescription = + productDescription || + `Use the InfluxDB HTTP API to write data, query data, and manage databases, tables, and tokens.`; + + const parentFrontmatter: Record = { + title: menuParent || 'InfluxDB HTTP API', + description: apiDescription, + weight: 104, + type: 'api', + articleDataKey, + articleSection, + }; + + if (menuKey && !skipParentMenu) { + parentFrontmatter.menu = { + [menuKey]: { + name: menuParent || 'InfluxDB HTTP API', + parent: 'Reference', + }, + }; + } + + if (apiProductsMap.size > 0) { + const altLinks: Record = {}; + apiProductsMap.forEach((apiPath, productName) => { + altLinks[productName] = apiPath; + }); + parentFrontmatter.alt_links = altLinks; + } + + const introText = apiDescription.replace( + 'InfluxDB', + '{{% product-name %}}' + ); + const parentContent = `--- 
+${yaml.dump(parentFrontmatter)}--- + +${introText} + +{{< children >}} +`; + + fs.writeFileSync(parentIndexFile, parentContent); + console.log(`✓ Generated parent index at ${parentIndexFile}`); + } + + // Generate "All endpoints" page + const allEndpointsDir = path.join(sectionDir, 'all-endpoints'); + const allEndpointsFile = path.join(allEndpointsDir, '_index.md'); + + if (!fs.existsSync(allEndpointsDir)) { + fs.mkdirSync(allEndpointsDir, { recursive: true }); + } + + const allEndpointsFrontmatter: Record = { + title: 'All endpoints', + description: `View all API endpoints sorted by path.`, + type: 'api', + layout: 'all-endpoints', + weight: 999, + isAllEndpoints: true, + articleDataKey, + articleSection, + }; + + if (menuKey) { + allEndpointsFrontmatter.menu = { + [menuKey]: { + name: 'All endpoints', + parent: menuParent || 'InfluxDB HTTP API', + }, + }; + } + + if (apiProductsMap.size > 0) { + const altLinks: Record = {}; + apiProductsMap.forEach((apiPath, productName) => { + altLinks[productName] = apiPath; + }); + allEndpointsFrontmatter.alt_links = altLinks; + } + + const allEndpointsContent = `--- +${yaml.dump(allEndpointsFrontmatter)}--- + +All {{% product-name %}} API endpoints, sorted by path. +`; + + fs.writeFileSync(allEndpointsFile, allEndpointsContent); + console.log(`✓ Generated all-endpoints page at ${allEndpointsFile}`); + + // Generate a page for each article (tag) + for (const article of data.articles) { + const pagePath = path.join(contentPath, article.path); + const pageFile = path.join(pagePath, '_index.md'); + + if (!fs.existsSync(pagePath)) { + fs.mkdirSync(pagePath, { recursive: true }); + } + + const title = article.fields.title || article.fields.name || article.path; + const isConceptual = article.fields.isConceptual === true; + const weight = article.fields.weight ?? 100; + + const frontmatter: Record = { + title, + description: article.fields.description || `API reference for ${title}`, + type: 'api', + layout: isConceptual ? 
'single' : 'list', + staticFilePath: article.fields.staticFilePath, + weight, + tag: article.fields.tag, + isConceptual, + menuGroup: article.fields.menuGroup, + specDownloadPath, + articleDataKey, + articleSection, + }; + + if ( + !isConceptual && + article.fields.operations && + article.fields.operations.length > 0 + ) { + frontmatter.operations = article.fields.operations; + } + + if (isConceptual && article.fields.tagDescription) { + frontmatter.tagDescription = article.fields.tagDescription; + } + + if (article.fields.showSecuritySchemes) { + frontmatter.showSecuritySchemes = true; + } + + // Add related links if present + if ( + article.fields.related && + Array.isArray(article.fields.related) && + article.fields.related.length > 0 + ) { + frontmatter.related = article.fields.related; + } + + // Add client library related link for InfluxDB 3 products + if (contentPath.includes('influxdb3/') && !isConceptual) { + const influxdb3Match = contentPath.match(/influxdb3\/([^/]+)/); + if (influxdb3Match) { + const productSegment = influxdb3Match[1]; + const clientLibLink = { + title: 'InfluxDB 3 API client libraries', + href: `/influxdb3/${productSegment}/reference/client-libraries/v3/`, + }; + const existing = + (frontmatter.related as Array<{ title: string; href: string }>) || []; + const alreadyHas = existing.some( + (r) => typeof r === 'object' && r.href === clientLibLink.href + ); + if (!alreadyHas) { + frontmatter.related = [...existing, clientLibLink]; + } + } + } + + if (apiProductsMap.size > 0) { + const altLinks: Record = {}; + apiProductsMap.forEach((apiPath, productName) => { + altLinks[productName] = apiPath; + }); + frontmatter.alt_links = altLinks; + } + + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + + fs.writeFileSync(pageFile, pageContent); + } + + console.log( + `✓ Generated ${data.articles.length} tag-based content pages in ${contentPath}` + ); +} + +// --------------------------------------------------------------------------- +// 
Spec processing +// --------------------------------------------------------------------------- + +/** + * Process a single API section: transform links, write static spec, + * generate tag data, and create Hugo content pages. + */ +function processApiSection( + product: DiscoveredProduct, + api: DiscoveredApi, + staticBasePath: string +): void { + const yaml = require('js-yaml'); + const isDualApi = product.apis.length > 1; + + console.log(`\n📄 Processing ${api.sectionSlug} section (${api.apiKey})`); + + // --- 1. Determine paths --- + + // Root spec download: single → {dir}.yml, dual → {dir}-{section}.yml + const specSuffix = isDualApi ? `-${api.sectionSlug}` : ''; + const staticSpecPath = path.join( + staticBasePath, + `${product.staticDirName}${specSuffix}.yml` + ); + const staticJsonSpecPath = staticSpecPath.replace('.yml', '.json'); + + // Tag specs directory + const tagSpecsBase = isDualApi + ? path.join(staticBasePath, product.staticDirName, api.sectionSlug) + : path.join(staticBasePath, product.staticDirName); + + // Article data + const articlesPath = path.join( + DOCS_ROOT, + 'data/article_data/influxdb', + product.staticDirName, + api.sectionSlug + ); + + // Download path for frontmatter + const specDownloadPath = `/openapi/${product.staticDirName}${specSuffix}.yml`; + + // Path spec files for per-operation rendering + const pathSpecsDir = isDualApi + ? path.join( + staticBasePath, + product.staticDirName, + api.sectionSlug, + 'paths' + ) + : path.join(staticBasePath, product.staticDirName, 'paths'); + + // --- 2. 
Read and transform spec --- + + if (!fs.existsSync(api.specFile)) { + console.warn(`⚠️ Spec file not found: ${api.specFile}`); + return; + } + + const specContent = fs.readFileSync(api.specFile, 'utf8'); + const specObject = yaml.load(specContent) as Record; + + const productPath = `/${product.productDir}`; + const transformedSpec = transformDocLinks(specObject, productPath); + console.log( + `✓ Transformed documentation links for ${api.apiKey} to ${productPath}` + ); + + // Validate links if enabled + if (validateLinks) { + const contentDir = path.join(DOCS_ROOT, 'content'); + const linkErrors = validateDocLinks(transformedSpec, contentDir); + if (linkErrors.length > 0) { + console.warn(`\n⚠️ Link validation warnings for ${api.specFile}:`); + linkErrors.forEach((err) => console.warn(` ${err}`)); + } + } + + // --- 3. Write transformed spec to static folder --- + + if (!fs.existsSync(staticBasePath)) { + fs.mkdirSync(staticBasePath, { recursive: true }); + } + + fs.writeFileSync(staticSpecPath, yaml.dump(transformedSpec)); + console.log(`✓ Wrote transformed spec to ${staticSpecPath}`); + + fs.writeFileSync( + staticJsonSpecPath, + JSON.stringify(transformedSpec, null, 2) + ); + console.log(`✓ Generated JSON spec at ${staticJsonSpecPath}`); + + // --- 4. Generate tag-based data --- + + console.log( + `\n📋 Generating tag-based data for ${api.apiKey} in ${tagSpecsBase}...` + ); + openapiPathsToHugo.generateHugoDataByTag({ + specFile: staticSpecPath, + dataOutPath: tagSpecsBase, + articleOutPath: articlesPath, + includePaths: true, + }); + + // Generate path-specific specs + openapiPathsToHugo.generatePathSpecificSpecs(staticSpecPath, pathSpecsDir); + + // --- 5. 
Generate Hugo content pages --- + + generateTagPagesFromArticleData({ + articlesPath, + contentPath: product.pagesDir, + sectionSlug: api.sectionSlug, + menuKey: product.menuKey, + menuParent: 'InfluxDB HTTP API', + skipParentMenu: product.skipParentMenu, + specDownloadPath, + articleDataKey: product.staticDirName, + articleSection: api.sectionSlug, + }); +} + +/** + * Process a single product: clean outputs and process each API section. + */ +function processProduct( + product: DiscoveredProduct, + allStaticDirNames: string[] +): void { + console.log('\n' + '='.repeat(80)); + console.log(`Processing ${product.productName}`); + console.log('='.repeat(80)); + + // Clean output directories before regeneration + if (!noClean && !dryRun) { + cleanProductOutputs(product, allStaticDirNames); + } + + const staticBasePath = path.join(DOCS_ROOT, 'static/openapi'); + + // Fetch specs if needed + if (!skipFetch) { + const getswaggerScript = path.join(API_DOCS_ROOT, 'getswagger.sh'); + if (fs.existsSync(getswaggerScript)) { + // The build function in generate-api-docs.sh handles per-product + // fetching. When called standalone, use product directory name. 
+ execCommand( + `cd ${API_DOCS_ROOT} && ./getswagger.sh ${product.productDir} -B`, + `Fetching OpenAPI spec for ${product.productName}` + ); + } else { + console.log(`⚠️ getswagger.sh not found, skipping fetch step`); + } + } else { + console.log(`⏭️ Skipping getswagger.sh (--skip-fetch flag set)`); + } + + // Process each API section independently + for (const api of product.apis) { + processApiSection(product, api, staticBasePath); + } + + console.log( + `\n✅ Successfully processed ${product.productName}\n` + ); +} + +// --------------------------------------------------------------------------- +// Main +// --------------------------------------------------------------------------- + +function main(): void { + const args = process.argv.slice(2).filter((arg) => !arg.startsWith('--')); + + // Discover all products from .config.yml files + const allProducts = discoverProducts(); + + if (allProducts.length === 0) { + console.error( + '❌ No products discovered. Ensure .config.yml files exist under api-docs/.' 
+ ); + process.exit(1); + } + + // Determine which products to process + let productsToProcess: DiscoveredProduct[]; + + if (args.length === 0) { + productsToProcess = allProducts; + console.log( + `\n📋 Discovered ${allProducts.length} products, processing all...\n` + ); + } else { + // Match by staticDirName or productDir + productsToProcess = []; + const invalid: string[] = []; + + for (const arg of args) { + const found = allProducts.find( + (p) => + p.staticDirName === arg || + p.productDir === arg || + p.productDir.replace(/\//g, '-') === arg + ); + if (found) { + productsToProcess.push(found); + } else { + invalid.push(arg); + } + } + + if (invalid.length > 0) { + console.error( + `\n❌ Unknown product identifier(s): ${invalid.join(', ')}` + ); + console.error('\nDiscovered products:'); + allProducts.forEach((p) => { + console.error( + ` - ${p.staticDirName} (${p.productName}) [${p.productDir}]` + ); + }); + process.exit(1); + } + + console.log( + `\n📋 Processing specified products: ${productsToProcess.map((p) => p.staticDirName).join(', ')}\n` + ); + } + + // Collect all staticDirNames for prefix-safe cleanup + const allStaticDirNames = allProducts.map((p) => p.staticDirName); + + // Handle dry-run mode + if (dryRun) { + console.log('\n📋 DRY RUN MODE - No files will be modified\n'); + productsToProcess.forEach((p) => showDryRunPreview(p, allStaticDirNames)); + console.log('\nDry run complete. 
No files were modified.'); + return; + } + + // Process each product + productsToProcess.forEach((product) => { + processProduct(product, allStaticDirNames); + }); + + console.log('\n' + '='.repeat(80)); + console.log('✅ All products processed successfully!'); + console.log('='.repeat(80) + '\n'); +} + +// Execute if run directly +if (require.main === module) { + main(); +} + +// Export for use as a module +export { + discoverProducts, + processProduct, + processApiSection, + transformDocLinks, + validateDocLinks, + resolveContentPath, + deriveStaticDirName, + getSectionSlug, + parseApiEntry, + readMenuKey, + MARKDOWN_FIELDS, + LINK_PATTERN, +}; diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/index.ts b/api-docs/scripts/openapi-paths-to-hugo-data/index.ts new file mode 100644 index 0000000000..185b1e508c --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/index.ts @@ -0,0 +1,1415 @@ +/** + * OpenAPI to Hugo Data Converter + * + * Converts OpenAPI v3 specifications into Hugo-compatible data files. + * Generates both YAML and JSON versions of spec fragments grouped by path. 
+ * + * @module openapi-paths-to-hugo-data + */ + +import * as yaml from 'js-yaml'; +import * as fs from 'fs'; +import * as path from 'path'; + +/** + * Related link with title and href (from x-influxdata-related) + */ +interface RelatedLink { + title: string; + href: string; +} + +/** + * OpenAPI path item object + */ +interface PathItem { + get?: Operation; + post?: Operation; + put?: Operation; + patch?: Operation; + delete?: Operation; + options?: Operation; + head?: Operation; + trace?: Operation; + parameters?: Parameter[]; + [key: string]: unknown; +} + +/** + * OpenAPI operation object + */ +interface Operation { + operationId?: string; + summary?: string; + description?: string; + tags?: string[]; + parameters?: Parameter[]; + requestBody?: RequestBody; + responses?: Record; + externalDocs?: ExternalDocs; + /** Compatibility version for migration context (v1 or v2) */ + 'x-compatibility-version'?: string; + /** Related documentation links as plain URLs */ + 'x-influxdatadocs-related'?: string[]; + /** Related documentation links with title and href */ + 'x-related'?: RelatedLink[]; + [key: string]: unknown; +} + +/** + * OpenAPI parameter object + */ +interface Parameter { + name: string; + in: 'query' | 'header' | 'path' | 'cookie'; + description?: string; + required?: boolean; + schema?: Schema; + [key: string]: unknown; +} + +/** + * OpenAPI request body object + */ +interface RequestBody { + description?: string; + content?: Record; + required?: boolean; + [key: string]: unknown; +} + +/** + * OpenAPI response object + */ +interface Response { + description: string; + content?: Record; + headers?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI media type object + */ +interface MediaType { + schema?: Schema; + example?: unknown; + examples?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI schema object + */ +interface Schema { + type?: string; + format?: string; + description?: string; + properties?: Record; + items?: Schema; + 
required?: string[]; + [key: string]: unknown; +} + +/** + * OpenAPI header object + */ +interface Header { + description?: string; + schema?: Schema; + [key: string]: unknown; +} + +/** + * OpenAPI example object + */ +interface Example { + summary?: string; + description?: string; + value?: unknown; + [key: string]: unknown; +} + +/** + * OpenAPI document structure + */ +interface OpenAPIDocument { + openapi: string; + info: Info; + paths: Record; + components?: Components; + servers?: Server[]; + tags?: Tag[]; + description?: string; + 'x-pathGroup'?: string; + [key: string]: unknown; +} + +/** + * OpenAPI info object + */ +interface Info { + title: string; + version: string; + description?: string; + termsOfService?: string; + contact?: Contact; + license?: License; + [key: string]: unknown; +} + +/** + * OpenAPI contact object + */ +interface Contact { + name?: string; + url?: string; + email?: string; + [key: string]: unknown; +} + +/** + * OpenAPI license object + */ +interface License { + name: string; + url?: string; + [key: string]: unknown; +} + +/** + * OpenAPI components object + */ +interface Components { + schemas?: Record; + responses?: Record; + parameters?: Record; + requestBodies?: Record; + headers?: Record; + securitySchemes?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI security scheme object + */ +interface SecurityScheme { + type: string; + description?: string; + [key: string]: unknown; +} + +/** + * OpenAPI server object + */ +interface Server { + url: string; + description?: string; + variables?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI server variable object + */ +interface ServerVariable { + default: string; + enum?: string[]; + description?: string; + [key: string]: unknown; +} + +/** + * OpenAPI tag object + */ +interface Tag { + name: string; + description?: string; + externalDocs?: ExternalDocs; + /** Indicates this is a conceptual/supplementary tag (no operations) */ + 'x-traitTag'?: boolean; + /** Related 
documentation links as plain URLs */ + 'x-influxdatadocs-related'?: string[]; + /** Related documentation links with title and href */ + 'x-related'?: RelatedLink[]; + [key: string]: unknown; +} + +/** + * Operation metadata for TOC generation + */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; + /** Compatibility version (v1 or v2) for migration context */ + compatVersion?: string; + /** External documentation link */ + externalDocs?: { + description: string; + url: string; + }; + /** Related documentation links (plain URLs) */ + related?: string[]; + /** Related documentation links with title and href */ + relatedLinks?: RelatedLink[]; +} + +/** + * OpenAPI external docs object + */ +interface ExternalDocs { + url: string; + description?: string; + [key: string]: unknown; +} + +/** + * Article metadata for Hugo + */ +interface Article { + path: string; + fields: { + name: string; + describes: string[]; + title?: string; + description?: string; + tags?: string[]; + source?: string; + staticFilePath?: string; + /** Related documentation links (plain URLs or {title, href} objects) */ + related?: (string | RelatedLink)[]; + /** OpenAPI tags from operations (for Hugo frontmatter) */ + apiTags?: string[]; + /** Menu display name (actual endpoint path, different from Hugo path) */ + menuName?: string; + /** OpenAPI tag name (for tag-based articles) */ + tag?: string; + /** Whether this is a conceptual tag (x-traitTag) */ + isConceptual?: boolean; + /** Whether to show security schemes section */ + showSecuritySchemes?: boolean; + /** Tag description from OpenAPI spec */ + tagDescription?: string; + /** Sidebar navigation group */ + menuGroup?: string; + /** Operations metadata for TOC generation */ + operations?: OperationMeta[]; + /** Page weight for ordering in navigation */ + weight?: number; + }; +} + +/** + * Article collection for Hugo data files + */ +interface ArticleCollection { + 
articles: Article[]; +} + +/** + * Options for generating Hugo data + */ +export interface GenerateHugoDataOptions { + /** Path to the OpenAPI spec file */ + specFile: string; + /** Output path for generated OpenAPI path fragments */ + dataOutPath: string; + /** Output path for article metadata */ + articleOutPath: string; +} + +/** + * Options for writing OpenAPI article data + */ +interface WriteOpenapiArticleDataOptions { + /** File pattern to match when filtering files */ + filePattern?: string; +} + +/** + * Read a YAML file and parse it + * + * @param filepath - Path to the YAML file + * @param encoding - File encoding (default: 'utf8') + * @returns Parsed YAML content + */ +function readFile( + filepath: string, + encoding: BufferEncoding = 'utf8' +): OpenAPIDocument { + const content = fs.readFileSync(filepath, encoding); + return yaml.load(content) as OpenAPIDocument; +} + +/** + * Write data to a YAML file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeDataFile(data: unknown, outputTo: string): void { + fs.writeFileSync(outputTo, yaml.dump(data)); +} + +/** + * Write data to a JSON file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeJsonFile(data: unknown, outputTo: string): void { + fs.writeFileSync(outputTo, JSON.stringify(data, null, 2)); +} + +/** + * OpenAPI utility functions + */ +const openapiUtils = { + /** + * Check if a path fragment is a placeholder (e.g., {id}) + * + * @param str - Path fragment to check + * @returns True if the fragment is a placeholder + */ + isPlaceholderFragment(str: string): boolean { + const placeholderRegex = /^\{.*\}$/; + return placeholderRegex.test(str); + }, +}; + +/** + * Convert tag name to URL-friendly slug + * + * @param tagName - Tag name (e.g., "Write data", "Processing engine") + * @returns URL-friendly slug (e.g., "write-data", "processing-engine") + */ +function slugifyTag(tagName: string): string { + return 
tagName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); +} + +/** + * Menu group mappings for tag-based navigation + * Maps OpenAPI tags to sidebar groups + */ +const TAG_MENU_GROUPS: Record = { + // Concepts group + 'Quick start': 'Concepts', + Authentication: 'Concepts', + 'Headers and parameters': 'Concepts', + 'Response codes': 'Concepts', + // Data Operations group + 'Write data': 'Data Operations', + 'Query data': 'Data Operations', + 'Cache data': 'Data Operations', + // Administration group + Database: 'Administration', + Table: 'Administration', + Token: 'Administration', + // Processing Engine group + 'Processing engine': 'Processing Engine', + // Server group + 'Server information': 'Server', + // Compatibility group + 'Compatibility endpoints': 'Compatibility', +}; + +/** + * Get menu group for a tag + * + * @param tagName - Tag name + * @returns Menu group name or 'Other' if not mapped + */ +function getMenuGroupForTag(tagName: string): string { + return TAG_MENU_GROUPS[tagName] || 'Other'; +} + +/** + * HTTP methods to check for operations + */ +const HTTP_METHODS = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', +] as const; + +/** + * Extract all operations from an OpenAPI document grouped by tag + * + * @param openapi - OpenAPI document + * @returns Map of tag name to operations with that tag + */ +function extractOperationsByTag( + openapi: OpenAPIDocument +): Map { + const tagOperations = new Map(); + + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation) { + const opMeta: OperationMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + + // Extract compatibility version if present + if 
(operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + + // Extract x-influxdatadocs-related if present + if ( + operation['x-influxdatadocs-related'] && + Array.isArray(operation['x-influxdatadocs-related']) + ) { + opMeta.related = operation['x-influxdatadocs-related']; + } + + // Extract x-related (title/href objects) if present + if (operation['x-related'] && Array.isArray(operation['x-related'])) { + opMeta.relatedLinks = operation['x-related'] as RelatedLink[]; + } + + // Add operation to each of its tags + (operation.tags || []).forEach((tag) => { + if (!tagOperations.has(tag)) { + tagOperations.set(tag, []); + } + tagOperations.get(tag)!.push(opMeta); + }); + } + }); + }); + + return tagOperations; +} + +/** + * Write OpenAPI specs grouped by tag to separate files + * Generates both YAML and JSON versions per tag + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writeTagOpenapis( + openapi: OpenAPIDocument, + prefix: string, + outPath: string +): void { + const tagOperations = extractOperationsByTag(openapi); + + // Process each tag + tagOperations.forEach((operations, tagName) => { + // Deep copy openapi + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); + + // Filter paths to only include those with operations for this tag + const filteredPaths: Record = {}; + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + const filteredPathItem: PathItem = {}; + let hasOperations = false; + + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation?.tags?.includes(tagName)) { + // Clone the operation and restrict tags to only 
this tag + // This prevents the operation from being rendered multiple times + // (once per tag) when an operation belongs to multiple tags + const filteredOperation = { ...operation, tags: [tagName] }; + filteredPathItem[method] = filteredOperation; + hasOperations = true; + } + }); + + // Include path-level parameters if we have operations + if (hasOperations) { + if (pathItem.parameters) { + filteredPathItem.parameters = pathItem.parameters; + } + filteredPaths[pathKey] = filteredPathItem; + } + }); + + doc.paths = filteredPaths; + + // Filter tags to only include this tag (and trait tags for context) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => tag.name === tagName || tag['x-traitTag'] + ); + } + + // Update info + const tagSlug = slugifyTag(tagName); + doc.info.title = tagName; + doc.info.description = `API reference for ${tagName}`; + doc['x-tagGroup'] = tagName; + + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log( + `Generated tag spec: ${baseFilename}.yaml (${Object.keys(filteredPaths).length} paths, ${operations.length} operations)` + ); + } catch (err) { + console.error(`Error writing tag group ${tagName}:`, err); + } + }); + + // Also create specs for conceptual tags (x-traitTag) without operations + (openapi.tags || []).forEach((tag) => { + if (tag['x-traitTag'] && !tagOperations.has(tag.name)) { + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); + doc.paths = {}; + doc.tags = [tag]; + doc.info.title = tag.name; + doc.info.description = tag.description || `API reference for ${tag.name}`; + doc['x-tagGroup'] = tag.name; + + const tagSlug = slugifyTag(tag.name); + + try { + const baseFilename = `${prefix}${tagSlug}`; + const 
yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log(`Generated conceptual tag spec: ${baseFilename}.yaml`); + } catch (err) { + console.error(`Error writing conceptual tag ${tag.name}:`, err); + } + } + }); +} + +/** + * Convert API path to filename-safe slug + * + * @param apiPath - API path (e.g., "/api/v3/configure/token/admin") + * @returns Filename-safe slug (e.g., "api-v3-configure-token-admin") + */ +function pathToFileSlug(apiPath: string): string { + return apiPath + .replace(/^\//, '') // Remove leading slash + .replace(/\//g, '-') // Replace slashes with dashes + .replace(/[{}]/g, '') // Remove curly braces from path params + .replace(/-+/g, '-') // Collapse multiple dashes + .replace(/-$/, ''); // Remove trailing dash +} + +/** + * Write path-specific OpenAPI specs (one file per exact API path) + * + * Each file contains all HTTP methods for a single path, enabling + * operation pages to filter by method only (no path prefix conflicts). 
+ * + * @param openapi - OpenAPI document + * @param outPath - Output directory path (e.g., "static/openapi/{product}/paths") + * @returns Map of API path to spec file path (for use in frontmatter) + */ +export function writePathSpecificSpecs( + openapi: OpenAPIDocument, + outPath: string +): Map { + const pathSpecFiles = new Map(); + + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + Object.entries(openapi.paths).forEach(([apiPath, pathItem]) => { + // Deep clone pathItem to avoid mutating original + const clonedPathItem: PathItem = JSON.parse(JSON.stringify(pathItem)); + + // Limit each operation to a single tag to prevent duplicate rendering + // Operations with multiple tags would be rendered once per tag + const usedTags = new Set(); + HTTP_METHODS.forEach((method) => { + const operation = clonedPathItem[method] as Operation | undefined; + if (operation?.tags && operation.tags.length > 0) { + // Select the most specific tag to avoid duplicate rendering + // Prefer "Auth token" over "Authentication" for token-related operations + let primaryTag = operation.tags[0]; + if (operation.tags.includes('Auth token')) { + primaryTag = 'Auth token'; + } + operation.tags = [primaryTag]; + usedTags.add(primaryTag); + } + }); + + // Create spec with just this path (all its methods) + // Include global security requirements so auth info displays correctly + const pathSpec: OpenAPIDocument = { + openapi: openapi.openapi, + info: { + ...openapi.info, + title: apiPath, + description: `API reference for ${apiPath}`, + }, + paths: { [apiPath]: clonedPathItem }, + components: openapi.components, // Include for $ref resolution + servers: openapi.servers, + security: openapi.security, // Global security requirements + }; + + // Filter spec-level tags to only include those used by operations + if (openapi.tags) { + pathSpec.tags = openapi.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + + // Write files + const slug = 
pathToFileSlug(apiPath); + const yamlPath = path.resolve(outPath, `${slug}.yaml`); + const jsonPath = path.resolve(outPath, `${slug}.json`); + + writeDataFile(pathSpec, yamlPath); + writeJsonFile(pathSpec, jsonPath); + + // Store the web-accessible path (without "static/" prefix) + // Hugo serves files from static/ at the root, so we extract the path after 'static/' + const staticMatch = yamlPath.match(/static\/(.+)$/); + const webPath = staticMatch ? `/${staticMatch[1]}` : yamlPath; + pathSpecFiles.set(apiPath, webPath); + }); + + console.log( + `Generated ${pathSpecFiles.size} path-specific specs in ${outPath}` + ); + + return pathSpecFiles; +} + +/** + * Write OpenAPI specs grouped by path to separate files + * Generates both YAML and JSON versions + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writePathOpenapis( + openapi: OpenAPIDocument, + prefix: string, + outPath: string +): void { + const pathGroups: Record> = {}; + + // Group paths by their base path (first 3-4 segments, excluding placeholders) + Object.keys(openapi.paths) + .sort() + .forEach((p) => { + const delimiter = '/'; + let key = p.split(delimiter); + + // Check if this is an item path (ends with a placeholder) + let isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + + // Take first 4 segments + key = key.slice(0, 4); + + // Check if the last segment is still a placeholder + isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + + const groupKey = key.join('/'); + pathGroups[groupKey] = pathGroups[groupKey] || {}; + pathGroups[groupKey][p] = openapi.paths[p]; + }); + + // Write each path group to separate YAML and JSON files + Object.keys(pathGroups).forEach((pg) => { + // Deep copy openapi + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); 
+ doc.paths = pathGroups[pg]; + + // Collect tags used by operations in this path group + const usedTags = new Set(); + Object.values(doc.paths).forEach((pathItem: PathItem) => { + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + httpMethods.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation?.tags) { + operation.tags.forEach((tag) => usedTags.add(tag)); + } + }); + }); + + // Filter tags to only include those used by operations in this path group + // Exclude x-traitTag tags (supplementary documentation tags) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + + // Simplify info for path-specific docs + doc.info.title = pg; + doc.info.description = `API reference for ${pg}`; + doc['x-pathGroup'] = pg; + + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + const baseFilename = `${prefix}${pg.replaceAll('/', '-').replace(/^-/, '')}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + // Write both YAML and JSON versions + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log(`Generated: ${baseFilename}.yaml and ${baseFilename}.json`); + } catch (err) { + console.error(`Error writing path group ${pg}:`, err); + } + }); +} + +/** + * Create article metadata for a path group + * + * @param openapi - OpenAPI document with x-pathGroup + * @returns Article metadata object + */ +function createArticleDataForPathGroup(openapi: OpenAPIDocument): Article { + const article: Article = { + path: '', + fields: { + name: openapi['x-pathGroup'] || '', + describes: Object.keys(openapi.paths), + }, + }; + + /** + * Convert OpenAPI path to Hugo-friendly article path + * Legacy endpoints (without /api/ prefix) go under api/ directly + * Versioned endpoints (with 
/api/vN/) keep their structure + * + * @param p - Path to convert (e.g., '/health', '/api/v3/query_sql') + * @returns Path suitable for Hugo content directory (e.g., 'api/health', 'api/v3/query_sql') + */ + const toHugoPath = (p: string): string => { + if (!p) { + return ''; + } + // If path doesn't start with /api/, it's a legacy endpoint + // Place it directly under api/ to avoid collision with /api/v1/* paths + if (!p.startsWith('/api/')) { + // /health -> api/health + // /write -> api/write + return `api${p}`; + } + // /api/v1/health -> api/v1/health + // /api/v2/write -> api/v2/write + // /api/v3/query_sql -> api/v3/query_sql + return p.replace(/^\//, ''); + }; + + /** + * Convert path to tag-friendly format (dashes instead of slashes) + * + * @param p - Path to convert + * @returns Tag-friendly path + */ + const toTagPath = (p: string): string => { + if (!p) { + return ''; + } + return p.replace(/^\//, '').replaceAll('/', '-'); + }; + + const pathGroup = openapi['x-pathGroup'] || ''; + article.path = toHugoPath(pathGroup); + // Store original path for menu display (shows actual endpoint path) + article.fields.menuName = pathGroup; + article.fields.title = openapi.info?.title; + article.fields.description = openapi.description; + + const pathGroupFrags = path.parse(openapi['x-pathGroup'] || ''); + article.fields.tags = [pathGroupFrags?.dir, pathGroupFrags?.name] + .filter(Boolean) + .map((t) => toTagPath(t)); + + // Extract x-relatedLinks and OpenAPI tags from path items or operations + const relatedLinks: string[] = []; + const apiTags: string[] = []; + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + + Object.values(openapi.paths).forEach((pathItem: PathItem) => { + // Check path-level x-relatedLinks + if ( + pathItem['x-relatedLinks'] && + Array.isArray(pathItem['x-relatedLinks']) + ) { + relatedLinks.push( + ...(pathItem['x-relatedLinks'] as string[]).filter( + (link) => 
!relatedLinks.includes(link) + ) + ); + } + + // Check operation-level x-relatedLinks and tags + httpMethods.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation) { + // Extract x-relatedLinks + if ( + operation['x-relatedLinks'] && + Array.isArray(operation['x-relatedLinks']) + ) { + relatedLinks.push( + ...(operation['x-relatedLinks'] as string[]).filter( + (link) => !relatedLinks.includes(link) + ) + ); + } + // Extract OpenAPI tags from operation + if (operation.tags && Array.isArray(operation.tags)) { + operation.tags.forEach((tag) => { + if (!apiTags.includes(tag)) { + apiTags.push(tag); + } + }); + } + } + }); + }); + + // Only add related if there are links + if (relatedLinks.length > 0) { + article.fields.related = relatedLinks; + } + + // Add OpenAPI tags from operations (for Hugo frontmatter) + if (apiTags.length > 0) { + article.fields.apiTags = apiTags; + } + + return article; +} + +/** + * Write OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing OpenAPI fragment files + * @param targetPath - Output path for article data + * @param opts - Options including file pattern filter + */ +function writeOpenapiArticleData( + sourcePath: string, + targetPath: string, + opts: WriteOpenapiArticleDataOptions +): void { + /** + * Check if path is a file + */ + const isFile = (filePath: string): boolean => { + return fs.lstatSync(filePath).isFile(); + }; + + /** + * Check if filename matches pattern + */ + const matchesPattern = (filePath: string): boolean => { + return opts.filePattern + ? 
path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter( + (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml') + ) // Only process YAML files + .map((filePath) => { + const openapi = readFile(filePath); + const article = createArticleDataForPathGroup(openapi); + article.fields.source = filePath; + // Hugo omits "/static" from the URI when serving files stored in "./static" + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + + const articleCollection: ArticleCollection = { articles }; + + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + + console.log(`Generated ${articles.length} articles in ${targetPath}`); + } catch (e) { + console.error('Error writing article data:', e); + } +} + +/** + * Sanitize markdown description by removing fragment links and ReDoc directives + * + * Handles three cases: + * 1. OpenAPI fragment links: [text](#section/...) -> text (removes the link entirely) + * 2. Relative links with fragments: [text](/path/#anchor) -> [text](/path/) (keeps link, removes fragment) + * 3. ReDoc injection directives: (removes entirely) + * + * This sanitization is necessary because fragment links don't work when article + * descriptions are rendered via the {{< children >}} shortcode on parent pages. 
+ * + * @param description - Markdown description that may contain fragment links + * @returns Sanitized description suitable for children shortcode rendering + */ +function sanitizeDescription(description: string | undefined): string { + if (!description) { + return ''; + } + + let sanitized = description; + + // Remove ReDoc injection directives (e.g., ) + sanitized = sanitized.replace(//g, ''); + + // Handle markdown links: + // 1. OpenAPI fragment links (#section/..., #operation/..., #tag/...) -> replace with just the text + // 2. Relative links with fragments (/path/#anchor) -> keep link but remove fragment + sanitized = sanitized.replace( + /\[([^\]]+)\]\(([^)]+)\)/g, + (match, text, url) => { + // Case 1: OpenAPI fragment links (starts with #section/, #operation/, #tag/) + if (url.match(/^#(section|operation|tag)\//)) { + return text; // Just return the link text, no markdown link + } + + // Case 2: Relative link with fragment (starts with /, contains #) + if (url.startsWith('/') && url.includes('#')) { + const urlWithoutFragment = url.split('#')[0]; + if (urlWithoutFragment === '/' || urlWithoutFragment === '') { + return text; + } + return `[${text}](${urlWithoutFragment})`; + } + + // Case 3: Keep other links as-is (external links, non-fragment links) + return match; + } + ); + + // Clean up extra whitespace left by directive removals + sanitized = sanitized.replace(/\n\n\n+/g, '\n\n').trim(); + + return sanitized; +} + +/** + * Create article data for a tag-based grouping + * + * @param openapi - OpenAPI document with x-tagGroup + * @param operations - Operations for this tag + * @param tagMeta - Tag metadata from OpenAPI spec + * @returns Article metadata object + */ +function createArticleDataForTag( + openapi: OpenAPIDocument, + operations: OperationMeta[], + tagMeta?: Tag +): Article { + const tagName = (openapi['x-tagGroup'] as string) || ''; + const tagSlug = slugifyTag(tagName); + const isConceptual = tagMeta?.['x-traitTag'] === true; + + const 
article: Article = { + path: `api/${tagSlug}`, + fields: { + name: tagName, + describes: Object.keys(openapi.paths), + title: tagName, + description: sanitizeDescription( + tagMeta?.description || + openapi.info?.description || + `API reference for ${tagName}` + ), + tag: tagName, + isConceptual, + menuGroup: getMenuGroupForTag(tagName), + operations: operations.map((op) => ({ + operationId: op.operationId, + method: op.method, + path: op.path, + summary: op.summary, + tags: op.tags, + ...(op.compatVersion && { compatVersion: op.compatVersion }), + ...(op.externalDocs && { externalDocs: op.externalDocs }), + })), + }, + }; + + // Add tag description for conceptual pages (sanitized for children shortcode) + if (tagMeta?.description) { + article.fields.tagDescription = sanitizeDescription(tagMeta.description); + } + + // Show security schemes section on Authentication pages + if (tagName === 'Authentication') { + article.fields.showSecuritySchemes = true; + } + + // Set custom weight for Quick start to appear first in nav + if (tagName === 'Quick start') { + article.fields.weight = 1; + } + + // Set default weight for consistent sorting (articles without explicit weight) + if (article.fields.weight === undefined) { + article.fields.weight = 100; + } + + // Aggregate related links from multiple sources into article-level related + // This populates Hugo frontmatter `related` field for "Related content" links + // Supports both plain URL strings and {title, href} objects + const relatedItems: (string | RelatedLink)[] = []; + const seenHrefs = new Set(); + + // Helper to add a link, deduplicating by href + const addRelated = (item: string | RelatedLink): void => { + const href = typeof item === 'string' ? 
item : item.href; + if (!seenHrefs.has(href)) { + seenHrefs.add(href); + relatedItems.push(item); + } + }; + + // Tag-level x-related ({title, href} objects) + if (tagMeta?.['x-related']) { + (tagMeta['x-related'] as RelatedLink[]).forEach(addRelated); + } + + // Tag-level x-influxdatadocs-related (plain URLs) + if (tagMeta?.['x-influxdatadocs-related']) { + (tagMeta['x-influxdatadocs-related'] as string[]).forEach(addRelated); + } + + // Tag-level externalDocs (legacy single link) + if (tagMeta?.externalDocs?.url) { + addRelated(tagMeta.externalDocs.url); + } + + // Operation-level related links + operations.forEach((op) => { + if (op.relatedLinks) { + op.relatedLinks.forEach(addRelated); + } + if (op.related) { + op.related.forEach(addRelated); + } + if (op.externalDocs?.url) { + addRelated(op.externalDocs.url); + } + }); + + if (relatedItems.length > 0) { + article.fields.related = relatedItems; + } + + return article; +} + +/** + * Write tag-based OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing tag-based OpenAPI fragment files + * @param targetPath - Output path for article data + * @param openapi - Original OpenAPI document (for tag metadata) + * @param opts - Options including file pattern filter + */ +function writeOpenapiTagArticleData( + sourcePath: string, + targetPath: string, + openapi: OpenAPIDocument, + opts: WriteOpenapiArticleDataOptions +): void { + const isFile = (filePath: string): boolean => { + return fs.lstatSync(filePath).isFile(); + }; + + const matchesPattern = (filePath: string): boolean => { + return opts.filePattern + ? 
/**
 * Write tag-based article metadata (articles.yml / articles.json) from the
 * tag-filtered OpenAPI fragment files in sourcePath.
 *
 * @param sourcePath - Directory containing tag-based OpenAPI fragment files
 * @param targetPath - Output path for article data
 * @param openapi - Original OpenAPI document (for tag metadata)
 * @param opts - Options including file pattern filter
 */
function writeOpenapiTagArticleData(
  sourcePath: string,
  targetPath: string,
  openapi: OpenAPIDocument,
  opts: WriteOpenapiArticleDataOptions
): void {
  const isFile = (filePath: string): boolean =>
    fs.lstatSync(filePath).isFile();

  const matchesPattern = (filePath: string): boolean =>
    opts.filePattern
      ? path.parse(filePath).name.startsWith(opts.filePattern)
      : true;

  // Build the operation-metadata record for one operation, including
  // vendor-extension related links.
  const toOperationMeta = (
    operation: Operation,
    method: string,
    pathKey: string
  ): OperationMeta => {
    const meta: OperationMeta = {
      operationId: operation.operationId || `${method}-${pathKey}`,
      method: method.toUpperCase(),
      path: pathKey,
      summary: operation.summary || '',
      tags: operation.tags || [],
    };
    if (operation['x-compatibility-version']) {
      meta.compatVersion = operation['x-compatibility-version'];
    }
    if (operation.externalDocs) {
      meta.externalDocs = {
        description: operation.externalDocs.description || '',
        url: operation.externalDocs.url,
      };
    }
    if (Array.isArray(operation['x-influxdatadocs-related'])) {
      meta.related = operation['x-influxdatadocs-related'];
    }
    if (Array.isArray(operation['x-related'])) {
      meta.relatedLinks = operation['x-related'] as RelatedLink[];
    }
    return meta;
  };

  // Tag metadata lookup from the source spec.
  const tagMetaMap = new Map<string, Tag>();
  for (const tag of openapi.tags || []) {
    tagMetaMap.set(tag.name, tag);
  }

  try {
    const articles = fs
      .readdirSync(sourcePath)
      .map((fileName) => path.join(sourcePath, fileName))
      .filter(matchesPattern)
      .filter(isFile)
      .filter(
        (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml')
      )
      .map((filePath) => {
        const tagOpenapi = readFile(filePath);
        const tagName =
          (tagOpenapi['x-tagGroup'] as string) || tagOpenapi.info?.title || '';

        // Flatten all operations in the tag-filtered spec.
        const operations: OperationMeta[] = [];
        for (const [pathKey, pathItem] of Object.entries(tagOpenapi.paths)) {
          for (const method of HTTP_METHODS) {
            const operation = pathItem[method] as Operation | undefined;
            if (operation) {
              operations.push(toOperationMeta(operation, method, pathKey));
            }
          }
        }

        const article = createArticleDataForTag(
          tagOpenapi,
          operations,
          tagMetaMap.get(tagName)
        );
        article.fields.source = filePath;
        // Hugo serves static/ content from the site root.
        article.fields.staticFilePath = filePath.replace(/^static\//, '/');
        return article;
      });

    if (!fs.existsSync(targetPath)) {
      fs.mkdirSync(targetPath, { recursive: true });
    }

    const articleCollection: ArticleCollection = { articles };

    // Write both YAML and JSON versions.
    writeDataFile(articleCollection, path.resolve(targetPath, 'articles.yml'));
    writeJsonFile(articleCollection, path.resolve(targetPath, 'articles.json'));

    console.log(
      `Generated ${articles.length} tag-based articles in ${targetPath}`
    );
  } catch (e) {
    console.error('Error writing tag article data:', e);
  }
}

/**
 * Options for generating Hugo data grouped by tag.
 */
export interface GenerateHugoDataByTagOptions extends GenerateHugoDataOptions {
  /** Whether to also generate path-based files (for backwards compatibility) */
  includePaths?: boolean;
}

/**
 * Generate Hugo data files from an OpenAPI specification grouped by tag.
 *
 * Reads the spec, writes per-tag YAML/JSON fragments, and emits tag-based
 * article metadata. When includePaths is set, path-based fragments are also
 * generated and the tag files move into a tags/ subdirectory.
 *
 * @param options - Generation options
 */
export function generateHugoDataByTag(
  options: GenerateHugoDataByTagOptions
): void {
  const filenamePrefix = `${path.parse(options.specFile).name}-`;
  const sourceFile = readFile(options.specFile, 'utf8');

  // Optionally generate path-based files for backwards compatibility.
  if (options.includePaths) {
    console.log(
      `\nGenerating OpenAPI path files in ${options.dataOutPath}....`
    );
    writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath);
  }

  const tagOutPath = options.includePaths
    ? path.join(options.dataOutPath, 'tags')
    : options.dataOutPath;

  console.log(`\nGenerating OpenAPI tag files in ${tagOutPath}....`);
  writeTagOpenapis(sourceFile, filenamePrefix, tagOutPath);

  console.log(
    `\nGenerating OpenAPI tag article data in ${options.articleOutPath}...`
  );
  writeOpenapiTagArticleData(tagOutPath, options.articleOutPath, sourceFile, {
    filePattern: filenamePrefix,
  });

  console.log('\nTag-based generation complete!\n');
}

/**
 * Generate Hugo data files from an OpenAPI specification grouped by path.
 *
 * Reads the spec, writes per-path-group YAML/JSON fragments, and emits
 * path-based article metadata.
 *
 * @param options - Generation options
 */
export function generateHugoData(options: GenerateHugoDataOptions): void {
  const filenamePrefix = `${path.parse(options.specFile).name}-`;

  const sourceFile = readFile(options.specFile, 'utf8');

  console.log(`\nGenerating OpenAPI path files in ${options.dataOutPath}....`);
  writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath);

  console.log(
    `\nGenerating OpenAPI article data in ${options.articleOutPath}...`
  );
  writeOpenapiArticleData(options.dataOutPath, options.articleOutPath, {
    filePattern: filenamePrefix,
  });

  console.log('\nGeneration complete!\n');
}

/**
 * Generate path-specific OpenAPI specs from a spec file
 *
 * Convenience wrapper that reads the spec file and generates path-specific specs.
+ * + * @param specFile - Path to OpenAPI spec file + * @param outPath - Output directory for path-specific specs + * @returns Map of API path to spec file web path (for use in frontmatter) + */ +export function generatePathSpecificSpecs( + specFile: string, + outPath: string +): Map { + const openapi = readFile(specFile, 'utf8'); + return writePathSpecificSpecs(openapi, outPath); +} + +// CommonJS export for backward compatibility +module.exports = { + generateHugoData, + generateHugoDataByTag, + generatePathSpecificSpecs, + writePathSpecificSpecs, +}; diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/package.json b/api-docs/scripts/openapi-paths-to-hugo-data/package.json new file mode 100644 index 0000000000..78bd5bc114 --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/package.json @@ -0,0 +1,14 @@ +{ + "name": "openapi-paths-to-hugo-data", + "version": "1.0.0", + "description": "Convert OpenAPI specifications to Hugo data files for API documentation", + "main": "index.js", + "type": "commonjs", + "dependencies": { + "js-yaml": "^4.1.1" + }, + "devDependencies": {}, + "scripts": {}, + "author": "InfluxData", + "license": "MIT" +} diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock b/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock new file mode 100644 index 0000000000..96bb86828b --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock @@ -0,0 +1,32 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@types/js-yaml@^4.0.9": + version "4.0.9" + resolved "https://registry.yarnpkg.com/@types/js-yaml/-/js-yaml-4.0.9.tgz#cd82382c4f902fed9691a2ed79ec68c5898af4c2" + integrity sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg== + +"@types/node@^24.10.1": + version "24.10.1" + resolved "https://registry.yarnpkg.com/@types/node/-/node-24.10.1.tgz#91e92182c93db8bd6224fca031e2370cef9a8f01" + integrity sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ== + dependencies: + undici-types "~7.16.0" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +js-yaml@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== + dependencies: + argparse "^2.0.1" + +undici-types@~7.16.0: + version "7.16.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.16.0.tgz#ffccdff36aea4884cbfce9a750a0580224f58a46" + integrity sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw== diff --git a/api-docs/scripts/tsconfig.json b/api-docs/scripts/tsconfig.json new file mode 100644 index 0000000000..e36776534b --- /dev/null +++ b/api-docs/scripts/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2021", + "lib": ["ES2021"], + "module": "CommonJS", + "moduleResolution": "node", + "outDir": "./dist", + "rootDir": ".", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": true, + "sourceMap": true, + "types": ["node"] + }, + 
"include": [ + "**/*.ts" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/assets/js/components/api-toc.ts b/assets/js/components/api-toc.ts new file mode 100644 index 0000000000..cc75120afc --- /dev/null +++ b/assets/js/components/api-toc.ts @@ -0,0 +1,473 @@ +/** + * API Table of Contents Component + * + * Generates "ON THIS PAGE" navigation from content headings or operations data. + * Features: + * - Builds TOC from h2 headings by default + * - Builds TOC from operations data passed via data-operations attribute (tag-based) + * - Highlights current section on scroll (intersection observer) + * - Smooth scroll to anchors + * - Updates when tab changes + * + * Usage: + * + * + * Attributes: + * - data-operations: JSON array of operation objects for server-rendered TOC + * - data-toc-depth: Max heading level to include (default: "2", use "3" for h2+h3) + */ + +interface ComponentOptions { + component: HTMLElement; +} + +interface TocEntry { + id: string; + text: string; + level: number; +} + +/** + * Operation metadata from frontmatter (for tag-based pages) + */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; +} + +/** + * Get headings from the currently visible content + * + * @param maxLevel - Maximum heading level to include (default: 2) + */ +function getVisibleHeadings(maxLevel: number = 2): TocEntry[] { + // Find the active tab panel or main content area + const activePanel = document.querySelector( + '.tab-content:not([style*="display: none"]), [data-tab-panel]:not([style*="display: none"]), .article--content' + ); + + if (!activePanel) { + return []; + } + + // Build selector based on maxLevel (e.g., 'h2' or 'h2, h3') + const selectors = []; + for (let level = 2; level <= maxLevel; level++) { + selectors.push(`h${level}`); + } + const headings = activePanel.querySelectorAll(selectors.join(', ')); + const entries: TocEntry[] = []; + + headings.forEach((heading) => { + 
// Skip headings without IDs + if (!heading.id) { + return; + } + + // Skip hidden headings + const rect = heading.getBoundingClientRect(); + if (rect.width === 0 && rect.height === 0) { + return; + } + + const level = parseInt(heading.tagName.charAt(1), 10); + entries.push({ + id: heading.id, + text: heading.textContent?.trim() || '', + level, + }); + }); + + return entries; +} + +/** + * Build TOC HTML from entries + */ +function buildTocHtml(entries: TocEntry[]): string { + if (entries.length === 0) { + // Return empty string - the TOC container can be hidden via CSS when empty + return ''; + } + + let html = '
    '; + + entries.forEach((entry) => { + const indent = entry.level === 3 ? ' api-toc-item--nested' : ''; + html += ` +
  • + ${entry.text} +
  • + `; + }); + + html += '
'; + return html; +} + +/** + * Get method badge class for HTTP method + */ +function getMethodClass(method: string): string { + const m = method.toLowerCase(); + switch (m) { + case 'get': + return 'api-method--get'; + case 'post': + return 'api-method--post'; + case 'put': + return 'api-method--put'; + case 'patch': + return 'api-method--patch'; + case 'delete': + return 'api-method--delete'; + default: + return ''; + } +} + +/** + * Build TOC HTML from operations data (for tag-based pages) + */ +function buildOperationsTocHtml(operations: OperationMeta[]): string { + if (operations.length === 0) { + return '

No operations on this page.

'; + } + + let html = '
    '; + + operations.forEach((op) => { + // Generate anchor ID matching Redocly operation/{operationId} format + const anchorId = `operation/${op.operationId}`; + const methodClass = getMethodClass(op.method); + + html += ` +
  • + + ${op.method.toUpperCase()} + ${op.path} + +
  • + `; + }); + + html += '
'; + return html; +} + +/** + * Parse operations from data attribute + */ +function parseOperationsData(component: HTMLElement): OperationMeta[] | null { + const dataAttr = component.getAttribute('data-operations'); + if (!dataAttr) { + return null; + } + + try { + const operations = JSON.parse(dataAttr) as OperationMeta[]; + return Array.isArray(operations) ? operations : null; + } catch (e) { + console.warn('[API TOC] Failed to parse operations data:', e); + return null; + } +} + +/** + * Set up intersection observer for scroll highlighting + */ +function setupScrollHighlighting( + container: HTMLElement, + entries: TocEntry[] +): IntersectionObserver | null { + if (entries.length === 0) { + return null; + } + + const headingIds = entries.map((e) => e.id); + const links = container.querySelectorAll('.api-toc-link'); + + // Create a map of heading ID to link element + const linkMap = new Map(); + links.forEach((link) => { + const href = link.getAttribute('href'); + if (href?.startsWith('#')) { + linkMap.set(href.slice(1), link); + } + }); + + // Track which headings are visible + const visibleHeadings = new Set(); + + const observer = new IntersectionObserver( + (observerEntries) => { + observerEntries.forEach((entry) => { + const id = entry.target.id; + + if (entry.isIntersecting) { + visibleHeadings.add(id); + } else { + visibleHeadings.delete(id); + } + }); + + // Find the first visible heading (in document order) + let activeId: string | null = null; + for (const id of headingIds) { + if (visibleHeadings.has(id)) { + activeId = id; + break; + } + } + + // If no heading is visible, use the last one that was scrolled past + if (!activeId && visibleHeadings.size === 0) { + const scrollY = window.scrollY; + for (let i = headingIds.length - 1; i >= 0; i--) { + const heading = document.getElementById(headingIds[i]); + if (heading && heading.offsetTop < scrollY + 100) { + activeId = headingIds[i]; + break; + } + } + } + + // Update active state on links + 
links.forEach((link) => { + link.classList.remove('is-active'); + }); + + if (activeId) { + const activeLink = linkMap.get(activeId); + activeLink?.classList.add('is-active'); + } + }, + { + rootMargin: '-80px 0px -70% 0px', + threshold: 0, + } + ); + + // Observe all headings + headingIds.forEach((id) => { + const heading = document.getElementById(id); + if (heading) { + observer.observe(heading); + } + }); + + return observer; +} + +/** + * Set up smooth scroll for TOC links + */ +function setupSmoothScroll(container: HTMLElement): void { + container.addEventListener('click', (event) => { + const target = event.target as HTMLElement; + const link = target.closest('.api-toc-link'); + + if (!link) { + return; + } + + const href = link.getAttribute('href'); + if (!href?.startsWith('#')) { + return; + } + + const targetElement = document.getElementById(href.slice(1)); + if (!targetElement) { + return; + } + + event.preventDefault(); + + // Scroll with offset for fixed header + const headerOffset = 80; + const elementPosition = targetElement.getBoundingClientRect().top; + const offsetPosition = elementPosition + window.scrollY - headerOffset; + + window.scrollTo({ + top: offsetPosition, + behavior: 'smooth', + }); + + // Update URL hash without jumping + history.pushState(null, '', href); + }); +} + +/** + * Update TOC visibility based on active tab + */ +function updateTocVisibility(container: HTMLElement): void { + const operationsPanel = document.querySelector( + '[data-tab-panel="operations"]' + ); + const isOperationsVisible = + operationsPanel && + !operationsPanel.getAttribute('style')?.includes('display: none'); + + if (isOperationsVisible) { + container.classList.add('is-hidden'); + } else { + container.classList.remove('is-hidden'); + } +} + +/** + * Watch for tab changes to rebuild TOC + */ +function watchTabChanges( + container: HTMLElement, + rebuild: () => void +): MutationObserver { + const tabPanels = document.querySelector('.api-tab-panels'); + + if 
(!tabPanels) { + return new MutationObserver(() => {}); + } + + const observer = new MutationObserver((mutations) => { + // Check if any tab panel visibility changed + const hasVisibilityChange = mutations.some((mutation) => { + return ( + mutation.type === 'attributes' && + (mutation.attributeName === 'style' || + mutation.attributeName === 'class') + ); + }); + + if (hasVisibilityChange) { + // Update visibility based on active tab + updateTocVisibility(container); + // Debounce rebuild + setTimeout(rebuild, 100); + } + }); + + observer.observe(tabPanels, { + attributes: true, + subtree: true, + attributeFilter: ['style', 'class'], + }); + + return observer; +} + +/** + * Initialize API TOC component + */ +export default function ApiToc({ component }: ComponentOptions): void { + const nav = component.querySelector('.api-toc-nav'); + + if (!nav) { + console.warn('[API TOC] No .api-toc-nav element found'); + return; + } + + // Check if TOC was pre-rendered server-side (has existing links) + const hasServerRenderedToc = nav.querySelectorAll('.api-toc-link').length > 0; + + if (hasServerRenderedToc) { + // Server-side TOC exists - show it, set up navigation and scroll highlighting + component.classList.remove('is-hidden'); + setupSmoothScroll(component); + + // Extract entries from pre-rendered links for scroll highlighting + const preRenderedLinks = + nav.querySelectorAll('.api-toc-link'); + const preRenderedEntries: TocEntry[] = []; + preRenderedLinks.forEach((link) => { + const href = link.getAttribute('href'); + if (href?.startsWith('#')) { + preRenderedEntries.push({ + id: href.slice(1), + text: link.textContent?.trim() || '', + level: 2, + }); + } + }); + if (preRenderedEntries.length > 0) { + setupScrollHighlighting(component, preRenderedEntries); + } + return; + } + + // Check for operations data (tag-based pages) + const operations = parseOperationsData(component); + let observer: IntersectionObserver | null = null; + + // Get max heading level from data 
attribute (default: 2) + // Use data-toc-depth="3" to include h3 headings if needed + const maxHeadingLevel = parseInt( + component.getAttribute('data-toc-depth') || '2', + 10 + ); + + /** + * Rebuild the TOC + */ + function rebuild(): void { + // Clean up previous observer + if (observer) { + observer.disconnect(); + observer = null; + } + + // If operations data is present, build operations-based TOC + if (operations && operations.length > 0) { + if (nav) { + nav.innerHTML = buildOperationsTocHtml(operations); + } + // Don't hide TOC for tag-based pages - always show operations + component.classList.remove('is-hidden'); + return; + } + + // Otherwise, fall back to heading-based TOC + const entries = getVisibleHeadings(maxHeadingLevel); + if (nav) { + nav.innerHTML = buildTocHtml(entries); + } + + // Hide TOC if no entries, show if entries exist + if (entries.length === 0) { + component.classList.add('is-hidden'); + } else { + component.classList.remove('is-hidden'); + // Set up scroll highlighting only when we have entries + observer = setupScrollHighlighting(component, entries); + } + } + + // Check initial visibility (hide for Operations tab, only for non-operations pages) + if (!operations || operations.length === 0) { + updateTocVisibility(component); + } + + // Initial build + rebuild(); + + // Set up smooth scroll + setupSmoothScroll(component); + + // Watch for tab changes (only for non-operations pages) + if (!operations || operations.length === 0) { + watchTabChanges(component, rebuild); + } + + // Also rebuild on window resize (headings may change visibility) + let resizeTimeout: number; + window.addEventListener('resize', () => { + clearTimeout(resizeTimeout); + resizeTimeout = window.setTimeout(rebuild, 250); + }); +} diff --git a/assets/js/content-interactions.js b/assets/js/content-interactions.js index eb9b4e1bc0..4c2a374c0a 100644 --- a/assets/js/content-interactions.js +++ b/assets/js/content-interactions.js @@ -122,21 +122,29 @@ function 
expandAccordions() { // Expand accordions on load based on URL anchor function openAccordionByHash() { - var anchor = window.location.hash; + var hash = window.location.hash; + if (!hash || hash.length <= 1) return; + + // Use native DOM method to handle special characters in IDs (like /) + var id = hash.substring(1); // Remove leading # + var anchorElement = document.getElementById(id); + if (!anchorElement) return; + + var $anchor = $(anchorElement); function expandElement() { - if ($(anchor).parents('.expand').length > 0) { - return $(anchor).closest('.expand').children('.expand-label'); - } else if ($(anchor).hasClass('expand')) { - return $(anchor).children('.expand-label'); + if ($anchor.parents('.expand').length > 0) { + return $anchor.closest('.expand').children('.expand-label'); + } else if ($anchor.hasClass('expand')) { + return $anchor.children('.expand-label'); } + return null; } - if (expandElement() != null) { - if (expandElement().children('.expand-toggle').hasClass('open')) { - // Do nothing? 
- } else { - expandElement().children('.expand-toggle').trigger('click'); + var $expandLabel = expandElement(); + if ($expandLabel != null) { + if (!$expandLabel.children('.expand-toggle').hasClass('open')) { + $expandLabel.children('.expand-toggle').trigger('click'); } } } diff --git a/assets/js/main.js b/assets/js/main.js index 826ad9a116..bc9d6c89e9 100644 --- a/assets/js/main.js +++ b/assets/js/main.js @@ -46,6 +46,7 @@ import SidebarSearch from './components/sidebar-search.js'; import { SidebarToggle } from './sidebar-toggle.js'; import Theme from './theme.js'; import ThemeSwitch from './theme-switch.js'; +import ApiToc from './components/api-toc.ts'; /** * Component Registry @@ -77,6 +78,7 @@ const componentRegistry = { 'sidebar-toggle': SidebarToggle, theme: Theme, 'theme-switch': ThemeSwitch, + 'api-toc': ApiToc, }; /** diff --git a/assets/styles/layouts/_api-code-samples.scss b/assets/styles/layouts/_api-code-samples.scss new file mode 100644 index 0000000000..f4dbe6ecc0 --- /dev/null +++ b/assets/styles/layouts/_api-code-samples.scss @@ -0,0 +1,67 @@ +// API Code Samples +// Styles for inline curl examples and Ask AI links within API operations + +.api-code-sample { + margin: $api-spacing-lg 0; + border: 1px solid rgba(0, 0, 0, 0.1); + border-radius: $api-border-radius; + overflow: hidden; + + .dark-theme & { + border-color: rgba(255, 255, 255, 0.1); + } +} + +.api-code-sample-header { + display: flex; + align-items: center; + justify-content: space-between; + margin: 0; + padding: $api-spacing-sm $api-spacing-md; + background: rgba(0, 0, 0, 0.03); + border-bottom: 1px solid rgba(0, 0, 0, 0.1); + + .dark-theme & { + background: rgba(255, 255, 255, 0.03); + border-bottom-color: rgba(255, 255, 255, 0.1); + } + + .api-code-sample-title { + font-size: 0.85rem; + font-weight: 600; + } +} + +pre.api-code-block { + margin: 0; + padding: $api-spacing-md; + overflow-x: auto; + background: $article-code-bg; + color: $article-code; + font-size: 0.8rem; + 
line-height: 1.5; + border-radius: 0; + + code { + background: none; + padding: 0; + color: inherit; + font-size: inherit; + line-height: inherit; + white-space: pre; + } +} + +.api-code-ask-ai { + font-size: 0.8rem; + font-weight: 400; + text-decoration: none; + color: $article-link; + opacity: 0.7; + transition: opacity 0.2s; + white-space: nowrap; + + &:hover { + opacity: 1; + } +} diff --git a/assets/styles/layouts/_api-layout.scss b/assets/styles/layouts/_api-layout.scss new file mode 100644 index 0000000000..2515b00e1f --- /dev/null +++ b/assets/styles/layouts/_api-layout.scss @@ -0,0 +1,785 @@ +/////////////////////////////// API Reference Layout /////////////////////////////// +// +// 3-column layout for API reference documentation: +// - Left: Existing Hugo sidebar + API navigation section +// - Center: Content with page-level tabs (Operations | Server | Auth | Compatibility) +// - Right: "ON THIS PAGE" table of contents +// +//////////////////////////////////////////////////////////////////////////////// + +// Content wrapper becomes flex container when used with API content +// Override overflow:hidden from _content-wrapper.scss to enable sticky positioning +// Widen to compensate for the API TOC so article content matches regular pages +.content-wrapper.api-content { + display: flex; + flex-direction: row; + align-items: flex-start; + overflow: visible; // Required for sticky TOC to work + width: calc(75% + 200px); + max-width: calc(100% - 2rem); +} + +// Main API content area (center column) +.api-main { + flex: 1; + min-width: 0; // Prevent flex item from overflowing + padding-right: 1rem; +} + +// Right-side TOC (third column) +.api-toc { + width: 200px; + flex-shrink: 0; + position: sticky; + top: 80px; // Account for fixed header height + align-self: flex-start; // Critical for sticky to work in flexbox + max-height: calc(100vh - 100px); + overflow-y: auto; + padding: 1rem; + border-left: 1px solid $nav-border; + + // Hidden state (used when a tab 
panel hides the TOC) + &.is-hidden { + display: none; + } + + &-header { + font-size: 0.75rem; + font-weight: $bold; + text-transform: uppercase; + letter-spacing: 0.08rem; + color: rgba($article-heading, 0.5); + margin: 0 0 1rem; + } + + &-nav { + // TOC list styles + .api-toc-list { + list-style: none; + margin: 0; + padding: 0; + } + + .api-toc-item { + margin: 0; + + &--nested { + padding-left: 0.75rem; + } + } + + .api-toc-link { + display: block; + padding: 0.35rem 0; + font-size: 0.85rem; + color: $nav-item; + text-decoration: none; + transition: color 0.2s; + line-height: 1.4; + + &:hover { + color: $nav-item-hover; + } + + &.is-active { + color: $nav-active; + font-weight: $medium; + } + } + } + + &-empty { + font-size: 0.85rem; + color: rgba($article-text, 0.5); + font-style: italic; + } + + // Operations-based TOC (for tag-based pages) + &-nav .api-toc-list--operations { + .api-toc-item--operation { + margin: 0.35rem 0; + } + + .api-toc-link--operation { + display: flex; + align-items: center; + gap: 0.5rem; + font-size: 0.8rem; + padding: 0.3rem 0; + } + + // HTTP method badges in TOC + .api-method { + display: inline-block; + font-size: 0.6rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.15rem 0.3rem; + border-radius: 3px; + min-width: 2.2rem; + text-align: center; + flex-shrink: 0; + + &--get { background-color: $b-pool; color: #fff; } // #00A3FF - bright brand blue + &--post { background-color: $gr-rainforest; color: #fff; } // #34BB55 - bright brand green + &--put { background-color: $y-pineapple; color: #fff; } // #FFB94A - bright yellow (distinct from red) + &--patch { background-color: $br-new-purple; color: #fff; } // #9b2aff - distinctive brand purple + &--delete { background-color: $r-curacao; color: #fff; } // #F95F53 - bright brand red + } + + .api-path { + font-family: $code; + font-size: 0.75rem; + word-break: break-all; + color: inherit; + } + } +} + 
+//////////////////////////////////////////////////////////////////////////////// +////////////////////////// Operations List (Main Content) ////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Operations list section +.api-operations-list { + margin: 2rem 0; + + h2 { + margin-bottom: 1rem; + } +} + +// Grid container for operation cards +.api-operations-grid { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +// Individual operation card (clickable link) +.api-operation-card { + display: flex; + align-items: flex-start; + gap: 0.75rem; + padding: 0.75rem 1rem; + background: rgba($article-bg, 0.5); + border: 1px solid $nav-border; + border-radius: $radius; + text-decoration: none; + color: $article-text; + transition: background-color 0.2s, border-color 0.2s; + + &:hover { + background: rgba($article-bg, 0.8); + border-color: $nav-item-hover; + } + + // HTTP method badge + .api-method { + display: inline-block; + font-size: 0.7rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.2rem 0.4rem; + border-radius: 3px; + min-width: 3.5rem; + text-align: center; + flex-shrink: 0; + margin-top: 0.15rem; + border: 2px solid; + background-color: transparent; + + &--get { border-color: $b-pool; color: $b-pool; } // #00A3FF - bright brand blue + &--post { border-color: $gr-rainforest; color: $gr-rainforest; } // #34BB55 - bright brand green + &--put { border-color: $y-pineapple; color: $y-pineapple; } // #FFB94A - bright yellow (distinct from red) + &--patch { border-color: $br-new-purple; color: $br-new-purple; } // #9b2aff - distinctive brand purple + &--delete { border-color: $r-curacao; color: $r-curacao; } // #F95F53 - bright brand red + } + + // API path in monospace + // Note: Uses element but we override the default code background + // to prevent inconsistent "progress bar" appearance from varying text lengths + .api-path { + font-family: $code; + font-size: 0.9rem; + color: 
$article-heading; + word-break: break-all; + flex: 1; + min-width: 0; // Allow text to shrink and wrap + background: none; // Override default code background + padding: 0; // Remove default code padding + } + + // Operation summary text + .api-operation-summary { + font-size: 0.875rem; + color: rgba($article-text, 0.8); + flex-shrink: 0; + } +} + +// Responsive: Stack operation cards vertically on small screens +@include media(small) { + .api-operation-card { + flex-direction: column; + align-items: stretch; + gap: 0.5rem; + + .api-method { + align-self: flex-start; + margin-top: 0; + } + + .api-path { + font-size: 0.85rem; + line-height: 1.4; + } + + .api-operation-summary { + font-size: 0.8rem; + line-height: 1.5; + } + } +} + +// Overview/Description section +.api-description { + margin: 2rem 0; + color: $article-text !important; // Override any inherited black color + + h2 { + margin-bottom: 1rem; + } + + // Ensure description text is visible and readable + p, ul, ol, pre, code { + color: $article-text !important; + opacity: 1; + } + + // Also ensure direct text nodes use correct color + & > * { + color: $article-text !important; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////// API Navigation in Sidebar /////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// API navigation wrapper - controls visibility +// Hidden by default, revealed via JS (localStorage) or on API pages +.api-nav-wrapper { + display: none; // Hidden by default + + &.is-revealed { + display: block; // Revealed via JS + } + + // Always show on API pages (server-rendered with .api-reference class) + .api-reference & { + display: block; + } +} + +// API navigation section added to the existing Hugo sidebar +.api-nav { + margin-top: 2rem; + padding-top: 1rem; + border-top: 1px solid $nav-border; + + &-header { + font-size: 0.85rem; + font-weight: $bold; + 
text-transform: uppercase; + letter-spacing: 0.06rem; + color: rgba($article-heading, 0.6); + margin: 0 0 1rem; + padding-left: 1.5rem; + } + + // API nav groups (collapsible sections) + &-group { + margin-bottom: 0.5rem; + + &-header { + display: flex; + align-items: center; + padding: 0.5rem 0 0.5rem 1.5rem; + font-weight: $medium; + color: $nav-category; + cursor: pointer; + transition: color 0.2s; + // Button reset for dark mode compatibility + background: none; + border: none; + width: 100%; + text-align: left; + font-size: 1.2rem; // Match sidebar .nav-category > a (19.2px) + font-family: inherit; + text-decoration: none; // For anchor version + + &:hover { + color: $nav-category-hover; + } + + &.is-active { + color: $nav-active; + } + + // Collapse/expand indicator (for button headers) + &::before { + content: ""; + display: inline-block; + width: 0; + height: 0; + margin-right: 0.5rem; + border-left: 5px solid $nav-border; + border-top: 4px solid transparent; + border-bottom: 4px solid transparent; + transition: transform 0.2s; + flex-shrink: 0; + } + + &.is-open::before { + transform: rotate(90deg); + } + } + + // For anchor headers, keep the ::before arrow (same as button) + // No special handling needed - anchor headers look the same as button headers + a#{&}-header { + // Same styling as button, arrow works via ::before + } + + &-items { + list-style: none; + padding-left: 2.5rem; + margin: 0; + max-height: 0; + overflow: hidden; + transition: max-height 0.3s ease-out; + background: $body-bg; // Match sidebar background + + &.is-open { + max-height: 2000px; // Large enough to show all operations + } + } + } + + // Individual API nav items + &-item { + margin: 0.25rem 0; + position: relative; + + a { + display: flex; + align-items: center; + padding: 0.35rem 0; + color: $nav-item; + text-decoration: none; + font-size: 0.95rem; + transition: color 0.2s; + + &:hover { + color: $nav-item-hover; + } + } + + &.is-active a { + color: $nav-active; + 
font-weight: $medium; + } + + // HTTP method badge (legacy class) + .method-badge { + display: inline-block; + font-size: 0.65rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.15rem 0.35rem; + margin-right: 0.5rem; + border-radius: 3px; + min-width: 2.5rem; + text-align: center; + + &.get { background-color: $gr-rainforest; color: #fff; } + &.post { background-color: $b-ocean; color: #fff; } + &.put { background-color: $br-galaxy; color: #fff; } + &.patch { background-color: $y-thunder; color: rgba($g5-pepper, 0.75); } + &.delete { background-color: $r-curacao; color: #fff; } + } + + // Tag items that link to tag pages + &.api-nav-tag { + > a { + font-weight: $medium; + } + + // Nested operations list under tag + .api-nav-operations { + list-style: none; + margin: 0.25rem 0 0.5rem; + padding-left: 0.75rem; + + .api-nav-operation { + margin: 0.15rem 0; + + a { + display: flex; + align-items: center; + gap: 0.4rem; + font-size: 0.85rem; + padding: 0.25rem 0; + } + } + } + } + + // Operation items with method badges + &.api-nav-operation, + .api-nav-operation { + .api-method { + display: inline-block; + font-size: 0.55rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.1rem 0.25rem; + border-radius: 3px; + min-width: 2rem; + text-align: center; + flex-shrink: 0; + + &--get { background-color: $b-pool; color: #fff; } // #00A3FF - bright brand blue + &--post { background-color: $gr-rainforest; color: #fff; } // #34BB55 - bright brand green + &--put { background-color: $y-pineapple; color: #fff; } // #FFB94A - bright yellow (distinct from red) + &--patch { background-color: $br-new-purple; color: #fff; } // #9b2aff - distinctive brand purple + &--delete { background-color: $r-curacao; color: #fff; } // #F95F53 - bright brand red + } + + .api-path { + font-family: $code; + font-size: 0.85rem; + word-break: break-all; + color: inherit; + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// 
+/////////////////////////// API Header with Actions //////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Header row with title and download button +.article--header-row { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: 1rem; + flex-wrap: wrap; +} + +.article--header-text { + flex: 1 1 100%; // Take full width, allowing download button to wrap + min-width: 0; +} + +// Summary paragraph in header - ensure full width +.article--summary { + max-width: none; + width: 100%; +} + +// Download OpenAPI spec button +.api-spec-actions { + flex-shrink: 0; +} + +.api-spec-download { + display: inline-flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem 1rem; + background-color: $g20-white; + color: $article-text; + text-decoration: none; + border-radius: $radius; + font-size: 0.875rem; + font-weight: $medium; + transition: background-color 0.2s, color 0.2s; + border: 1px solid $nav-border; + white-space: nowrap; + + &:hover { + background-color: $r-curacao; + color: $g20-white; + border-color: $r-curacao; + } + + svg { + flex-shrink: 0; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////////////// API Tabs //////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// API-specific tab wrapper (uses api-tabs-wrapper to avoid conflict with +// tabbed-content.js which handles .tabs-wrapper elements) +.api-tabs-wrapper { + margin: 1.5rem 0 1rem; +} + +// API tab navigation bar +.api-tabs-nav { + display: flex; + flex-wrap: wrap; + gap: 2px; + + a { + flex-grow: 1; + position: relative; + font-size: 1rem; + font-weight: $medium; + padding: 0.65rem 1.25rem; + display: inline-block; + white-space: nowrap; + text-align: center; + color: $article-tab-text !important; + border-radius: $radius; + background-color: $article-tab-bg; + text-decoration: none; 
+ transition: background-color 0.2s, color 0.2s; + z-index: 1; + + &::after { + content: ''; + position: absolute; + display: block; + top: 0; + right: 0; + width: 100%; + height: 100%; + border-radius: $radius; + @include gradient($article-btn-gradient); + opacity: 0; + transition: opacity 0.2s; + z-index: -1; + } + + &:hover { + color: $article-tab-active-text !important; + &::after { + opacity: 1; + } + } + + &.is-active { + color: $article-tab-active-text !important; + &::after { + opacity: 1; + @include gradient($article-btn-gradient); + } + } + } +} + +// Tab panels container +.api-tab-panels { + // Tab content visibility (follows existing pattern) + .tab-content:not(:first-of-type) { + display: none; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////// Authentication Tab Content ////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +.api-auth-content { + max-width: 800px; +} + +.api-auth-card { + background: $article-bg; + border: 1px solid $nav-border; + border-radius: $radius; + padding: 1.5rem; + margin-bottom: 1.5rem; + + h3 { + margin-top: 0; + margin-bottom: 0.5rem; + } + + h4 { + margin-top: 1rem; + margin-bottom: 0.5rem; + font-size: 0.9rem; + text-transform: uppercase; + letter-spacing: 0.05em; + color: rgba($article-text, 0.6); + } + + pre { + margin: 0.5rem 0; + padding: 1rem; + background: $article-code-bg; + border-radius: $radius; + overflow-x: auto; + } + + code { + font-family: $code; + font-size: 0.875rem; + } +} + +.api-auth-badge .badge { + display: inline-block; + padding: 0.25rem 0.5rem; + font-size: 0.75rem; + font-weight: $bold; + text-transform: uppercase; + border-radius: $radius; + + &.recommended { + background: $gr-rainforest; + color: $g20-white; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////// Server Tab Content 
//////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +.api-server-panel { + max-width: 600px; + + h2 { + margin-top: 0; + } +} + +.server-url-config { + display: flex; + gap: 0.5rem; + align-items: flex-end; + margin: 1rem 0; + flex-wrap: wrap; + + label { + width: 100%; + font-weight: $medium; + margin-bottom: 0.25rem; + } + + input { + flex: 1; + min-width: 200px; + padding: 0.5rem; + border: 1px solid $nav-border; + border-radius: $radius; + font-family: $code; + background: $article-bg; + color: $article-text; + } + + button { + padding: 0.5rem 1rem; + background: $r-curacao; + color: $g20-white; + border: none; + border-radius: $radius; + cursor: pointer; + font-weight: $medium; + + &:hover { + background: darken($r-curacao, 10%); + } + } +} + +.server-info { + margin-top: 1.5rem; + + ul { + list-style: disc; + padding-left: 1.5rem; + } + + li { + margin: 0.5rem 0; + } + + code { + background: $article-code-bg; + padding: 0.2rem 0.4rem; + border-radius: 3px; + font-family: $code; + } +} + +//////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// MEDIA QUERIES //////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Tablet: Hide TOC, keep sidebar +@include media(large) { + .content-wrapper.api-content { + flex-direction: column; + width: 75%; // Reset to default when TOC is hidden + } + + .api-toc { + display: none; + } + + .api-main { + padding-right: 0; + } +} + +// Mobile: Standard Hugo sidebar behavior +@include media(medium) { + .content-wrapper.api-content { + flex-direction: column; + } + + .api-toc { + display: none; + } + + .api-main { + padding-right: 0; + } + + // Collapse API nav in mobile view + .api-nav { + margin-top: 1rem; + padding-top: 0.5rem; + + &-group-items { + max-height: none; // Show all items by default in mobile + } + } +} + +// Large screens: Wider 
TOC
+@include media(xlarge) {
+  .api-toc {
+    width: 240px;
+  }
+}
+
+// Compressed layout: narrower TOC, drop border to reduce visual clutter
+// TOC is hidden at ≤1280px (large breakpoint), so this targets the
+// narrow window where the TOC is visible but space is tight.
+@media (min-width: 1281px) and (max-width: 1535px) {
+  .api-toc {
+    width: 180px;
+    border-left: none;
+  }
+}
diff --git a/assets/styles/layouts/_api-operations.scss b/assets/styles/layouts/_api-operations.scss
new file mode 100644
index 0000000000..12ccc36537
--- /dev/null
+++ b/assets/styles/layouts/_api-operations.scss
@@ -0,0 +1,540 @@
+// API Operations Styles
+// Renders OpenAPI operations, parameters, schemas, and responses
+
+// Variables
+$api-border-radius: 6px;
+$api-spacing-sm: 0.5rem;
+$api-spacing-md: 1rem;
+$api-spacing-lg: 1.5rem;
+$api-spacing-xl: 2rem;
+
+// Method colors
+$method-get: #00A3FF;
+$method-post: #34BB55;
+$method-put: #FFB94A;
+$method-delete: #D63031;
+$method-patch: #9b2aff;
+
+// Status code colors — reuse the method palette above, except client-error (softer red than $method-delete)
+$status-success: #34BB55;
+$status-redirect: #FFB94A;
+$status-client-error: #E17055;
+$status-server-error: #9b2aff;
+
+// ============================================
+// Operation Block
+// ============================================
+
+.api-hugo-native {
+  width: 100%;
+}
+
+.api-operation {
+  margin-bottom: $api-spacing-xl;
+  padding-top: $api-spacing-xl;
+  border-top: 2px solid $nav-border;
+
+  // Keep inline code proportional to surrounding text
+  code {
+    font-size: inherit;
+  }
+
+  &:first-child {
+    border-top: none;
+    padding-top: 0;
+  }
+
+  &:target {
+    animation: highlight-operation 1.5s ease-out;
+  }
+}
+
+@keyframes highlight-operation {
+  0% {
+    outline: 2px solid rgba($method-get, 0.4);
+    outline-offset: 8px;
+  }
+  100% {
+    outline-color: transparent;
+  }
+}
+
+// Operation Header
+.api-operation-header {
+  margin-bottom: $api-spacing-md;
+}
+
+.api-operation-endpoint {
+  display: flex;
+  align-items:
center; + gap: $api-spacing-sm; + margin-bottom: $api-spacing-md; + padding-bottom: $api-spacing-md; + border-bottom: 1px solid $nav-border; +} + +.api-method { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 0.25rem 0.5rem; + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + border-radius: 4px; + color: #fff; + + &--get { background-color: $method-get; } + &--post { background-color: $method-post; } + &--put { background-color: $method-put; } + &--delete { background-color: $method-delete; } + &--patch { background-color: $method-patch; } +} + +.api-path { + font-family: $code; + font-size: 0.95rem; + color: $article-text; + background: $article-code-bg; + padding: 0.25rem 0.5rem; + border-radius: 4px; +} + +.api-operation-summary { + margin: 0; + font-size: 1.25rem; + font-weight: 600; + color: $article-heading; +} + +.api-operation-description { + margin: $api-spacing-md 0; + color: $article-text; + line-height: 1.6; + + p:last-child { + margin-bottom: 0; + } +} + +// ============================================ +// Section Titles +// ============================================ + +.api-section-title { + display: flex; + align-items: center; + gap: $api-spacing-sm; + margin: $api-spacing-lg 0 $api-spacing-md; + padding-bottom: $api-spacing-sm; + font-size: 1rem; + font-weight: 600; + color: $article-heading; + border-bottom: 1px solid $nav-border; +} + +// ============================================ +// Parameters Section +// ============================================ + +.api-parameters { + margin: $api-spacing-lg 0; +} + +.api-param-group { + margin-bottom: $api-spacing-md; +} + +.api-param-group-title { + margin: $api-spacing-sm 0; + font-size: 0.85rem; + font-weight: 500; + color: rgba($article-text, 0.6); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.api-param-list { + // Flat list — no outer border, rows separated by dividers +} + +.api-param-row { + padding: $api-spacing-md 0; + 
border-bottom: 1px solid $nav-border; + + &:last-child { + border-bottom: none; + } +} + +.api-param-name-line { + display: flex; + align-items: center; + gap: $api-spacing-sm; + margin-bottom: 0.25rem; +} + +.api-param-name { + font-family: $code; + font-size: 0.9rem; + font-weight: 600; + color: $article-heading; +} + +.api-param-type { + font-size: 0.8rem; + color: rgba($article-text, 0.6); +} + +.api-param-description { + margin-top: 0.25rem; + color: $article-text; + line-height: 1.5; + + p { + margin: 0; + } +} + +.api-param-enum, +.api-param-default { + margin-top: 0.5rem; + font-size: 0.85rem; +} + +.api-param-enum-label, +.api-param-default-label { + color: rgba($article-text, 0.6); +} + +.api-param-enum-value, +.api-param-default-value { + font-family: $code; + font-size: 0.8rem; + background: $article-code-bg; + padding: 0.125rem 0.375rem; + border-radius: 3px; +} + +// ============================================ +// Badges +// ============================================ + +.api-badge { + display: inline-flex; + align-items: center; + padding: 0.125rem 0.375rem; + font-size: 0.7rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.03em; + border-radius: 3px; + + &--required { + background: rgba($method-delete, 0.1); + color: $method-delete; + } +} + +// ============================================ +// Request Body Section +// ============================================ + +.api-request-body { + margin: $api-spacing-lg 0; +} + +.api-request-body-description { + margin: $api-spacing-sm 0; + color: $article-text; + + p:last-child { + margin-bottom: 0; + } +} + +.api-content-type { + margin: $api-spacing-sm 0; + font-size: 0.85rem; + + code { + font-family: $code; + background: $article-code-bg; + padding: 0.125rem 0.375rem; + border-radius: 3px; + } +} + +.api-content-type-label { + color: rgba($article-text, 0.6); +} + +// ============================================ +// Schema Section +// ============================================ 
+ +.api-schema { + margin: $api-spacing-md 0; + + &--nested { + margin-left: $api-spacing-lg; + padding-left: $api-spacing-md; + border-left: 2px solid $nav-border; + } +} + +.api-schema-properties { + // Flat list — no outer border, rows separated by dividers +} + +.api-schema-property { + padding: $api-spacing-md 0; + border-bottom: 1px solid $nav-border; + + &:last-child { + border-bottom: none; + } + + &--required { + .api-schema-property-name { + &::after { + content: '*'; + color: $method-delete; + margin-left: 0.25rem; + } + } + } +} + +.api-schema-property-header { + display: flex; + align-items: center; + gap: $api-spacing-sm; + margin-bottom: 0.25rem; +} + +.api-schema-property-name { + font-family: $code; + font-size: 0.9rem; + font-weight: 600; + color: $article-heading; +} + +.api-schema-property-type { + font-size: 0.8rem; + color: rgba($article-text, 0.6); +} + +.api-schema-property-description { + margin-top: 0.25rem; + color: $article-text; + line-height: 1.5; + + p { + margin: 0; + } +} + +.api-schema-property-enum, +.api-schema-property-default, +.api-schema-property-example { + margin-top: 0.5rem; + font-size: 0.85rem; +} + +.api-enum-label, +.api-default-label, +.api-example-label { + color: rgba($article-text, 0.6); +} + +.api-enum-value, +.api-default-value, +.api-example-value { + font-family: $code; + font-size: 0.8rem; + background: $article-code-bg; + padding: 0.125rem 0.375rem; + border-radius: 3px; +} + +// Schema Example Block +.api-schema-example { + margin-top: $api-spacing-md; + border: 1px solid rgba(0, 0, 0, 0.1); + border-radius: $api-border-radius; + overflow: hidden; + + .dark-theme & { + border-color: rgba(255, 255, 255, 0.1); + } +} + +.api-schema-example-title { + display: block; + margin: 0; + padding: $api-spacing-sm $api-spacing-md; + font-size: 0.85rem; + font-weight: 600; + background: rgba(0, 0, 0, 0.03); + border-bottom: 1px solid rgba(0, 0, 0, 0.1); + + .dark-theme & { + background: rgba(255, 255, 255, 0.03); + 
border-bottom-color: rgba(255, 255, 255, 0.1); + } +} + +pre.api-schema-example-code { + margin: 0; + padding: $api-spacing-sm $api-spacing-md; + background: $article-code-bg; + overflow-x: auto; + + code { + font-family: $code; + font-size: 0.85rem; + } +} + +// ============================================ +// Responses Section +// ============================================ + +.api-responses { + margin: $api-spacing-lg 0; +} + +.api-response-list { + display: flex; + flex-direction: column; + gap: $api-spacing-sm; +} + +.api-response { + padding: $api-spacing-sm 0; +} + +.api-response-header { + display: flex; + align-items: center; + gap: $api-spacing-sm; +} + +.api-response-status { + display: inline-flex; + align-items: center; + justify-content: center; + min-width: 3rem; + padding: 0.25rem 0.5rem; + font-size: 0.8rem; + font-weight: 600; + border-radius: 4px; + color: #fff; + + &--success { background-color: $status-success; } + &--redirect { background-color: $status-redirect; } + &--client-error { background-color: $status-client-error; } + &--server-error { background-color: $status-server-error; } + &--info { background-color: rgba($article-text, 0.6); } +} + +.api-response-description { + color: $article-text; +} + +.api-response-body { + margin-top: $api-spacing-sm; + margin-left: $api-spacing-lg; + padding: $api-spacing-sm 0 $api-spacing-sm $api-spacing-md; + border-left: 2px solid rgba($article-text, 0.1); +} + +// ============================================ +// Tag Overview +// ============================================ + +.api-tag-overview { + margin-bottom: $api-spacing-xl; + padding-bottom: $api-spacing-lg; + border-bottom: 1px solid $nav-border; +} + +.api-tag-description { + color: $article-text; + line-height: 1.7; + + h4, h5 { + margin-top: $api-spacing-lg; + margin-bottom: $api-spacing-sm; + color: $article-heading; + } + + ul, ol { + padding-left: $api-spacing-lg; + } + + a { + color: $article-link; + + &:hover { + text-decoration: 
underline; + } + } +} + +// ============================================ +// Related Guides Section +// ============================================ + +.api-related-guides { + margin-top: $api-spacing-xl; + padding-top: $api-spacing-lg; + border-top: 1px solid $nav-border; +} + +.api-related-title { + margin: 0 0 $api-spacing-md; + font-size: 1rem; + font-weight: 600; + color: $article-heading; +} + +.api-related-list { + margin: 0; + padding: 0; + list-style: none; + + li { + margin-bottom: $api-spacing-sm; + } + + a { + color: $article-link; + text-decoration: none; + + &:hover { + text-decoration: underline; + } + } +} + +// ============================================ +// Responsive Adjustments +// ============================================ + +@media (max-width: 768px) { + .api-operation-endpoint { + flex-wrap: wrap; + } + + .api-path { + font-size: 0.85rem; + word-break: break-all; + } + + .api-param-name-line, + .api-schema-property-header { + flex-wrap: wrap; + } +} diff --git a/assets/styles/layouts/_api-overrides.scss b/assets/styles/layouts/_api-overrides.scss index bc220588f8..00a7b12860 100644 --- a/assets/styles/layouts/_api-overrides.scss +++ b/assets/styles/layouts/_api-overrides.scss @@ -1,9 +1,16 @@ +//////////////////////////////////////////////////////////////////////////////// +// API Documentation Style Overrides +// +// Provides loading spinner and reusable HTTP method badge colors. +// Used by Hugo-native API templates for consistent styling. 
+//////////////////////////////////////////////////////////////////////////////// + @import "tools/color-palette"; @import "tools/fonts"; // Fonts $proxima: 'Proxima Nova', sans-serif; -$code: 'IBM Plex Mono', monospace;; +$code: 'IBM Plex Mono', monospace; // Font weights $medium: 500; @@ -22,7 +29,7 @@ $bold: 700; } @keyframes spinner { - to {transform: rotate(360deg);} + to { transform: rotate(360deg); } } .spinner:before { @@ -41,256 +48,15 @@ $bold: 700; animation: spinner .6s linear infinite; } -//////////////////////////////// InfluxDB Header /////////////////////////////// - -#influx-header { - font-family: $proxima; - padding: 10px ; - display: flex; - align-items: center; - justify-content: space-between; - background-color: $g2-kevlar; - a { - text-decoration: none; - &.back { - color: $g20-white; - transition: color .2s; - &:hover { - color: $b-pool; - } - &:before { - content: "\e919"; - font-family: 'icomoon-v2'; - margin-right: .65rem; - } - } - &.btn { - padding: .5rem .75rem .5rem .65rem; - font-size: .85rem; - font-weight: 500; - color: $g15-platinum; - background: $g5-pepper; - border-radius: 4.5px; - transition: all .2s; - &:before { - content: "\e934"; - display: inline-block; - font-size: .95rem; - margin-right: .5rem; - font-family: 'icomoon-v2'; - } - &:hover { - color: $g20-white; - background: $b-pool; - } - } - } -} - -// Header Media Queries - -@media (max-width: 600px) { - #influx-header span.version {display: none;} -} - +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////// HTTP Method Badge Colors /////////////////////////// //////////////////////////////////////////////////////////////////////////////// -.cjtbAK { - h1,h2,h3,h4,h5,h6, - p,li,th,td { - font-family: $proxima !important; - } -} - -#redoc { - h1,h2,h3 { - font-weight: $medium !important; - } -} - -// Section title padding -.dluJDj { - padding: 20px 0; -} - -// Page h1 -.dTJWQH { - color: $g7-graphite; - font-size: 
2rem; -} - -// Download button -.jIdpVJ { - background: $b-dodger; - color: $g20-white; - border: none; - border-radius: 3px; - font-family: $proxima; - font-size: .85rem; - font-weight: $medium; - transition: background-color .2s; - &:hover { - background-color: $b-pool; - } -} - -// Tag h1s -.WxWXp { - color: $g7-graphite; - font-size: 1.75rem; -} - -// Summaru h2s and table headers -.ioYTqA, .bxcHYI, .hoUoen { - color: $g7-graphite; -} - -// h3s -.espozG { - color: $g8-storm; -} - -// Links -.bnFPhO a { color: $b-dodger; - &:visited {color: $b-dodger;} -} - -.redoc-json { - font-family: $code !important; -} - -// Inline Code -.flfxUM code, -.gDsWLk code, -.kTVySD { - font-family: $code !important; - color: $cp-marguerite; - background: $cp-titan; - border-color: $cp-titan; -} - -// Required tags -.jsTAxL { - color: $r-curacao; -} - -///////////////////////////// RESPONSE COLOR BLOCKS //////////////////////////// - -// Green -.hLVzSF, .fDvFMp { - background-color: rgba($gr-honeydew, .2); - color: $gr-emerald; -} - -// Red -.byLrBg { - background-color: rgba($r-curacao, .1); - color: $r-curacao; -} - - - -/////////////////////////////////// LEFT NAV /////////////////////////////////// - -// Left nav background -.gZdDsM { - background-color: $g19-ghost; -} - -.gpbcFk:hover, .sc-eTuwsz.active { - background-color: $g17-whisper; -} - -// List item text -.SmuWE, .gcUzvG, .bbViyS, .sc-hrWEMg label { - font-family: $proxima !important; -} - -.fyUykq { - font-weight: $medium; -} - -// Request method tags -.cFwMcp { - &.post { background-color: $b-ocean; } - &.get { background-color: $gr-rainforest; } - &.put { background-color: $br-galaxy; } - &.patch { background-color: $y-thunder; color: rgba($g5-pepper, .75);} - &.delete { background-color: $r-curacao; } -} - -// Active nav section -.gcUzvG, .iNzLCk:hover { - color: $br-magenta; -} - -/////////////////////////////// RIGHT CODE COLUMN ////////////////////////////// - -// Right column backgrounds -.dtUibw, .fLUKgj { - 
background-color: $g2-kevlar; - h3,h4,h5,h6 { - font-family: $proxima !important; - font-weight: $medium !important; - } -} - -// Code backgrounds -.irpqyy > .react-tabs__tab-panel { - background-color: $g0-obsidian; -} -.dHLKeu, .fVaxnA { - padding-left: 10px; - background-color: $g0-obsidian; -} - -// Response code tabs -.irpqyy > ul > li { - background-color: $g0-obsidian; - border-radius: 3px; - &.react-tabs__tab--selected{ color: $br-pulsar;} - &.tab-error { color: $r-fire; } - &.tab-success { color: $gr-viridian; } -} - -// Request methods -.bNYCAJ, -.jBjYbV, -.hOczRB, -.fRsrDc, -.hPskZd { - font-family: $proxima; - font-weight: $medium; - letter-spacing: .04em; - border-radius: 3px; -} -.bNYCAJ { background-color: $b-ocean; } /* Post */ -.jBjYbV { background-color: $gr-viridian; } /* Get */ -.hOczRB { background-color: $br-galaxy; } /* Put */ -.fRsrDc { background-color: $y-thunder; color: $g5-pepper; } /* Patch */ -.hPskZd { background-color: $r-curacao; } /* Delete */ - -// Content type block -.gzAoUb { - background-color: $g2-kevlar; - font-family: $proxima; -} -.iENVAs { font-family: $code; } -.dpMbau { font-family: $proxima; } - -// Code controls -.fCJmC { - font-family: $proxima; - span { border-radius: 3px; } -} - -// Code blocks -.kZHJcC { font-family: $code; } -.jCgylq { - .token.string { - color: $gr-honeydew; - & + a { color: $b-pool; } - } - .token.boolean { color: #f955b0; } -} +// Reusable method badge colors (used by _api-layout.scss .method-badge) +// These follow standard REST API color conventions +$method-get: $gr-rainforest; +$method-post: $b-ocean; +$method-put: $br-galaxy; +$method-patch: $y-thunder; +$method-delete: $r-curacao; diff --git a/assets/styles/layouts/_api-security-schemes.scss b/assets/styles/layouts/_api-security-schemes.scss new file mode 100644 index 0000000000..3723786b08 --- /dev/null +++ b/assets/styles/layouts/_api-security-schemes.scss @@ -0,0 +1,92 @@ 
+//////////////////////////////////////////////////////////////////////////////// +// API Security Schemes Styling +// +// Styles for security schemes sections displayed on conceptual API pages +// (like Authentication). These sections are rendered from OpenAPI spec +// securitySchemes using Hugo templates. +//////////////////////////////////////////////////////////////////////////////// + +.api-security-schemes { + margin-top: 2rem; + padding-top: 2rem; + border-top: 1px solid $g5-pepper; + + h2 { + margin-bottom: 1.5rem; + } + + .security-scheme { + margin-bottom: 2rem; + + h3 { + margin: 0 0 1rem 0; + font-size: 1.1rem; + color: $article-heading; + } + } + + .scheme-details { + margin-bottom: 1rem; + + dl { + display: grid; + grid-template-columns: auto 1fr; + gap: 0.5rem 1rem; + margin: 0; + } + + dt { + font-weight: 600; + color: $g9-mountain; + } + + dd { + margin: 0; + + code { + background: $article-code-bg; + color: $article-code; + padding: 0.2em 0.5em; + border-radius: 3px; + font-size: 0.9em; + } + } + } + + .scheme-description { + margin-top: 1rem; + padding-top: 1rem; + border-top: 1px solid $g5-pepper; + + p:first-child { + margin-top: 0; + } + + pre { + margin: 1rem 0; + } + } +} + +// Dark theme overrides for security schemes +[data-theme="dark"], +html:has(link[title="dark-theme"]:not([disabled])) { + .api-security-schemes { + border-top-color: $grey25; + + .security-scheme { + // Removed background and border - now using plain styling + } + + .scheme-details { + dt { + color: $g15-platinum; + } + } + + .scheme-description { + border-top-color: $grey25; + } + } +} + diff --git a/assets/styles/layouts/_sidebar.scss b/assets/styles/layouts/_sidebar.scss index 30eef2b4e3..9d2bb6aa6d 100644 --- a/assets/styles/layouts/_sidebar.scss +++ b/assets/styles/layouts/_sidebar.scss @@ -255,6 +255,66 @@ } } } + + // API operation items within Hugo menu + .api-operation { + a { + display: flex; + align-items: center; + gap: 0.4rem; + } + + // Path-based 
operation display (All endpoints list) + &--path .api-path { + font-family: $proxima; + font-size: 0.9rem; + color: inherit; + word-break: break-all; + } + } + + .api-method { + font-size: 0.6rem; + font-weight: 700; + padding: 0.15rem 0.35rem; + border-radius: 3px; + text-transform: uppercase; + flex-shrink: 0; + line-height: 1; + border: 2px solid; + background-color: transparent; + + // Using lighter InfluxData brand colors - bordered style for readability + &--get { border-color: $b-pool; color: $b-pool; } // #00A3FF - bright brand blue + &--post { border-color: $gr-rainforest; color: $gr-rainforest; } // #34BB55 - bright brand green + &--put { border-color: $y-pineapple; color: $y-pineapple; } // #FFB94A - bright yellow (distinct from red) + &--delete { border-color: $r-curacao; color: $r-curacao; } // #F95F53 - bright brand red + &--patch { border-color: $br-new-purple; color: $br-new-purple; } // #9b2aff - distinctive brand purple + } + + // Compatibility version badge (v1 or v2) + .api-compat-badge { + font-size: 0.55rem; + font-weight: 600; + padding: 0.1rem 0.3rem; + border-radius: 3px; + text-transform: uppercase; + flex-shrink: 0; + line-height: 1; + margin-left: auto; + opacity: 0.8; + cursor: help; + + &--v1 { background: #8b5cf6; color: white; } // Purple for v1 + &--v2 { background: #06b6d4; color: white; } // Cyan for v2 + } + + // Non-link group labels (for multi-tag groups) + .nav-group-label { + color: $nav-item; + font-weight: $medium; + display: inline-block; + } } .feature-board-badge { diff --git a/assets/styles/styles-default.scss b/assets/styles/styles-default.scss index 8852a240c3..310f917731 100644 --- a/assets/styles/styles-default.scss +++ b/assets/styles/styles-default.scss @@ -32,7 +32,11 @@ "layouts/v1-overrides", "layouts/notifications", "layouts/code-controls", - "layouts/v3-wayfinding"; + "layouts/v3-wayfinding", + "layouts/api-layout", + "layouts/api-security-schemes", + "layouts/api-operations", + "layouts/api-code-samples"; // 
Import Components @import "components/influxdb-version-detector", diff --git a/content/enterprise_influxdb/v1/tools/api.md b/content/enterprise_influxdb/v1/tools/api.md index 35bfa24b00..a28374f7ad 100644 --- a/content/enterprise_influxdb/v1/tools/api.md +++ b/content/enterprise_influxdb/v1/tools/api.md @@ -162,7 +162,7 @@ curl -XGET "localhost:8086/health" ### `/api/v2/buckets/` HTTP endpoint -The [/api/v2/buckets](/influxdb/v2/api/#tag/Buckets) endpoint accepts `GET`, `POST` and `DELETE` HTTP requests. Use this endpoint to [create](/influxdb/v2/api/#operation/PostBuckets), [delete](/influxdb/v2/api/#operation/DeleteBucketsID), [list](/influxdb/v2/api/#operation/GetBuckets), [update](/influxdb/v2/api/#operation/PatchBucketsID) and [retrieve](/influxdb/v2/api/#operation/GetBucketsID) buckets in your InfluxDB instance. Note that InfluxDB 2.x uses organizations and buckets instead of databases and retention policies. +The [/api/v2/buckets](/influxdb/v2/api/#tag/Buckets) endpoint accepts `GET`, `POST` and `DELETE` HTTP requests. Use this endpoint to [create](/influxdb/v2/api/#post-/api/v2/buckets), [delete](/influxdb/v2/api/#delete-/api/v2/buckets/-bucketID-), [list](/influxdb/v2/api/#get-/api/v2/buckets), [update](/influxdb/v2/api/#patch-/api/v2/buckets/-bucketID-) and [retrieve](/influxdb/v2/api/#get-/api/v2/buckets/-bucketID-) buckets in your InfluxDB instance. Note that InfluxDB 2.x uses organizations and buckets instead of databases and retention policies. 
**Include the following URL parameters:** diff --git a/content/influxdb/cloud/admin/buckets/bucket-schema.md b/content/influxdb/cloud/admin/buckets/bucket-schema.md index cd4a37aed8..d50e63c5d9 100644 --- a/content/influxdb/cloud/admin/buckets/bucket-schema.md +++ b/content/influxdb/cloud/admin/buckets/bucket-schema.md @@ -122,7 +122,7 @@ Use the [**`influx` CLI**](/influxdb/cloud/reference/cli/influx/) or [**InfluxDB ``` #### Create a bucket schema using the InfluxDB HTTP API -Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb/cloud/api/#operation/createMeasurementSchema) +Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb/cloud/api/#post-/api/v2/buckets/-bucketID-/schema/measurements) and set the following properties in the request body: - `name`: the measurement name. @@ -130,7 +130,7 @@ and set the following properties in the request body: For example, the following request defines the _explicit_ bucket measurement schema for `airSensors` measurements: -{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb/cloud/api/#operation/createMeasurementSchema" >}} +{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb/cloud/api/#post-/api/v2/buckets/-bucketID-/schema/measurements" >}} ```js { @@ -215,9 +215,9 @@ To view schema column definitions and metadata, specify the `--json` flag. 
### View schema type and schemas using the InfluxDB HTTP API -To list schemas for a bucket, send a request to the InfluxDB HTTP [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb/cloud/api/#operation/getMeasurementSchemas): +To list schemas for a bucket, send a request to the InfluxDB HTTP [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb/cloud/api/#get-/api/v2/buckets/-bucketID-/schema/measurements): -{{% api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb/cloud/api/#operation/getMeasurementSchemas" %}} +{{% api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb/cloud/api/#get-/api/v2/buckets/-bucketID-/schema/measurements" %}} ## Update a bucket schema @@ -256,11 +256,11 @@ You can't modify or delete columns in bucket schemas. 1. [View the existing measurement schema](#view-schema-type-and-schemas-using-the-influxdb-http-api) and copy the `columns` list. -2. Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}` endpoint](/influxdb/cloud/api/#operation/updateMeasurementSchema). +2. Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}` endpoint](/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-/schema/measurements/-measurementID-). 
In the request body, set the `columns` property to a list of old and new column definitions for the measurement schema--for example, the following request appends the new column `CO2` to `columns` retrieved in the previous step: - {{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}" api-ref="/influxdb/cloud/api/#operation/updateMeasurementSchema" >}} + {{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}" api-ref="/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-/schema/measurements/-measurementID-" >}} ```js { diff --git a/content/influxdb/cloud/admin/buckets/create-bucket.md b/content/influxdb/cloud/admin/buckets/create-bucket.md index 5571accc9a..61f8681507 100644 --- a/content/influxdb/cloud/admin/buckets/create-bucket.md +++ b/content/influxdb/cloud/admin/buckets/create-bucket.md @@ -106,7 +106,7 @@ influx bucket create \ To create a bucket with the InfluxDB HTTP API, send a request to the following endpoint: -{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#operation/PostBuckets" >}} +{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#post-/api/v2/buckets" >}} Include the following in your request: @@ -131,7 +131,7 @@ The following example creates a bucket with a retention period of `86,400` secon ``` _For information about **InfluxDB API options and response codes**, see -[InfluxDB API Buckets reference documentation](/influxdb3/cloud-serverless/api/#operation/PostBuckets)._ +[InfluxDB API Buckets reference documentation](/influxdb3/cloud-serverless/api/#post-/api/v2/buckets)._ {{% /tab-content %}} @@ -164,10 +164,10 @@ Use the `influx bucket create` command and specify the `--schema-type=explicit` -Use the HTTP API 
[`/api/v2/buckets`](/influxdb/cloud/api/#operation/PostBuckets) +Use the HTTP API [`/api/v2/buckets`](/influxdb/cloud/api/#post-/api/v2/buckets) endpoint and set the `schemaType` property value to `explicit` in the request body--for example: -{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#operation/PostBuckets" >}} +{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#post-/api/v2/buckets" >}} ```js { diff --git a/content/influxdb/cloud/admin/buckets/update-bucket.md b/content/influxdb/cloud/admin/buckets/update-bucket.md index 7838b96cf0..c479d5e65b 100644 --- a/content/influxdb/cloud/admin/buckets/update-bucket.md +++ b/content/influxdb/cloud/admin/buckets/update-bucket.md @@ -55,7 +55,7 @@ For information about permitted bucket names, see {{% note %}} Use the [`influx bucket update` command](#update-a-buckets-retention-period) -or the [InfluxDB HTTP API `PATCH /api/v2/buckets` endpoint](/influxdb/cloud/api/#operation/PatchBucketsID) to set a custom retention period. +or the [InfluxDB HTTP API `PATCH /api/v2/buckets` endpoint](/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-) to set a custom retention period. {{% /note %}} 5. Click **{{< caps >}}Save Changes{{< /caps >}}**. @@ -106,7 +106,7 @@ influx bucket update -i 034ad714fdd6f000 -r 1209600000000000ns ## Update a bucket using the HTTP API -Use the InfluxDB HTTP API [`PATCH /api/v2/buckets` endpoint](/influxdb/cloud/api/#operation/PatchBucketsID) +Use the InfluxDB HTTP API [`PATCH /api/v2/buckets` endpoint](/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-) to update a bucket. Updating a bucket requires the following: @@ -119,16 +119,16 @@ You can update the following bucket properties: - description - retention rules -1. 
To find the bucket ID, send a request to the HTTP API [`GET /api/v2/buckets/` endpoint](/influxdb/cloud/api/#operation/GetBuckets) to retrieve the list of buckets. +1. To find the bucket ID, send a request to the HTTP API [`GET /api/v2/buckets/` endpoint](/influxdb/cloud/api/#get-/api/v2/buckets) to retrieve the list of buckets. - {{< api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#operation/GetBuckets" >}} + {{< api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#get-/api/v2/buckets" >}} -2. Send a request to the HTTP API [PATCH `/api/v2/buckets/{BUCKET_ID}` endpoint](/influxdb/cloud/api/#operation/PatchBucketsID). +2. Send a request to the HTTP API [PATCH `/api/v2/buckets/{BUCKET_ID}` endpoint](/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-). In the URL path, specify the ID of the bucket from the previous step that you want to update. In the request body, set the properties that you want to update--for example: - {{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}" api-ref="/influxdb/cloud/api/#operation/PatchBucketsID" >}} + {{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/buckets/{BUCKET_ID}" api-ref="/influxdb/cloud/api/#patch-/api/v2/buckets/-bucketID-" >}} ```js { diff --git a/content/influxdb/cloud/admin/buckets/view-buckets.md b/content/influxdb/cloud/admin/buckets/view-buckets.md index b401cd7572..67459811f0 100644 --- a/content/influxdb/cloud/admin/buckets/view-buckets.md +++ b/content/influxdb/cloud/admin/buckets/view-buckets.md @@ -41,6 +41,6 @@ for information about other available flags. ## View buckets using the InfluxDB HTTP API -Send a request to the InfluxDB HTTP API [`/api/v2/buckets` endpoint](/influxdb/cloud/api/#operation/GetBuckets) to view buckets in an organization. 
+Send a request to the InfluxDB HTTP API [`/api/v2/buckets` endpoint](/influxdb/cloud/api/#get-/api/v2/buckets) to view buckets in an organization. -{{% api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#operation/GetBuckets" %}} \ No newline at end of file +{{% api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/buckets" api-ref="/influxdb/cloud/api/#get-/api/v2/buckets" %}} \ No newline at end of file diff --git a/content/influxdb/cloud/api-guide/api-invokable-scripts/_index.md b/content/influxdb/cloud/api-guide/api-invokable-scripts/_index.md index d448406b36..8ee1837413 100644 --- a/content/influxdb/cloud/api-guide/api-invokable-scripts/_index.md +++ b/content/influxdb/cloud/api-guide/api-invokable-scripts/_index.md @@ -28,7 +28,7 @@ for your [organization](/influxdb/v2/reference/glossary/#organization), send a request using the `POST` method to the `/api/v2/scripts` InfluxDB API endpoint. -{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/scripts" api-ref="/influxdb/cloud/api/#operation/PostScripts" >}} +{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/scripts" api-ref="/influxdb/cloud/api/#post-/api/v2/scripts" >}} Provide the following in your API request: @@ -88,7 +88,7 @@ InfluxDB returns the newly created script. Next, see how to [invoke a script](#i ## Invoke a script To invoke a script, send a request using the `POST` method to the `/api/v2/scripts/SCRIPT_ID/invoke` InfluxDB API endpoint. -{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#operation/PostScriptsIDInvoke" >}} +{{< api-endpoint method="post" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID/invoke" api-ref="/influxdb/cloud/api/#post-/api/v2/scripts/-scriptID-/invoke" >}} Replace *`SCRIPT_ID`* with the ID of the script you want to execute. 
To find the script ID, see how to [list scripts](#list-scripts). @@ -206,7 +206,7 @@ Provide the following in your request: To find a specific script for an organization, send a request using the `GET` method to the `/api/v2/scripts/SCRIPT_ID` InfluxDB API endpoint. -{{< api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#operation/GetScriptsID" >}} +{{< api-endpoint method="get" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#get-/api/v2/scripts/-scriptID-" >}} Replace *`SCRIPT_ID`* with the ID of the script you want to find. @@ -228,7 +228,7 @@ Use the API to replace the following properties of an invokable script: To update an existing script for an organization, send a request using the `PATCH` method to the `/api/v2/scripts/SCRIPT_ID` InfluxDB API endpoint. Replace *`SCRIPT_ID`* with the ID of the script you want to update. -{{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#operation/PatchScriptsID" >}} +{{< api-endpoint method="patch" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#patch-/api/v2/scripts/-scriptID-" >}} Provide the following in your request: @@ -258,7 +258,7 @@ InfluxDB returns the updated invokable script. To delete a script, send a request using the `DELETE` method to the `/api/v2/scripts/SCRIPT_ID` InfluxDB API endpoint. Replace *`SCRIPT_ID`* with the ID of the script you want to update. 
-{{< api-endpoint method="delete" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#operation/DeleteScriptsID" >}} +{{< api-endpoint method="delete" endpoint="https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" api-ref="/influxdb/cloud/api/#delete-/api/v2/scripts/-scriptID-" >}} Provide the following in your request: diff --git a/content/influxdb/cloud/get-started/setup.md b/content/influxdb/cloud/get-started/setup.md index d429de7be3..edeb186f65 100644 --- a/content/influxdb/cloud/get-started/setup.md +++ b/content/influxdb/cloud/get-started/setup.md @@ -86,7 +86,7 @@ you need is in place. Send a request to the InfluxDB API `/api/v2/authorizations` endpoint using the `POST` request method. -{{< api-endpoint endpoint="http://cloud2.influxdata.com/api/v2/authorizations" method="post" api-ref="/influxdb/cloud/api/#operation/PostAuthorizations" >}} +{{< api-endpoint endpoint="http://cloud2.influxdata.com/api/v2/authorizations" method="post" api-ref="/influxdb/cloud/api/#post-/api/v2/authorizations" >}} Include the following with your request: @@ -105,7 +105,7 @@ Include the following with your request: - **orgID**: [InfluxDB organization ID](/influxdb/cloud/admin/organizations/view-orgs/#view-your-organization-id) - **type**: Resource type. _For information about what InfluxDB resource types exist, use the - [`/api/v2/resources` endpoint](/influxdb/cloud/api/#operation/GetResources)._ + [`/api/v2/resources` endpoint](/influxdb/cloud/api/#get-/api/v2/resources)._ The following example uses cURL and the InfluxDB API to generate an All-Access token: @@ -371,7 +371,7 @@ export INFLUX_TOKEN= To create a bucket using the InfluxDB HTTP API, send a request to the InfluxDB API `/api/v2/buckets` endpoint using the `POST` request method. 
-{{< api-endpoint endpoint="http://cloud2.influxdata.com/api/v2/buckets" method="post" api-ref="/influxdb/cloud/api/#operation/PostBuckets" >}} +{{< api-endpoint endpoint="http://cloud2.influxdata.com/api/v2/buckets" method="post" api-ref="/influxdb/cloud/api/#post-/api/v2/buckets" >}} Include the following with your request: diff --git a/content/influxdb/cloud/query-data/parameterized-queries.md b/content/influxdb/cloud/query-data/parameterized-queries.md index 2f251bd803..86e5060fde 100644 --- a/content/influxdb/cloud/query-data/parameterized-queries.md +++ b/content/influxdb/cloud/query-data/parameterized-queries.md @@ -22,7 +22,7 @@ see the [OWASP SQL Injection Prevention Cheat Sheet](https://cheatsheetseries.ow While this guide is about SQL, it contains useful general advice. {{% /note %}} -The InfluxDB Cloud [`/api/v2/query` API endpoint](/influxdb/cloud/api/#operation/PostQuery) +The InfluxDB Cloud [`/api/v2/query` API endpoint](/influxdb/cloud/api/#post-/api/v2/query) accepts a `params` field in the request body. The `params` field is a JSON object with key-value pairs where the key is a parameter name and the value is the parameter value. diff --git a/content/influxdb/cloud/reference/api/_index.md b/content/influxdb/cloud/reference/api/_index.md deleted file mode 100644 index 16e3bdfbfd..0000000000 --- a/content/influxdb/cloud/reference/api/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: InfluxDB HTTP API -description: > - The InfluxDB HTTP API provides a programmatic interface for interactions with InfluxDB, such as writing and querying data, - and managing resources within an InfluxDB instance. - Access the InfluxDB API using the `/api/v2/` endpoint. 
-menu: - influxdb_cloud: - parent: Reference - name: InfluxDB HTTP API -weight: 3 -influxdb/cloud/tags: [api] -source: /shared/influxdb-v2/reference/api/_index.md ---- - - diff --git a/content/influxdb/cloud/reference/internals/ttbr.md b/content/influxdb/cloud/reference/internals/ttbr.md index 499ac26203..32e203838d 100644 --- a/content/influxdb/cloud/reference/internals/ttbr.md +++ b/content/influxdb/cloud/reference/internals/ttbr.md @@ -31,7 +31,7 @@ Whenever you send a write request to the `/api/v2/write` endpoint, the following 2. If the write is queued, API responds with an HTTP 204 status code. 3. API handles the write asynchronously and reaches eventual consistency. -_For more information, see [`/api/v2/write` documentation](/influxdb/cloud/api/#operation/PostWrite)._ +_For more information, see [`/api/v2/write` documentation](/influxdb/cloud/api/#post-/api/v2/write)._ {{% note %}} The returned 204 status code does not mean that the point is queryable; diff --git a/content/influxdb/cloud/reference/release-notes/cloud-updates.md b/content/influxdb/cloud/reference/release-notes/cloud-updates.md index d6cea7c845..7b92bb5016 100644 --- a/content/influxdb/cloud/reference/release-notes/cloud-updates.md +++ b/content/influxdb/cloud/reference/release-notes/cloud-updates.md @@ -309,7 +309,7 @@ Update Telegraf configuration in the UI to make it easier to set up and configur ### API updates -- [List all buckets](/influxdb/cloud/api/#operation/GetBuckets) in the API now supports the `after` parameter as an alternative to `offset`. +- [List all buckets](/influxdb/cloud/api/#get-/api/v2/buckets) in the API now supports the `after` parameter as an alternative to `offset`. - Add the `v1/authorization` package to support authorizing requests to the InfluxDB 1.x API. 
### Task updates diff --git a/content/influxdb/cloud/write-data/developer-tools/api.md b/content/influxdb/cloud/write-data/developer-tools/api.md index 06b2ec0a77..3e5b2a17f9 100644 --- a/content/influxdb/cloud/write-data/developer-tools/api.md +++ b/content/influxdb/cloud/write-data/developer-tools/api.md @@ -53,4 +53,4 @@ Compression reduces network bandwidth, but increases server-side load. {{% /note %}} _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb/cloud/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb/cloud/api/#post-/api/v2/write)._ diff --git a/content/influxdb/v1/tools/api.md b/content/influxdb/v1/tools/api.md index cbb7fb70a4..1ed9176ba8 100644 --- a/content/influxdb/v1/tools/api.md +++ b/content/influxdb/v1/tools/api.md @@ -168,11 +168,11 @@ curl -XPOST "localhost:8086/api/v2/write?bucket=db/rp&precision=s" \ The [/api/v2/buckets](/influxdb/v2/api/#tag/Buckets) endpoint accepts `GET`, `POST` and `DELETE` HTTP requests. Use this endpoint to -[create](/influxdb/v2/api/#operation/PostBuckets), -[delete](/influxdb/v2/api/#operation/DeleteBucketsID), -[list](/influxdb/v2/api/#operation/GetBuckets), -[update](/influxdb/v2/api/#operation/PatchBucketsID) and -[retrieve](/influxdb/v2/api/#operation/GetBucketsID) +[create](/influxdb/v2/api/#post-/api/v2/buckets), +[delete](/influxdb/v2/api/#delete-/api/v2/buckets/-bucketID-), +[list](/influxdb/v2/api/#get-/api/v2/buckets), +[update](/influxdb/v2/api/#patch-/api/v2/buckets/-bucketID-) and +[retrieve](/influxdb/v2/api/#get-/api/v2/buckets/-bucketID-) buckets in your InfluxDB instance. Note that InfluxDB 2.x uses organizations and buckets instead of databases and retention policies. 
diff --git a/content/influxdb/v2/.vale.ini b/content/influxdb/v2/.vale.ini index 3538a1d808..06147959c9 100644 --- a/content/influxdb/v2/.vale.ini +++ b/content/influxdb/v2/.vale.ini @@ -14,22 +14,4 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. -Google.Units = NO -Vale.Spelling = NO -# Disable Vale.Terms - the vocabulary-based substitution rule creates too many -# false positives from URLs, file paths, and code. The accepted terms in -# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. -Vale.Terms = NO -# Disable write-good.TooWordy - flags legitimate technical terms like -# "aggregate", "expiration", "multiple", "However" that are standard in -# database documentation. -write-good.TooWordy = NO - -# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... -# Ignore full URLs like https://example.com/... -# Ignore inline code in frontmatter (description fields, etc.) 
-TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` \ No newline at end of file +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb/v2/admin/buckets/create-bucket.md b/content/influxdb/v2/admin/buckets/create-bucket.md index 077a587c82..b143159cdd 100644 --- a/content/influxdb/v2/admin/buckets/create-bucket.md +++ b/content/influxdb/v2/admin/buckets/create-bucket.md @@ -120,7 +120,7 @@ influx bucket create \ To create a bucket with the InfluxDB HTTP API, send a request to the following endpoint: -{{< api-endpoint method="post" endpoint="https://localhost:8086/api/v2/buckets" api-ref="/influxdb/v2/api/#operation/PostBuckets" >}} +{{< api-endpoint method="post" endpoint="https://localhost:8086/api/v2/buckets" api-ref="/influxdb/v2/api/#post-/api/v2/buckets" >}} Include the following in your request: @@ -149,7 +149,7 @@ instance _(see [InfluxDB URLs](/influxdb/v2/reference/urls/))_. ``` _For information about **InfluxDB API options and response codes**, see -[InfluxDB API Buckets documentation](/influxdb/v2/api/#operation/PostBuckets)._ +[InfluxDB API Buckets documentation](/influxdb/v2/api/#post-/api/v2/buckets)._ {{% /tab-content %}} diff --git a/content/influxdb/v2/get-started/setup.md b/content/influxdb/v2/get-started/setup.md index a577e848d2..d8bb2a57e1 100644 --- a/content/influxdb/v2/get-started/setup.md +++ b/content/influxdb/v2/get-started/setup.md @@ -155,7 +155,7 @@ Your InfluxDB instance is now initialized. {{% tab-content %}} Send a request to the following HTTP API endpoint: -{{< api-endpoint endpoint="http://localhost:8086/api/v2/setup" method="post" api-ref="/influxdb/v2/api/#operation/PostAuthorizations" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/setup" method="post" api-ref="/influxdb/v2/api/#post-/api/v2/authorizations" >}} {{% warn %}} @@ -195,7 +195,7 @@ We recommend using a password manager or a secret store to securely store sensitive tokens. 
{{% /note %}} -For more options and details, see the [`POST /api/v2/setup` API endpoint documentation](/influxdb/v2/api/#operation/PostSetup). +For more options and details, see the [`POST /api/v2/setup` API endpoint documentation](/influxdb/v2/api/#post-/api/v2/setup). {{% /tab-content%}} @@ -268,7 +268,7 @@ For more options and details, see the [`POST /api/v2/setup` API endpoint documen Send a request to the InfluxDB API `/api/v2/authorizations` endpoint using the `POST` request method. -{{< api-endpoint endpoint="http://localhost:8086/api/v2/authorizations" method="post" api-ref="/influxdb/v2/api/#operation/PostAuthorizations" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/authorizations" method="post" api-ref="/influxdb/v2/api/#post-/api/v2/authorizations" >}} Include the following with your request: @@ -287,7 +287,7 @@ Include the following with your request: - **orgID**: [InfluxDB organization ID](/influxdb/v2/admin/organizations/view-orgs/#view-your-organization-id) - **type**: Resource type. _For information about what InfluxDB resource types exist, use the - [`/api/v2/resources` endpoint](/influxdb/v2/api/#operation/GetResources)._ + [`/api/v2/resources` endpoint](/influxdb/v2/api/#get-/api/v2/resources)._ The following example uses cURL and the InfluxDB API to generate an All Access token: @@ -599,7 +599,7 @@ Replace the following: To create a bucket using the InfluxDB HTTP API, send a request to the InfluxDB API `/api/v2/buckets` endpoint using the `POST` request method. 
-{{< api-endpoint endpoint="http://localhost:8086/api/v2/buckets" method="post" api-ref="/influxdb/v2/api/#operation/PostBuckets">}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/buckets" method="post" api-ref="/influxdb/v2/api/#post-/api/v2/buckets">}} Include the following with your request: diff --git a/content/influxdb/v2/process-data/manage-tasks/run-task.md b/content/influxdb/v2/process-data/manage-tasks/run-task.md index 37793f22c8..0888e34339 100644 --- a/content/influxdb/v2/process-data/manage-tasks/run-task.md +++ b/content/influxdb/v2/process-data/manage-tasks/run-task.md @@ -12,8 +12,8 @@ related: - /influxdb/v2/reference/cli/influx/task/run - /influxdb/v2/reference/cli/influx/task/run/retry - /influxdb/v2/reference/cli/influx/task/retry-failed - - /influxdb/v2/api/#operation/PostTasksIDRuns - - /influxdb/v2/api/#operation/PostTasksIDRunsIDRetry + - /influxdb/v2/api/#post-/api/v2/tasks/-taskID-/runs + - /influxdb/v2/api/#post-/api/v2/tasks/-taskID-/runs/-runID-/retry source: /shared/influxdb-v2/process-data/manage-tasks/run-task.md --- diff --git a/content/influxdb/v2/process-data/manage-tasks/view-tasks.md b/content/influxdb/v2/process-data/manage-tasks/view-tasks.md index 649371501e..b0874b1b73 100644 --- a/content/influxdb/v2/process-data/manage-tasks/view-tasks.md +++ b/content/influxdb/v2/process-data/manage-tasks/view-tasks.md @@ -38,7 +38,7 @@ See the [`influx task list` documentation](/influxdb/v2/reference/cli/influx/tas for information about other available flags. ## View tasks with the InfluxDB API -Use the [`/tasks` InfluxDB API endpoint](/influxdb/v2/api/#operation/GetTasks) +Use the [`/tasks` InfluxDB API endpoint](/influxdb/v2/api/#get-/api/v2/tasks) to return a list of tasks. 
-{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks" api-ref="/influxdb/v2/api/#operation/GetTasks" >}} \ No newline at end of file +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks" api-ref="/influxdb/v2/api/#get-/api/v2/tasks" >}} \ No newline at end of file diff --git a/content/influxdb/v2/reference/api/_index.md b/content/influxdb/v2/reference/api/_index.md deleted file mode 100644 index 47e1ea5fa9..0000000000 --- a/content/influxdb/v2/reference/api/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: InfluxDB HTTP API -description: > - The InfluxDB HTTP API provides a programmatic interface for interactions with InfluxDB, such as writing and querying data, - and managing resources within an InfluxDB instance. - Access the InfluxDB API using the `/api/v2/` or InfluxDB v1 endpoints. -menu: - influxdb_v2: - parent: Reference - name: InfluxDB HTTP API -weight: 3 -influxdb/v2/tags: [api] -aliases: - - /influxdb/v2/concepts/api/ -related: - - /influxdb/v2/api-guide/api_intro/ - - /influxdb/v2/api-guide/influxdb-1x/ -source: /shared/influxdb-v2/reference/api/_index.md ---- - - diff --git a/content/influxdb/v2/reference/config-options.md b/content/influxdb/v2/reference/config-options.md index 4f2d83d1e2..1c4c36330b 100644 --- a/content/influxdb/v2/reference/config-options.md +++ b/content/influxdb/v2/reference/config-options.md @@ -54,7 +54,7 @@ influx server-config Use the `/api/v2/config` InfluxDB API endpoint to retrieve your runtime server configuration. 
-{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/config" api-ref="/influxdb/v2/api/#operation/GetConfig" >}} +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/config" api-ref="/influxdb/v2/api/#get-/api/v2/config" >}} ### Configuration precedence InfluxDB honors configuration settings using the following precedence: diff --git a/content/influxdb/v2/reference/internals/metrics.md b/content/influxdb/v2/reference/internals/metrics.md index 34d98420f4..2af9ff797c 100644 --- a/content/influxdb/v2/reference/internals/metrics.md +++ b/content/influxdb/v2/reference/internals/metrics.md @@ -13,7 +13,7 @@ Get metrics about the workload performance of an InfluxDB OSS instance. InfluxDB OSS exposes a `/metrics` endpoint that returns performance, resource, and usage metrics formatted in the [Prometheus plain-text exposition format](https://prometheus.io/docs/instrumenting/exposition_formats). -{{< api-endpoint method="GET" endpoint="http://localhost:8086/metrics" api-ref="/influxdb/v2/api/#operation/GetMetrics" >}} +{{< api-endpoint method="GET" endpoint="http://localhost:8086/metrics" api-ref="/influxdb/v2/api/#get-/metrics" >}} Metrics contain a name, an optional set of key-value pairs, and a value. diff --git a/content/influxdb/v2/reference/release-notes/influxdb.md b/content/influxdb/v2/reference/release-notes/influxdb.md index 94eaafcc7a..a76a8a1d25 100644 --- a/content/influxdb/v2/reference/release-notes/influxdb.md +++ b/content/influxdb/v2/reference/release-notes/influxdb.md @@ -642,9 +642,9 @@ This release includes the following new features: #### API -- Add new parameters to GET [`/users`](/influxdb/v2/api/#operation/GetUsers) API, including: `offset`, `limit`, and `after`. -- Add the [`api/v2/backup/metadata`](/influxdb/v2/api/#operation/GetBackupMetadata) endpoint for backing up both key-value and SQL metadata, and the [`api/v2/restore/sql`](/influxdb/v2/api/#operation/GetRoutes) for restoring SQL metadata. 
-- Deprecated [`POST .../secrets/delete`](/influxdb/v2/api/#operation/PostOrgsIDSecrets). To delete a secret, use [`DELETE .../secrets/{secretID}`](/influxdb/v2/api/#operation/DeleteOrgsIDSecretsID).
+- Add new parameters to GET [`/users`](/influxdb/v2/api/#get-/api/v2/users) API, including: `offset`, `limit`, and `after`.
+- Add the [`api/v2/backup/metadata`](/influxdb/v2/api/#get-/api/v2/backup/metadata) endpoint for backing up both key-value and SQL metadata, and the [`api/v2/restore/sql`](/influxdb/v2/api/#post-/api/v2/restore/sql) endpoint for restoring SQL metadata.
+- Deprecated [`POST .../secrets/delete`](/influxdb/v2/api/#post-/api/v2/orgs/-orgID-/secrets/delete). To delete a secret, use [`DELETE .../secrets/{secretID}`](/influxdb/v2/api/#delete-/api/v2/orgs/-orgID-/secrets/-secretID-).
 
 #### CLI
 
@@ -730,7 +730,7 @@ For more information about each plugin, see [Telegraf plugins](/telegraf/v1/plug
 ### Bug fixes
 
 - Log API errors to server logs and tell clients to check the server logs for the error message.
-- Fix pagination for GET [`/buckets`](/influxdb/v2/api/#operation/GetBuckets) API when displaying results. Previously, pagination was broken if a request included both an `org` filter AND the `after` request parameter. Also corrects `descending` parameter to sort when an `org` filter is used and saved.
+- Fix pagination for GET [`/buckets`](/influxdb/v2/api/#get-/api/v2/buckets) API when displaying results. Previously, pagination was broken if a request included both an `org` filter AND the `after` request parameter. Also corrects `descending` parameter to sort when an `org` filter is used and saved.
 - Sync series segment to disk after writing.
 - Do not allow shard creation to create overlapping shards.
 - Don't drop shard group durations when upgrading InfluxDB.
diff --git a/content/influxdb/v2/write-data/developer-tools/api.md b/content/influxdb/v2/write-data/developer-tools/api.md index 519657369d..bdc5aa8757 100644 --- a/content/influxdb/v2/write-data/developer-tools/api.md +++ b/content/influxdb/v2/write-data/developer-tools/api.md @@ -54,4 +54,4 @@ Compression reduces network bandwidth, but increases server-side load. {{% /note %}} _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb/v2/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb/v2/api/#post-/api/v2/write)._ diff --git a/content/influxdb/v2/write-data/troubleshoot.md b/content/influxdb/v2/write-data/troubleshoot.md index 105e364e03..a55b82d3bb 100644 --- a/content/influxdb/v2/write-data/troubleshoot.md +++ b/content/influxdb/v2/write-data/troubleshoot.md @@ -10,8 +10,8 @@ menu: parent: Write data influxdb/v2/tags: [write, line protocol, errors] related: - - /influxdb/v2/api/v2/#operation/PostLegacyWrite, InfluxDB v1 API /write endpoint - - /influxdb/v2/api/v2/#operation/PostWrite, InfluxDB API /api/v2/write endpoint + - /influxdb/v2/api/v2/#post-/write, InfluxDB v1 API /write endpoint + - /influxdb/v2/api/v2/#post-/api/v2/write, InfluxDB API /api/v2/write endpoint - /influxdb/v2/reference/internals - /influxdb/v2/reference/cli/influx/write source: /shared/influxdb-v2/write-data/troubleshoot.md diff --git a/content/influxdb3/cloud-dedicated/.vale.ini b/content/influxdb3/cloud-dedicated/.vale.ini index 35dfc38e56..c2ca4cb2fa 100644 --- a/content/influxdb3/cloud-dedicated/.vale.ini +++ b/content/influxdb3/cloud-dedicated/.vale.ini @@ -14,22 +14,4 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. 
-Google.Units = NO -Vale.Spelling = NO -# Disable Vale.Terms - the vocabulary-based substitution rule creates too many -# false positives from URLs, file paths, and code. The accepted terms in -# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. -Vale.Terms = NO -# Disable write-good.TooWordy - flags legitimate technical terms like -# "aggregate", "expiration", "multiple", "However" that are standard in -# database documentation. -write-good.TooWordy = NO - -# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... -# Ignore full URLs like https://example.com/... -# Ignore inline code in frontmatter (description fields, etc.) -TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` \ No newline at end of file +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/admin/databases/create.md b/content/influxdb3/cloud-dedicated/admin/databases/create.md index a339818a37..29039b5f6c 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/create.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/create.md @@ -109,7 +109,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques {{% api-endpoint method="POST" endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" -api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} +api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/databases" %}} {{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN|DATABASE_NAME" %}} ```bash @@ -201,7 +201,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques {{% api-endpoint method="POST" endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" -api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} 
+api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/databases" %}} In the request body, include the `partitionTemplate` property and specify the [partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) as an array of objects--for example: diff --git a/content/influxdb3/cloud-dedicated/admin/databases/delete.md b/content/influxdb3/cloud-dedicated/admin/databases/delete.md index 8d21c3f710..01bb41f418 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/delete.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/delete.md @@ -116,7 +116,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques 1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME" method="delete" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/DeleteClusterDatabase" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME" method="delete" api-ref="/influxdb3/cloud-dedicated/api/management/#delete-/accounts/-accountId-/clusters/-clusterId-/databases/-databaseName-" %}} In the URL, provide the following: diff --git a/content/influxdb3/cloud-dedicated/admin/databases/list.md b/content/influxdb3/cloud-dedicated/admin/databases/list.md index 4de3813b97..192131221a 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/list.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/list.md @@ -86,7 +86,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques 1. 
If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/GetClusterDatabases" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#get-/accounts/-accountId-/clusters/-clusterId-/databases" %}} In the URL, provide the following credentials: - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to _(see how to [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. diff --git a/content/influxdb3/cloud-dedicated/admin/databases/update.md b/content/influxdb3/cloud-dedicated/admin/databases/update.md index 9c6507b0c2..75ca7799d8 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/update.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/update.md @@ -161,7 +161,7 @@ The retention period value cannot be negative or contain whitespace. 1. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" method="post" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" method="post" api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/databases" %}} In the URL, provide the following credentials: diff --git a/content/influxdb3/cloud-dedicated/admin/tables/create.md b/content/influxdb3/cloud-dedicated/admin/tables/create.md index 7f5c7801a2..ba8ec6367c 100644 --- a/content/influxdb3/cloud-dedicated/admin/tables/create.md +++ b/content/influxdb3/cloud-dedicated/admin/tables/create.md @@ -112,7 +112,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques {{% api-endpoint method="POST" endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" -api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabaseTable" %}} +api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/databases/-databaseName-/tables" %}} {{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|DATABASE_NAME|TABLE_NAME|MANAGEMENT_TOKEN" %}} ```bash @@ -242,7 +242,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques {{% api-endpoint method="POST" endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" -api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabaseTable" %}} +api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/databases/-databaseName-/tables" %}} In the request body, include 
the `partitionTemplate` property and specify the [partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) as an array of objects--for example: diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md index 2cea32ab01..67b4c4798a 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md @@ -142,7 +142,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques 1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens" method="post" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens" method="post" api-ref="/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/tokens" %}} In the URL, provide the following credentials: @@ -157,7 +157,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques In the request body, provide the following parameters: - - `permissions`: an array of token [permissions](/influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken) (read or write) objects: + - `permissions`: an array of token [permissions](/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/tokens) (read or write) objects: - `"action"`: Specify `read` or `write` permission to the database. - `"resource"`: Specify the database name. 
- `description`: Provide a description of the token. @@ -230,7 +230,7 @@ If a new database token doesn't immediately work (you receive a `401 Unauthorize The `influxctl token create` command supports the `--format json` option. By default, the command outputs the token string. -For [token details](/influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken) and easier programmatic access to the command output, include `--format json` +For [token details](/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/tokens) and easier programmatic access to the command output, include `--format json` with your command to format the output as JSON. The Management API outputs JSON format in the response body. diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md index 02daf2c264..e808b1c348 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md @@ -107,7 +107,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques 1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/GetDatabaseTokens" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#get-/accounts/-accountId-/clusters/-clusterId-/tokens" %}} In the URL, provide the following credentials: @@ -142,7 +142,7 @@ Replace the following in your request: To retrieve a specific database token by ID, send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/GetDatabaseToken" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="get" api-ref="/influxdb3/cloud-dedicated/api/management/#get-/accounts/-accountId-/clusters/-clusterId-/tokens/-tokenId-" %}} In the URL, provide the following: diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md index 38cf8d8587..7d16c6c78b 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md @@ -119,7 +119,7 @@ but you can use any HTTP client._ 2. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="delete" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/DeleteDatabaseToken" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="delete" api-ref="/influxdb3/cloud-dedicated/api/management/#delete-/accounts/-accountId-/clusters/-clusterId-/tokens/-tokenId-" %}} In the URL, provide the following: diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md index 8763fabbe8..697d46a1fb 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md @@ -160,7 +160,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques 1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="patch" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/UpdateDatabaseToken" %}} + {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/tokens/TOKEN_ID" method="patch" api-ref="/influxdb3/cloud-dedicated/api/management/#patch-/accounts/-accountId-/clusters/-clusterId-/tokens/-tokenId-" %}} In the URL, provide the following: @@ -176,7 +176,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques In the request body, provide the following parameters: - - `permissions`: an array of token [permissions](/influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken) (read or write) objects: + - `permissions`: an array of token [permissions](/influxdb3/cloud-dedicated/api/management/#post-/accounts/-accountId-/clusters/-clusterId-/tokens) (read or write) objects: - `"action"`: Specify `read` or `write` permission to the database. - `"resource"`: Specify the database name. - `description`: Provide a description of the token. diff --git a/content/influxdb3/cloud-dedicated/get-started/write.md b/content/influxdb3/cloud-dedicated/get-started/write.md index 501752a76c..c0587d2a02 100644 --- a/content/influxdb3/cloud-dedicated/get-started/write.md +++ b/content/influxdb3/cloud-dedicated/get-started/write.md @@ -418,11 +418,11 @@ Write data with your existing workloads that already use the InfluxDB v1 To write data to InfluxDB using the [InfluxDB v1 HTTP API](/influxdb3/cloud-dedicated/reference/api/), send a request to the -[InfluxDB API `/write` endpoint](/influxdb3/cloud-dedicated/api/#operation/PostLegacyWrite) +[InfluxDB API `/write` endpoint](/influxdb3/cloud-dedicated/api/#post-/write) using the `POST` request method. 
{{% api-endpoint endpoint="https://{{< influxdb/host >}}/write" method="post" -api-ref="/influxdb3/cloud-dedicated/api/#operation/PostLegacyWrite"%}} +api-ref="/influxdb3/cloud-dedicated/api/#post-/write"%}} Include the following with your request: @@ -438,7 +438,7 @@ Include the following with your request: > [!Note] > With the {{% product-name %}} -> [v1 API `/write` endpoint](/influxdb3/cloud-dedicated/api/#operation/PostLegacyWrite), +> [v1 API `/write` endpoint](/influxdb3/cloud-dedicated/api/#post-/write), > `Authorization: Bearer` and `Authorization: Token` are equivalent and you can > use either scheme to pass a database token in your request. For more information > about HTTP API token schemes, see how to @@ -530,7 +530,7 @@ To write data to InfluxDB using the to the InfluxDB API `/api/v2/write` endpoint using the `POST` request method. {{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/write" -method="post" api-ref="/influxdb3/cloud-dedicated/api/#operation/PostWrite" >}} +method="post" api-ref="/influxdb3/cloud-dedicated/api/#post-/api/v2/write" >}} Include the following with your request: diff --git a/content/influxdb3/cloud-dedicated/reference/api/_index.md b/content/influxdb3/cloud-dedicated/reference/api/_index.md deleted file mode 100644 index 806c248aeb..0000000000 --- a/content/influxdb3/cloud-dedicated/reference/api/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: InfluxDB HTTP API -description: > - The InfluxDB HTTP API provides a programmatic interface for interactions with - InfluxDB, such as writing and querying data, and managing an InfluxDB cluster. - Access the InfluxDB API using the `/api/v2/write`, InfluxDB v1, or - Management API endpoints for InfluxDB Cloud Dedicated. 
-menu: - influxdb3_cloud_dedicated: - parent: Reference - name: InfluxDB HTTP API -weight: 105 -influxdb3/cloud-dedicated/tags: [api] ---- - -The InfluxDB HTTP API provides a programmatic interface for interactions with -{{% product-name %}}, such as writing and querying data, and managing an InfluxDB cluster. - -Access the InfluxDB HTTP API using the `/api/v2/` endpoint, InfluxDB v1 endpoints, or -Management API endpoints for {{% product-name %}}. - -## InfluxDB v2 Compatibility API reference documentation - -InfluxDB v2 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v2-compatible -endpoints that work with {{% product-name %}} and with InfluxDB 2.x client -libraries and third-party integrations. - -## InfluxDB v1 Compatibility API reference documentation - -InfluxDB v1 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v1-compatible `/write` and `/query` endpoints that work with {{% product-name %}} and with InfluxDB 1.x client libraries and third-party integrations. - -## InfluxDB Management API reference documentation - -InfluxDB Management API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB Management API endpoints. -The Management API lets cluster administrators manage resources such as databases, partitioning templates, and database tokens. 
diff --git a/content/influxdb3/cloud-dedicated/reference/client-libraries/v2/javascript/nodejs/write.md b/content/influxdb3/cloud-dedicated/reference/client-libraries/v2/javascript/nodejs/write.md index 4a05ccd4c4..e862e4344f 100644 --- a/content/influxdb3/cloud-dedicated/reference/client-libraries/v2/javascript/nodejs/write.md +++ b/content/influxdb3/cloud-dedicated/reference/client-libraries/v2/javascript/nodejs/write.md @@ -144,4 +144,4 @@ node write.js ### Response codes _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb3/cloud-dedicated/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb3/cloud-dedicated/api/#post-/api/v2/write)._ diff --git a/content/influxdb3/cloud-serverless/.vale.ini b/content/influxdb3/cloud-serverless/.vale.ini index 9ebc431b72..4472fd3c10 100644 --- a/content/influxdb3/cloud-serverless/.vale.ini +++ b/content/influxdb3/cloud-serverless/.vale.ini @@ -14,22 +14,4 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. -Google.Units = NO -Vale.Spelling = NO -# Disable Vale.Terms - the vocabulary-based substitution rule creates too many -# false positives from URLs, file paths, and code. The accepted terms in -# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. -Vale.Terms = NO -# Disable write-good.TooWordy - flags legitimate technical terms like -# "aggregate", "expiration", "multiple", "However" that are standard in -# database documentation. -write-good.TooWordy = NO - -# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... -# Ignore full URLs like https://example.com/... -# Ignore inline code in frontmatter (description fields, etc.) 
-TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` \ No newline at end of file +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb3/cloud-serverless/admin/buckets/create-bucket.md b/content/influxdb3/cloud-serverless/admin/buckets/create-bucket.md index 9efd44dab5..2f0f2d39c8 100644 --- a/content/influxdb3/cloud-serverless/admin/buckets/create-bucket.md +++ b/content/influxdb3/cloud-serverless/admin/buckets/create-bucket.md @@ -183,7 +183,7 @@ The retention period value cannot be negative or contain whitespace. To create a bucket with the InfluxDB HTTP API, send a request to the following endpoint: -{{< api-endpoint method="post" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#operation/PostBuckets" >}} +{{< api-endpoint method="post" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#post-/api/v2/buckets" >}} Include the following in your request: @@ -266,7 +266,7 @@ The retention period value can't be negative or contain whitespace. ``` _For information about **InfluxDB API options and response codes**, see -[InfluxDB API Buckets reference documentation](/influxdb3/cloud-serverless/api/#operation/PostBuckets)._ +[InfluxDB API Buckets reference documentation](/influxdb3/cloud-serverless/api/#post-/api/v2/buckets)._ {{% /tab-content %}} {{< /tabs-wrapper >}} diff --git a/content/influxdb3/cloud-serverless/admin/buckets/manage-explicit-bucket-schemas.md b/content/influxdb3/cloud-serverless/admin/buckets/manage-explicit-bucket-schemas.md index dd5d8f7617..d82cfe156b 100644 --- a/content/influxdb3/cloud-serverless/admin/buckets/manage-explicit-bucket-schemas.md +++ b/content/influxdb3/cloud-serverless/admin/buckets/manage-explicit-bucket-schemas.md @@ -104,9 +104,9 @@ To view schema column definitions and metadata, specify the `--json` flag. 
### View schema type and schemas using the InfluxDB HTTP API -To list schemas for a bucket, send a request to the InfluxDB HTTP [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb3/cloud-serverless/api/#operation/getMeasurementSchemas): +To list schemas for a bucket, send a request to the InfluxDB HTTP [`/api/v2/buckets/{BUCKET_ID}/schema/measurements` endpoint](/influxdb3/cloud-serverless/api/#get-/api/v2/buckets/-bucketID-/schema/measurements): -{{% api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb3/cloud-serverless/api/#operation/getMeasurementSchemas" %}} +{{% api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}/schema/measurements" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/buckets/-bucketID-/schema/measurements" %}} ## Update a bucket schema @@ -145,11 +145,11 @@ You can't modify or delete columns in bucket schemas. 1. [View the existing measurement schema](#view-schema-type-and-schemas-using-the-influxdb-http-api) and copy the `columns` list. -2. Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}` endpoint](/influxdb3/cloud-serverless/api/#operation/updateMeasurementSchema). +2. Send a request to the HTTP API [`/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}` endpoint](/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-/schema/measurements/-measurementID-). 
In the request body, set the `columns` property to a list of old and new column definitions for the measurement schema--for example, the following request appends the new column `CO2` to `columns` retrieved in the previous step: - {{< api-endpoint method="patch" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}" api-ref="/influxdb3/cloud-serverless/api/#operation/updateMeasurementSchema" >}} + {{< api-endpoint method="patch" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}/schema/measurements/{MEASUREMENT_ID}" api-ref="/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-/schema/measurements/-measurementID-" >}} ```js { diff --git a/content/influxdb3/cloud-serverless/admin/buckets/update-bucket.md b/content/influxdb3/cloud-serverless/admin/buckets/update-bucket.md index bdd8eef514..a346b16e02 100644 --- a/content/influxdb3/cloud-serverless/admin/buckets/update-bucket.md +++ b/content/influxdb3/cloud-serverless/admin/buckets/update-bucket.md @@ -46,7 +46,7 @@ or the InfluxDB HTTP API to update a bucket. > [!Note] > Use the [`influx bucket update` command](#update-a-buckets-retention-period) - > or the [InfluxDB HTTP API `PATCH /api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#operation/PatchBucketsID) to set a custom retention period. + > or the [InfluxDB HTTP API `PATCH /api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-) to set a custom retention period. 5. Click **{{< caps >}}Save Changes{{< /caps >}}**. @@ -96,7 +96,7 @@ influx bucket update -i 034ad714fdd6f000 -r 1209600000000000ns ## Update a bucket using the HTTP API -Use the InfluxDB HTTP API [`PATCH /api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#operation/PatchBucketsID) +Use the InfluxDB HTTP API [`PATCH /api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-) to update a bucket. 
Updating a bucket requires the following: @@ -108,16 +108,16 @@ You can update the following bucket properties: - description - retention rules -1. To find the bucket ID, send a request to the HTTP API [`GET /api/v2/buckets/` endpoint](/influxdb3/cloud-serverless/api/#operation/GetBuckets) to retrieve the list of buckets. +1. To find the bucket ID, send a request to the HTTP API [`GET /api/v2/buckets/` endpoint](/influxdb3/cloud-serverless/api/#get-/api/v2/buckets) to retrieve the list of buckets. - {{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#operation/GetBuckets" >}} + {{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/buckets" >}} -2. Send a request to the HTTP API [PATCH `/api/v2/buckets/{BUCKET_ID}` endpoint](/influxdb3/cloud-serverless/api/#operation/PatchBucketsID). +2. Send a request to the HTTP API [PATCH `/api/v2/buckets/{BUCKET_ID}` endpoint](/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-). In the URL path, specify the ID of the bucket from the previous step that you want to update. 
In the request body, set the properties that you want to update--for example: - {{< api-endpoint method="patch" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}" api-ref="/influxdb3/cloud-serverless/api/#operation/PatchBucketsID" >}} + {{< api-endpoint method="patch" endpoint="https://{{< influxdb/host >}}/api/v2/buckets/{BUCKET_ID}" api-ref="/influxdb3/cloud-serverless/api/#patch-/api/v2/buckets/-bucketID-" >}} ```js { diff --git a/content/influxdb3/cloud-serverless/admin/buckets/view-buckets.md b/content/influxdb3/cloud-serverless/admin/buckets/view-buckets.md index d831dd22a7..00bde34762 100644 --- a/content/influxdb3/cloud-serverless/admin/buckets/view-buckets.md +++ b/content/influxdb3/cloud-serverless/admin/buckets/view-buckets.md @@ -42,8 +42,8 @@ for information about other available flags. ## View buckets using the InfluxDB HTTP API -Send a request to the InfluxDB HTTP API [`/api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#operation/GetBuckets) to view buckets in an organization. +Send a request to the InfluxDB HTTP API [`/api/v2/buckets` endpoint](/influxdb3/cloud-serverless/api/#get-/api/v2/buckets) to view buckets in an organization. -{{% api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#operation/GetBuckets" %}} +{{% api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/buckets" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/buckets" %}} diff --git a/content/influxdb3/cloud-serverless/admin/tokens/create-token.md b/content/influxdb3/cloud-serverless/admin/tokens/create-token.md index 47063dec7d..9e36a181c8 100644 --- a/content/influxdb3/cloud-serverless/admin/tokens/create-token.md +++ b/content/influxdb3/cloud-serverless/admin/tokens/create-token.md @@ -181,13 +181,13 @@ for information about other available flags. Use the `/api/v2/authorizations` InfluxDB API endpoint to create a token. 
-{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/authorizations" api-ref="/influxdb/v2/api/#operation/PostAuthorizations" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/authorizations" api-ref="/influxdb/v2/api/#post-/api/v2/authorizations" >}} Include the following in your request: - **Headers** - **Authorization**: `Token API_TOKEN` - (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#operation/PostAuthorizations) permission) + (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#post-/api/v2/authorizations) permission) - **Content-type**: `application/json` - **Request body**: JSON object with the following properties: - **status**: Token status (active or inactive) @@ -218,7 +218,7 @@ body. {{% /code-placeholders %}} See the -[`POST /api/v2/authorizations` documentation](/influxdb/v2/api/#operation/PostAuthorizations) +[`POST /api/v2/authorizations` documentation](/influxdb/v2/api/#post-/api/v2/authorizations) for more information about options. {{% /tab-content %}} diff --git a/content/influxdb3/cloud-serverless/admin/tokens/delete-token.md b/content/influxdb3/cloud-serverless/admin/tokens/delete-token.md index d0f1c633ad..ae22767798 100644 --- a/content/influxdb3/cloud-serverless/admin/tokens/delete-token.md +++ b/content/influxdb3/cloud-serverless/admin/tokens/delete-token.md @@ -68,13 +68,13 @@ influx auth delete \ Use the `/api/v2/authorizations` InfluxDB API endpoint to delete a token. 
-{{< api-endpoint method="DELETE" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{AUTH_ID}" api-ref="/influxdb/v2/api/#operation/DeleteAuthorizationsID" >}} +{{< api-endpoint method="DELETE" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{AUTH_ID}" api-ref="/influxdb/v2/api/#delete-/api/v2/authorizations/-authID-" >}} Include the following in your request: - **Headers**: - **Authorization**: `Token API_TOKEN` - (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#operation/PostAuthorizations) permission) + (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#post-/api/v2/authorizations) permission) - **Content-type**: `application/json` - **Path parameters**: - **authID**: Authorization ID to delete diff --git a/content/influxdb3/cloud-serverless/admin/tokens/update-tokens.md b/content/influxdb3/cloud-serverless/admin/tokens/update-tokens.md index 25d406a622..f9cd9c33d2 100644 --- a/content/influxdb3/cloud-serverless/admin/tokens/update-tokens.md +++ b/content/influxdb3/cloud-serverless/admin/tokens/update-tokens.md @@ -106,13 +106,13 @@ influx auth inactive \ Use the `/api/v2/authorizations` InfluxDB API endpoint to update the description and status of a token. 
-{{< api-endpoint method="PATCH" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{AUTH_ID}" api-ref="/influxdb/v2/api/#operation/PatchAuthorizationsID" >}} +{{< api-endpoint method="PATCH" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{AUTH_ID}" api-ref="/influxdb/v2/api/#patch-/api/v2/authorizations/-authID-" >}} Include the following in your request: - **Headers**: - **Authorization**: `Token API_TOKEN` - (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#operation/PostAuthorizations) permission) + (API token with the [`write: authorizations`](/influxdb3/cloud-serverless/api/#post-/api/v2/authorizations) permission) - **Content-type**: `application/json` - **Path parameters**: - **authID**: Authorization ID to update diff --git a/content/influxdb3/cloud-serverless/admin/tokens/view-tokens.md b/content/influxdb3/cloud-serverless/admin/tokens/view-tokens.md index 65134f5eaa..3cefd1183e 100644 --- a/content/influxdb3/cloud-serverless/admin/tokens/view-tokens.md +++ b/content/influxdb3/cloud-serverless/admin/tokens/view-tokens.md @@ -79,7 +79,7 @@ for information about other available flags. Use the `/api/v2/authorizations` InfluxDB API endpoint to view tokens and permissions. 
-{{< api-endpoint method="GET" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations" api-ref="/influxdb3/cloud-serverless/api/#operation/GetAuthorizations" >}} +{{< api-endpoint method="GET" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/authorizations" >}} - [View a single token](#view-a-single-token) - [Filter the token list](#filter-the-token-list) @@ -88,7 +88,7 @@ Include the following in your request: - **Headers**: - **Authorization**: `Token API_TOKEN` - (API token with the [`read: authorizations`](/influxdb3/cloud-serverless/api/#operation/PostAuthorizations) permission) + (API token with the [`read: authorizations`](/influxdb3/cloud-serverless/api/#post-/api/v2/authorizations) permission) - **Content-type**: `application/json` {{% code-placeholders "API_TOKEN" %}} @@ -101,13 +101,13 @@ Include the following in your request: To view a specific authorization and token, include the authorization ID in the URL path. 
-{{% api-endpoint method="GET" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{authID}" api-ref="/influxdb3/cloud-serverless/api/#operation/GetAuthorizationsID" %}} +{{% api-endpoint method="GET" endpoint="https://{{< influxdb/host >}}/api/v2/authorizations/{authID}" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/authorizations/-authID-" %}} Include the following in your request: - **Headers**: - **Authorization**: `Token API_TOKEN` - (API token with the [`read: authorizations`](/influxdb3/cloud-serverless/api/#operation/PostAuthorizations) permission) + (API token with the [`read: authorizations`](/influxdb3/cloud-serverless/api/#post-/api/v2/authorizations) permission) - **Content-type**: `application/json` {{% code-placeholders "(API|AUTHORIZATION)_(TOKEN|ID)" %}} diff --git a/content/influxdb3/cloud-serverless/api/_index.md b/content/influxdb3/cloud-serverless/api/_index.md new file mode 100644 index 0000000000..59acd1917a --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/_index.md @@ -0,0 +1,20 @@ +--- +title: InfluxDB HTTP API +description: >- + Use the InfluxDB HTTP API to write data, query data, and manage resources + in InfluxDB Cloud Serverless. 
+weight: 104 +type: api +menu: + influxdb3_cloud_serverless: + name: InfluxDB HTTP API + parent: Reference +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/api-compatibility/_index.md b/content/influxdb3/cloud-serverless/api/api-compatibility/_index.md new file mode 100644 index 0000000000..252a2f46ff --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/api-compatibility/_index.md @@ -0,0 +1,109 @@ +--- +title: API compatibility +description: >- + Use InfluxDB v1 and v2 compatible endpoints to write and query data. + + + ### Write data + + + InfluxDB 3 Cloud Serverless provides the following HTTP API endpoints for + writing data: + + + - `/api/v2/write` endpoint (recommended) for new write workloads or for + bringing existing InfluxDB v2 write workloads to InfluxDB Cloud Serverless. + - `/write` endpoint for bringing existing InfluxDB v1 write workloads to + InfluxDB Cloud Serverless. + + Both endpoints accept line protocol format and process data the same way. + + + ### Query data + + + InfluxDB 3 Cloud Serverless provides the following protocols for executing a + query: + + + - Flight+gRPC request (recommended) that contains an SQL or InfluxQL query. + + - HTTP API `/query` request that contains an InfluxQL query. + Use this protocol for existing InfluxDB v1 query workloads. + + ### InfluxDB v2 compatibility + + + The `/api/v2/write` endpoint works with Token authentication and existing + + InfluxDB 2.x tools and code. + + + ### InfluxDB v1 compatibility + + + The `/write` and `/query` endpoints work with InfluxDB 1.x username/password + + authentication and existing InfluxDB 1.x tools and code. 
+type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-api-compatibility.yaml +weight: 100 +tag: API compatibility +isConceptual: true +menuGroup: Other +tagDescription: >- + Use InfluxDB v1 and v2 compatible endpoints to write and query data. + + + ### Write data + + + InfluxDB 3 Cloud Serverless provides the following HTTP API endpoints for + writing data: + + + - `/api/v2/write` endpoint (recommended) for new write workloads or for + bringing existing InfluxDB v2 write workloads to InfluxDB Cloud Serverless. + - `/write` endpoint for bringing existing InfluxDB v1 write workloads to + InfluxDB Cloud Serverless. + + Both endpoints accept line protocol format and process data the same way. + + + ### Query data + + + InfluxDB 3 Cloud Serverless provides the following protocols for executing a + query: + + + - Flight+gRPC request (recommended) that contains an SQL or InfluxQL query. + + - HTTP API `/query` request that contains an InfluxQL query. + Use this protocol for existing InfluxDB v1 query workloads. + + ### InfluxDB v2 compatibility + + + The `/api/v2/write` endpoint works with Token authentication and existing + + InfluxDB 2.x tools and code. + + + ### InfluxDB v1 compatibility + + + The `/write` and `/query` endpoints work with InfluxDB 1.x username/password + + authentication and existing InfluxDB 1.x tools and code. 
+alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/authentication/_index.md b/content/influxdb3/cloud-serverless/api/authentication/_index.md new file mode 100644 index 0000000000..bc7aa47aae --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/authentication/_index.md @@ -0,0 +1,36 @@ +--- +title: Authentication +description: |- + Authenticate API requests using tokens, basic auth, or query strings. + + Use one of the following schemes to authenticate to the InfluxDB API: + + - Token authentication + - Basic authentication + - Querystring authentication +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-authentication.yaml +weight: 100 +tag: Authentication +isConceptual: true +menuGroup: Concepts +tagDescription: |- + Authenticate API requests using tokens, basic auth, or query strings. 
+ + Use one of the following schemes to authenticate to the InfluxDB API: + + - Token authentication + - Basic authentication + - Querystring authentication +showSecuritySchemes: true +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/authorizations-api-tokens/_index.md b/content/influxdb3/cloud-serverless/api/authorizations-api-tokens/_index.md new file mode 100644 index 0000000000..39924aa18b --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/authorizations-api-tokens/_index.md @@ -0,0 +1,107 @@ +--- +title: Authorizations (API tokens) +description: >- + Create and manage authorizations (API tokens). + + + An _authorization_ contains a list of `read` and `write` + + permissions for organization resources and provides an API token for + authentication. + + An authorization belongs to an organization and only contains permissions for + that organization. + + + We recommend the following for managing your tokens: + + + - Create a generic user to create and manage tokens for writing data. + + - Store your tokens in a secure password vault for future access. + + + ### User sessions with authorizations + + + Optionally, when creating an authorization, you can scope it to a specific + user. + + If the user signs in with username and password, creating a _user session_, + + the session carries the permissions granted by all the user's authorizations. + + For more information, see [how to assign a token to a specific + user](/influxdb3/cloud-serverless/security/tokens/create-token/). + + To create a user session, use the `POST /api/v2/signin` endpoint. 
+ + + ### Related endpoints + + + - Signin + + - Signout + + + ### Related guides + + + - [Authorize API requests](/influxdb3/cloud-serverless/api-guide/api_intro/) + + - [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/) + + - [Assign a token to a specific + user](/influxdb3/cloud-serverless/security/tokens/create-token/) +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-authorizations-api-tokens.yaml +weight: 100 +tag: Authorizations (API tokens) +isConceptual: false +menuGroup: Other +operations: + - operationId: GetAuthorizations + method: GET + path: /api/v2/authorizations + summary: List authorizations + tags: + - Authorizations (API tokens) + - operationId: PostAuthorizations + method: POST + path: /api/v2/authorizations + summary: Create an authorization + tags: + - Authorizations (API tokens) + - operationId: GetAuthorizationsID + method: GET + path: /api/v2/authorizations/{authID} + summary: Retrieve an authorization + tags: + - Authorizations (API tokens) + - operationId: PatchAuthorizationsID + method: PATCH + path: /api/v2/authorizations/{authID} + summary: Update an API token to be active or inactive + tags: + - Authorizations (API tokens) + - operationId: DeleteAuthorizationsID + method: DELETE + path: /api/v2/authorizations/{authID} + summary: Delete an authorization + tags: + - Authorizations (API tokens) +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/bucket-schemas/_index.md b/content/influxdb3/cloud-serverless/api/bucket-schemas/_index.md new file mode 100644 
index 0000000000..a4d79085ba --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/bucket-schemas/_index.md @@ -0,0 +1,48 @@ +--- +title: Bucket Schemas +description: API reference for Bucket Schemas +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-bucket-schemas.yaml +weight: 100 +tag: Bucket Schemas +isConceptual: false +menuGroup: Other +operations: + - operationId: getMeasurementSchemas + method: GET + path: /api/v2/buckets/{bucketID}/schema/measurements + summary: List measurement schemas of a bucket + tags: + - Bucket Schemas + - operationId: createMeasurementSchema + method: POST + path: /api/v2/buckets/{bucketID}/schema/measurements + summary: Create a measurement schema for a bucket + tags: + - Bucket Schemas + - operationId: getMeasurementSchema + method: GET + path: /api/v2/buckets/{bucketID}/schema/measurements/{measurementID} + summary: Retrieve a measurement schema + tags: + - Bucket Schemas + - operationId: updateMeasurementSchema + method: PATCH + path: /api/v2/buckets/{bucketID}/schema/measurements/{measurementID} + summary: Update a measurement schema + tags: + - Bucket Schemas +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/buckets/_index.md b/content/influxdb3/cloud-serverless/api/buckets/_index.md new file mode 100644 index 0000000000..1347919f7d --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/buckets/_index.md @@ -0,0 +1,126 @@ +--- +title: Buckets +description: >- + Store your data in InfluxDB + [buckets](/influxdb3/cloud-serverless/reference/glossary/). 
+ + A bucket is a named location where time series data is stored. All buckets + + have a [retention period](/influxdb3/cloud-serverless/reference/glossary/), + + a duration of time that each data point persists. InfluxDB drops all + + points with timestamps older than the bucket’s retention period. + + A bucket belongs to an organization. + + + ### Related guides + + + - [Manage buckets](/influxdb3/cloud-serverless/admin/buckets/) +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-buckets.yaml +weight: 100 +tag: Buckets +isConceptual: false +menuGroup: Other +operations: + - operationId: GetBuckets + method: GET + path: /api/v2/buckets + summary: List buckets + tags: + - Buckets + - operationId: PostBuckets + method: POST + path: /api/v2/buckets + summary: Create a bucket + tags: + - Buckets + - operationId: GetBucketsID + method: GET + path: /api/v2/buckets/{bucketID} + summary: Retrieve a bucket + tags: + - Buckets + - operationId: PatchBucketsID + method: PATCH + path: /api/v2/buckets/{bucketID} + summary: Update a bucket + tags: + - Buckets + - operationId: DeleteBucketsID + method: DELETE + path: /api/v2/buckets/{bucketID} + summary: Delete a bucket + tags: + - Buckets + - operationId: GetBucketsIDLabels + method: GET + path: /api/v2/buckets/{bucketID}/labels + summary: List all labels for a bucket + tags: + - Buckets + - operationId: PostBucketsIDLabels + method: POST + path: /api/v2/buckets/{bucketID}/labels + summary: Add a label to a bucket + tags: + - Buckets + - operationId: DeleteBucketsIDLabelsID + method: DELETE + path: /api/v2/buckets/{bucketID}/labels/{labelID} + summary: Delete a label from a bucket + tags: + - Buckets + - operationId: GetBucketsIDMembers + method: GET + path: /api/v2/buckets/{bucketID}/members + summary: List all users with member privileges for a bucket + tags: + - Buckets + - operationId: PostBucketsIDMembers + method: POST + path: 
/api/v2/buckets/{bucketID}/members + summary: Add a member to a bucket + tags: + - Buckets + - operationId: DeleteBucketsIDMembersID + method: DELETE + path: /api/v2/buckets/{bucketID}/members/{userID} + summary: Remove a member from a bucket + tags: + - Buckets + - operationId: GetBucketsIDOwners + method: GET + path: /api/v2/buckets/{bucketID}/owners + summary: List all owners of a bucket + tags: + - Buckets + - operationId: PostBucketsIDOwners + method: POST + path: /api/v2/buckets/{bucketID}/owners + summary: Add an owner to a bucket + tags: + - Buckets + - operationId: DeleteBucketsIDOwnersID + method: DELETE + path: /api/v2/buckets/{bucketID}/owners/{userID} + summary: Remove an owner from a bucket + tags: + - Buckets +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/common-parameters/_index.md b/content/influxdb3/cloud-serverless/api/common-parameters/_index.md new file mode 100644 index 0000000000..b8cde43795 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/common-parameters/_index.md @@ -0,0 +1,87 @@ +--- +title: Common parameters +description: >- + Parameters for specifying resources in API requests. + + + To specify resources, some InfluxDB API endpoints require parameters or + + properties in the request--for example, + + writing to a `bucket` resource in an `org` (_organization_ resource). 
+ + + ### Common parameters + + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `bucket` | string | The bucket name or ID + ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/)). + | + + | `bucketID` | string | The bucket ID ([find your + bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/)). | + + | `org` | string | The organization name or + ID ([find your + organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)). | + + | `orgID` | 16-byte string | The organization ID + ([find your + organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)). | +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-common-parameters.yaml +weight: 100 +tag: Common parameters +isConceptual: true +menuGroup: Other +tagDescription: >- + Parameters for specifying resources in API requests. + + + To specify resources, some InfluxDB API endpoints require parameters or + + properties in the request--for example, + + writing to a `bucket` resource in an `org` (_organization_ resource). + + + ### Common parameters + + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `bucket` | string | The bucket name or ID + ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/)). + | + + | `bucketID` | string | The bucket ID ([find your + bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/)). | + + | `org` | string | The organization name or + ID ([find your + organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)). | + + | `orgID` | 16-byte string | The organization ID + ([find your + organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)).
| +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/dbrps/_index.md b/content/influxdb3/cloud-serverless/api/dbrps/_index.md new file mode 100644 index 0000000000..e6f5d34cf6 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/dbrps/_index.md @@ -0,0 +1,77 @@ +--- +title: DBRPs +description: >- + The InfluxDB 1.x data model includes + [databases](/influxdb3/cloud-serverless/reference/glossary/) + + and [retention policies](/influxdb3/cloud-serverless/reference/glossary/). + + InfluxDB 2.x replaces databases and retention policies with buckets. + + To support InfluxDB 1.x query and write patterns in InfluxDB 2.x, + + databases and retention policies are mapped to buckets using the + + database and retention policy (DBRP) mapping service. + + The DBRP mapping service uses the database and retention policy + + specified in 1.x compatibility API requests to route operations to a bucket. 
+ + + ### Related guides + + + - [Database and retention policy + mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-dbrps.yaml +weight: 100 +tag: DBRPs +isConceptual: false +menuGroup: Other +operations: + - operationId: GetDBRPs + method: GET + path: /api/v2/dbrps + summary: List database retention policy mappings + tags: + - DBRPs + - operationId: PostDBRP + method: POST + path: /api/v2/dbrps + summary: Add a database retention policy mapping + tags: + - DBRPs + - operationId: GetDBRPsID + method: GET + path: /api/v2/dbrps/{dbrpID} + summary: Retrieve a database retention policy mapping + tags: + - DBRPs + - operationId: PatchDBRPID + method: PATCH + path: /api/v2/dbrps/{dbrpID} + summary: Update a database retention policy mapping + tags: + - DBRPs + - operationId: DeleteDBRPID + method: DELETE + path: /api/v2/dbrps/{dbrpID} + summary: Delete a database retention policy + tags: + - DBRPs +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/delete/_index.md b/content/influxdb3/cloud-serverless/api/delete/_index.md new file mode 100644 index 0000000000..b4bb56fea3 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/delete/_index.md @@ -0,0 +1,30 @@ +--- +title: Delete +description: Delete data from an InfluxDB bucket. 
+type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-delete.yaml +weight: 100 +tag: Delete +isConceptual: false +menuGroup: Other +operations: + - operationId: PostDelete + method: POST + path: /api/v2/delete + summary: Delete data + tags: + - Delete +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/headers/_index.md b/content/influxdb3/cloud-serverless/api/headers/_index.md new file mode 100644 index 0000000000..9bcf9b72c4 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/headers/_index.md @@ -0,0 +1,77 @@ +--- +title: Headers +description: >- + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to + those endpoints--for example, + + the `POST /api/v2/write` endpoint accepts the `Content-Encoding` header to + indicate the compression applied to line protocol in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type that the + client can understand. | + + | `Authorization` | string | The [authorization scheme + and credential](/influxdb3/cloud-serverless/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the data in + the request body. 
| +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-headers.yaml +weight: 100 +tag: Headers +isConceptual: true +menuGroup: Other +tagDescription: >- + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + + The following table shows common headers used by many InfluxDB API endpoints. + + Some endpoints may use other headers that perform functions more specific to + those endpoints--for example, + + the `POST /api/v2/write` endpoint accepts the `Content-Encoding` header to + indicate the compression applied to line protocol in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type that the + client can understand. | + + | `Authorization` | string | The [authorization scheme + and credential](/influxdb3/cloud-serverless/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the data in + the request body. | +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/invokable-scripts/_index.md b/content/influxdb3/cloud-serverless/api/invokable-scripts/_index.md new file mode 100644 index 0000000000..f89afbda1f --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/invokable-scripts/_index.md @@ -0,0 +1,83 @@ +--- +title: Invokable Scripts +description: |- + Store, manage, and execute scripts in InfluxDB. + A script stores your custom Flux script and provides an invokable + endpoint that accepts runtime parameters. 
+ In a script, you can specify custom runtime parameters + (`params`)--for example, `params.myparameter`. + Once you create a script, InfluxDB generates an + `/api/v2/scripts/SCRIPT_ID/invoke` endpoint + for your organization. + You can run the script from API requests and tasks, defining parameter + values for each run. + When the script runs, InfluxDB replaces `params` references in the + script with the runtime parameter values you define. + + Use the `/api/v2/scripts` endpoints to create and manage scripts. + See related guides to learn how to define parameters and execute scripts. + + +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-invokable-scripts.yaml +weight: 100 +tag: Invokable Scripts +isConceptual: false +menuGroup: Other +operations: + - operationId: GetScripts + method: GET + path: /api/v2/scripts + summary: List scripts + tags: + - Invokable Scripts + - operationId: PostScripts + method: POST + path: /api/v2/scripts + summary: Create a script + tags: + - Invokable Scripts + - operationId: GetScriptsID + method: GET + path: /api/v2/scripts/{scriptID} + summary: Retrieve a script + tags: + - Invokable Scripts + - operationId: PatchScriptsID + method: PATCH + path: /api/v2/scripts/{scriptID} + summary: Update a script + tags: + - Invokable Scripts + - operationId: DeleteScriptsID + method: DELETE + path: /api/v2/scripts/{scriptID} + summary: Delete a script + tags: + - Invokable Scripts + - operationId: PostScriptsIDInvoke + method: POST + path: /api/v2/scripts/{scriptID}/invoke + summary: Invoke a script + tags: + - Invokable Scripts + - operationId: GetScriptsIDParams + method: GET + path: /api/v2/scripts/{scriptID}/params + summary: Find script parameters. 
+ tags: + - Invokable Scripts +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/limits/_index.md b/content/influxdb3/cloud-serverless/api/limits/_index.md new file mode 100644 index 0000000000..de7201bcaf --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/limits/_index.md @@ -0,0 +1,30 @@ +--- +title: Limits +description: API reference for Limits +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-limits.yaml +weight: 100 +tag: Limits +isConceptual: false +menuGroup: Other +operations: + - operationId: GetOrgLimitsID + method: GET + path: /api/v2/orgs/{orgID}/limits + summary: Retrieve limits for an organization + tags: + - Limits +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/organizations/_index.md b/content/influxdb3/cloud-serverless/api/organizations/_index.md new file mode 100644 index 0000000000..169794bc2f --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/organizations/_index.md @@ -0,0 +1,96 @@ +--- +title: Organizations +description: |- + Manage your [organization](/influxdb3/cloud-serverless/reference/glossary/). + An organization is a workspace for a group of users. 
Organizations can be + used to separate different environments, projects, teams or users within + InfluxDB. + + Use the `/api/v2/orgs` endpoints to view and manage organizations. +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-organizations.yaml +weight: 100 +tag: Organizations +isConceptual: false +menuGroup: Other +operations: + - operationId: GetOrgs + method: GET + path: /api/v2/orgs + summary: List organizations + tags: + - Organizations + - operationId: PostOrgs + method: POST + path: /api/v2/orgs + summary: Create an organization + tags: + - Organizations + - operationId: GetOrgsID + method: GET + path: /api/v2/orgs/{orgID} + summary: Retrieve an organization + tags: + - Organizations + - operationId: PatchOrgsID + method: PATCH + path: /api/v2/orgs/{orgID} + summary: Update an organization + tags: + - Organizations + - operationId: DeleteOrgsID + method: DELETE + path: /api/v2/orgs/{orgID} + summary: Delete an organization + tags: + - Organizations + - operationId: GetOrgsIDMembers + method: GET + path: /api/v2/orgs/{orgID}/members + summary: List all members of an organization + tags: + - Organizations + - operationId: PostOrgsIDMembers + method: POST + path: /api/v2/orgs/{orgID}/members + summary: Add a member to an organization + tags: + - Organizations + - operationId: DeleteOrgsIDMembersID + method: DELETE + path: /api/v2/orgs/{orgID}/members/{userID} + summary: Remove a member from an organization + tags: + - Organizations + - operationId: GetOrgsIDOwners + method: GET + path: /api/v2/orgs/{orgID}/owners + summary: List all owners of an organization + tags: + - Organizations + - operationId: PostOrgsIDOwners + method: POST + path: /api/v2/orgs/{orgID}/owners + summary: Add an owner to an organization + tags: + - Organizations + - operationId: DeleteOrgsIDOwnersID + method: DELETE + path: /api/v2/orgs/{orgID}/owners/{userID} + summary: Remove an owner from an organization + tags: 
+ - Organizations +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/pagination/_index.md b/content/influxdb3/cloud-serverless/api/pagination/_index.md new file mode 100644 index 0000000000..643232bb9f --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/pagination/_index.md @@ -0,0 +1,91 @@ +--- +title: Pagination +description: >- + Some InfluxDB API list operations may support the following query parameters + for paginating results: + + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `limit` | integer | The maximum number of records to return (after other parameters are applied). | + | `offset` | integer | The number of records to skip (before `limit`, after other parameters are applied). | + | `after` | string (resource ID) | Only returns resources created after the specified resource. | + + ### Limitations + + - For specific endpoint parameters and examples, see the endpoint definition. + - If you specify an `offset` parameter value greater than the total number of records, + then InfluxDB returns an empty list in the response + (given `offset` skips the specified number of records). 
+ + The following example passes `offset=50` to skip the first 50 results, + but the user only has 10 buckets: + + ```sh + curl --request GET "INFLUX_URL/api/v2/buckets?limit=1&offset=50" \ + --header "Authorization: Token INFLUX_API_TOKEN" + ``` + + The response contains the following: + + ```json + { + "links": { + "prev": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=49\u0026orgID=ORG_ID", + "self": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=50\u0026orgID=ORG_ID" + }, + "buckets": [] + } + ``` +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-pagination.yaml +weight: 100 +tag: Pagination +isConceptual: true +menuGroup: Other +tagDescription: >- + Some InfluxDB API list operations may support the following query parameters + for paginating results: + + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `limit` | integer | The maximum number of records to return (after other parameters are applied). | + | `offset` | integer | The number of records to skip (before `limit`, after other parameters are applied). | + | `after` | string (resource ID) | Only returns resources created after the specified resource. | + + ### Limitations + + - For specific endpoint parameters and examples, see the endpoint definition. + - If you specify an `offset` parameter value greater than the total number of records, + then InfluxDB returns an empty list in the response + (given `offset` skips the specified number of records). 
+ + The following example passes `offset=50` to skip the first 50 results, + but the user only has 10 buckets: + + ```sh + curl --request GET "INFLUX_URL/api/v2/buckets?limit=1&offset=50" \ + --header "Authorization: Token INFLUX_API_TOKEN" + ``` + + The response contains the following: + + ```json + { + "links": { + "prev": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=49\u0026orgID=ORG_ID", + "self": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=50\u0026orgID=ORG_ID" + }, + "buckets": [] + } + ``` +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/quick-start/_index.md b/content/influxdb3/cloud-serverless/api/quick-start/_index.md new file mode 100644 index 0000000000..56dc231f77 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/quick-start/_index.md @@ -0,0 +1,81 @@ +--- +title: Quick start +description: |- + Authenticate, write, and query with the API: + + 1. Create a database token to authorize API requests in the InfluxDB Cloud + Serverless UI. + + 2. Write data to InfluxDB Cloud Serverless. + + ```bash + curl -X POST "https://cloud2.influxdata.com/api/v2/write?bucket=DATABASE_NAME&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 3. Query data from InfluxDB Cloud Serverless using SQL or InfluxQL. + For best performance, use a Flight client to query data. + The HTTP API `/query` endpoint supports InfluxQL queries. 
+ + ```bash + curl -G "https://cloud2.influxdata.com/query" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h" + ``` + + For more information about using InfluxDB Cloud Serverless, see the + [Get started](/influxdb3/cloud-serverless/get-started/) guide. +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-quick-start.yaml +weight: 1 +tag: Quick start +isConceptual: true +menuGroup: Concepts +tagDescription: |- + Authenticate, write, and query with the API: + + 1. Create a database token to authorize API requests in the InfluxDB Cloud + Serverless UI. + + 2. Write data to InfluxDB Cloud Serverless. + + ```bash + curl -X POST "https://cloud2.influxdata.com/api/v2/write?bucket=DATABASE_NAME&precision=ns" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 3. Query data from InfluxDB Cloud Serverless using SQL or InfluxQL. + For best performance, use a Flight client to query data. + The HTTP API `/query` endpoint supports InfluxQL queries. + + ```bash + curl -G "https://cloud2.influxdata.com/query" \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h" + ``` + + For more information about using InfluxDB Cloud Serverless, see the + [Get started](/influxdb3/cloud-serverless/get-started/) guide. 
+alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/resources/_index.md b/content/influxdb3/cloud-serverless/api/resources/_index.md new file mode 100644 index 0000000000..f72f88ec04 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/resources/_index.md @@ -0,0 +1,30 @@ +--- +title: Resources +description: API reference for Resources +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-resources.yaml +weight: 100 +tag: Resources +isConceptual: false +menuGroup: Other +operations: + - operationId: GetResources + method: GET + path: /api/v2/resources + summary: List all known resources + tags: + - Resources +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/response-codes/_index.md b/content/influxdb3/cloud-serverless/api/response-codes/_index.md new file mode 100644 index 0000000000..f528033bed --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/response-codes/_index.md @@ -0,0 +1,147 @@ +--- +title: Response codes +description: >- + InfluxDB HTTP API endpoints use standard HTTP status codes for success and + failure responses. + + The response body may include additional details. + + For details about a specific operation's response, + + see **Responses** and **Response Samples** for that operation. 
+ + + API operations may return the following HTTP status codes: + + + |  Code  | Status | Description | + + |:-----------:|:------------------------ |:--------------------- | + + | `200` | Success | | + + | `201` | Created | Successfully created a resource. + The response body may contain details, for example `/write` and + `/api/v2/write` response bodies contain details of partial write failures. | + + | `204` | No content | The request succeeded. | + + | `400` | Bad request | InfluxDB can't parse the request + due to an incorrect parameter or bad syntax. For _writes_, the error may + indicate one of the following problems:
  • Line protocol is malformed. + The response body contains the first malformed line in the data and indicates + what was expected.
  • The batch contains a point with the same series as + other points, but one of the field values has a different data + type.
  • `Authorization` header is missing or malformed, or the API token + doesn't have permission for the operation.
| + + | `401` | Unauthorized | May indicate one of the following: +
  • `Authorization: Token` header is missing or malformed
  • API + token value is missing from the header
  • API token doesn't have + permission. For more information about token types and permissions, see + [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/)
| + + | `404` | Not found | Requested resource was not found. + `message` in the response body provides details about the requested resource. + | + + | `405` | Method not allowed | The API path doesn't support the + HTTP method used in the request--for example, you send a `POST` request to an + endpoint that only allows `GET`. | + + | `413` | Request entity too large | Request payload exceeds the size + limit. | + + | `422` | Unprocessable entity | Request data is invalid. `code` and + `message` in the response body provide details about the problem. | + + | `429` | Too many requests | API token is temporarily over the + request quota. The `Retry-After` header describes when to try the request + again. | + + | `500` | Internal server error | | + + | `503` | Service unavailable | Server is temporarily unavailable + to process the request. The `Retry-After` header describes when to try the + request again. | +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-response-codes.yaml +weight: 100 +tag: Response codes +isConceptual: true +menuGroup: Concepts +tagDescription: >- + InfluxDB HTTP API endpoints use standard HTTP status codes for success and + failure responses. + + The response body may include additional details. + + For details about a specific operation's response, + + see **Responses** and **Response Samples** for that operation. + + + API operations may return the following HTTP status codes: + + + |  Code  | Status | Description | + + |:-----------:|:------------------------ |:--------------------- | + + | `200` | Success | | + + | `201` | Created | Successfully created a resource. + The response body may contain details, for example `/write` and + `/api/v2/write` response bodies contain details of partial write failures. | + + | `204` | No content | The request succeeded. | + + | `400` | Bad request | InfluxDB can't parse the request + due to an incorrect parameter or bad syntax. 
For _writes_, the error may + indicate one of the following problems:
  • Line protocol is malformed. + The response body contains the first malformed line in the data and indicates + what was expected.
  • The batch contains a point with the same series as + other points, but one of the field values has a different data + type.
  • `Authorization` header is missing or malformed, or the API token + doesn't have permission for the operation.
| + + | `401` | Unauthorized | May indicate one of the following: +
  • `Authorization: Token` header is missing or malformed
  • API + token value is missing from the header
  • API token doesn't have + permission. For more information about token types and permissions, see + [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/)
| + + | `404` | Not found | Requested resource was not found. + `message` in the response body provides details about the requested resource. + | + + | `405` | Method not allowed | The API path doesn't support the + HTTP method used in the request--for example, you send a `POST` request to an + endpoint that only allows `GET`. | + + | `413` | Request entity too large | Request payload exceeds the size + limit. | + + | `422` | Unprocessable entity | Request data is invalid. `code` and + `message` in the response body provide details about the problem. | + + | `429` | Too many requests | API token is temporarily over the + request quota. The `Retry-After` header describes when to try the request + again. | + + | `500` | Internal server error | | + + | `503` | Service unavailable | Server is temporarily unavailable + to process the request. The `Retry-After` header describes when to try the + request again. | +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/routes/_index.md b/content/influxdb3/cloud-serverless/api/routes/_index.md new file mode 100644 index 0000000000..4c2b8ea472 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/routes/_index.md @@ -0,0 +1,30 @@ +--- +title: Routes +description: API reference for Routes +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-routes.yaml +weight: 100 +tag: Routes +isConceptual: false +menuGroup: Other +operations: + - operationId: GetRoutes + method: GET + path: /api/v2 + summary: List all top level routes + tags: + - Routes +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: 
/influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/secrets/_index.md b/content/influxdb3/cloud-serverless/api/secrets/_index.md new file mode 100644 index 0000000000..0025e6b2cf --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/secrets/_index.md @@ -0,0 +1,48 @@ +--- +title: Secrets +description: API reference for Secrets +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-secrets.yaml +weight: 100 +tag: Secrets +isConceptual: false +menuGroup: Other +operations: + - operationId: GetOrgsIDSecrets + method: GET + path: /api/v2/orgs/{orgID}/secrets + summary: List all secret keys for an organization + tags: + - Secrets + - operationId: PatchOrgsIDSecrets + method: PATCH + path: /api/v2/orgs/{orgID}/secrets + summary: Update secrets in an organization + tags: + - Secrets + - operationId: DeleteOrgsIDSecretsID + method: DELETE + path: /api/v2/orgs/{orgID}/secrets/{secretID} + summary: Delete a secret from an organization + tags: + - Secrets + - operationId: PostOrgsIDSecrets + method: POST + path: /api/v2/orgs/{orgID}/secrets/delete + summary: Delete secrets from an organization + tags: + - Secrets +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/security-and-access-endpoints/_index.md 
b/content/influxdb3/cloud-serverless/api/security-and-access-endpoints/_index.md new file mode 100644 index 0000000000..ffebd08947 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/security-and-access-endpoints/_index.md @@ -0,0 +1,54 @@ +--- +title: Security and access endpoints +description: API reference for Security and access endpoints +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-security-and-access-endpoints.yaml +weight: 100 +tag: Security and access endpoints +isConceptual: false +menuGroup: Other +operations: + - operationId: GetAuthorizations + method: GET + path: /api/v2/authorizations + summary: List authorizations + tags: + - Security and access endpoints + - operationId: PostAuthorizations + method: POST + path: /api/v2/authorizations + summary: Create an authorization + tags: + - Security and access endpoints + - operationId: GetAuthorizationsID + method: GET + path: /api/v2/authorizations/{authID} + summary: Retrieve an authorization + tags: + - Security and access endpoints + - operationId: PatchAuthorizationsID + method: PATCH + path: /api/v2/authorizations/{authID} + summary: Update an API token to be active or inactive + tags: + - Security and access endpoints + - operationId: DeleteAuthorizationsID + method: DELETE + path: /api/v2/authorizations/{authID} + summary: Delete an authorization + tags: + - Security and access endpoints +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/supported-operations/_index.md 
b/content/influxdb3/cloud-serverless/api/supported-operations/_index.md new file mode 100644 index 0000000000..8b17542150 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/supported-operations/_index.md @@ -0,0 +1,21 @@ +--- +title: Supported operations +description: "The following table shows the most common operations that the InfluxDB `/api/v2` API supports.\nSome resources may support other operations that perform functions more specific to those resources.\nFor example, you can use the `PATCH /api/v2/scripts` endpoint to update properties of a script\nresource.\n\n| Operation | |\n|:----------|:-----------------------------------------------------------------------|\n| Write | Writes (`POST`) data to a bucket. |\n| Run | Executes (`POST`) a query or script and returns the result. |\n| List |\tRetrieves (`GET`) a list of zero or more resources. |\n| Create |\tCreates (`POST`) a new resource and returns the resource. |\n| Update |\tModifies (`PUT`) an existing resource to reflect data in your request. |\n| Delete |\tRemoves (`DELETE`) a specific resource. |" +type: api +layout: single +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-supported-operations.yaml +weight: 100 +tag: Supported operations +isConceptual: true +menuGroup: Other +tagDescription: "The following table shows the most common operations that the InfluxDB `/api/v2` API supports.\nSome resources may support other operations that perform functions more specific to those resources.\nFor example, you can use the `PATCH /api/v2/scripts` endpoint to update properties of a script\nresource.\n\n| Operation | |\n|:----------|:-----------------------------------------------------------------------|\n| Write | Writes (`POST`) data to a bucket. |\n| Run | Executes (`POST`) a query or script and returns the result. |\n| List |\tRetrieves (`GET`) a list of zero or more resources. |\n| Create |\tCreates (`POST`) a new resource and returns the resource. 
|\n| Update |\tModifies (`PUT`) an existing resource to reflect data in your request. |\n| Delete |\tRemoves (`DELETE`) a specific resource. |" +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/system-information-endpoints/_index.md b/content/influxdb3/cloud-serverless/api/system-information-endpoints/_index.md new file mode 100644 index 0000000000..c15b666cd5 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/system-information-endpoints/_index.md @@ -0,0 +1,36 @@ +--- +title: System information endpoints +description: API reference for System information endpoints +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-system-information-endpoints.yaml +weight: 100 +tag: System information endpoints +isConceptual: false +menuGroup: Other +operations: + - operationId: GetRoutes + method: GET + path: /api/v2 + summary: List all top level routes + tags: + - System information endpoints + - operationId: GetResources + method: GET + path: /api/v2/resources + summary: List all known resources + tags: + - System information endpoints +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/tasks/_index.md b/content/influxdb3/cloud-serverless/api/tasks/_index.md new file mode 100644 index 0000000000..028d2752eb --- /dev/null 
+++ b/content/influxdb3/cloud-serverless/api/tasks/_index.md @@ -0,0 +1,179 @@ +--- +title: Tasks +description: >- + Process and analyze your data with tasks + + in the InfluxDB task engine. + + Use the `/api/v2/tasks` endpoints to schedule and manage tasks, retry task + runs, and retrieve run logs. + + + To configure a task, provide the script and the schedule to run the task. + + For examples, see how to create a task with the `POST /api/v2/tasks` endpoint. + + + + + + ### Properties + + + A `task` object contains information about an InfluxDB task resource. + + + The following table defines the properties that appear in this object: + + + + + + +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-tasks.yaml +weight: 100 +tag: Tasks +isConceptual: false +menuGroup: Other +operations: + - operationId: GetTasks + method: GET + path: /api/v2/tasks + summary: List all tasks + tags: + - Tasks + - operationId: PostTasks + method: POST + path: /api/v2/tasks + summary: Create a task + tags: + - Tasks + - operationId: GetTasksID + method: GET + path: /api/v2/tasks/{taskID} + summary: Retrieve a task + tags: + - Tasks + - operationId: PatchTasksID + method: PATCH + path: /api/v2/tasks/{taskID} + summary: Update a task + tags: + - Tasks + - operationId: DeleteTasksID + method: DELETE + path: /api/v2/tasks/{taskID} + summary: Delete a task + tags: + - Tasks + - operationId: GetTasksIDLabels + method: GET + path: /api/v2/tasks/{taskID}/labels + summary: List labels for a task + tags: + - Tasks + - operationId: PostTasksIDLabels + method: POST + path: /api/v2/tasks/{taskID}/labels + summary: Add a label to a task + tags: + - Tasks + - operationId: DeleteTasksIDLabelsID + method: DELETE + path: /api/v2/tasks/{taskID}/labels/{labelID} + summary: Delete a label from a task + tags: + - Tasks + - operationId: GetTasksIDLogs + method: GET + path: /api/v2/tasks/{taskID}/logs + summary: Retrieve all logs for a task + 
tags: + - Tasks + - operationId: GetTasksIDMembers + method: GET + path: /api/v2/tasks/{taskID}/members + summary: List all task members + tags: + - Tasks + - operationId: PostTasksIDMembers + method: POST + path: /api/v2/tasks/{taskID}/members + summary: Add a member to a task + tags: + - Tasks + - operationId: DeleteTasksIDMembersID + method: DELETE + path: /api/v2/tasks/{taskID}/members/{userID} + summary: Remove a member from a task + tags: + - Tasks + - operationId: GetTasksIDOwners + method: GET + path: /api/v2/tasks/{taskID}/owners + summary: List all owners of a task + tags: + - Tasks + - operationId: PostTasksIDOwners + method: POST + path: /api/v2/tasks/{taskID}/owners + summary: Add an owner for a task + tags: + - Tasks + - operationId: DeleteTasksIDOwnersID + method: DELETE + path: /api/v2/tasks/{taskID}/owners/{userID} + summary: Remove an owner from a task + tags: + - Tasks + - operationId: GetTasksIDRuns + method: GET + path: /api/v2/tasks/{taskID}/runs + summary: List runs for a task + tags: + - Tasks + - operationId: PostTasksIDRuns + method: POST + path: /api/v2/tasks/{taskID}/runs + summary: Start a task run, overriding the schedule + tags: + - Tasks + - operationId: GetTasksIDRunsID + method: GET + path: /api/v2/tasks/{taskID}/runs/{runID} + summary: Retrieve a run for a task. 
+ tags: + - Tasks + - operationId: DeleteTasksIDRunsID + method: DELETE + path: /api/v2/tasks/{taskID}/runs/{runID} + summary: Cancel a running task + tags: + - Tasks + - operationId: GetTasksIDRunsIDLogs + method: GET + path: /api/v2/tasks/{taskID}/runs/{runID}/logs + summary: Retrieve all logs for a run + tags: + - Tasks + - operationId: PostTasksIDRunsIDRetry + method: POST + path: /api/v2/tasks/{taskID}/runs/{runID}/retry + summary: Retry a task run + tags: + - Tasks +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/telegrafs/_index.md b/content/influxdb3/cloud-serverless/api/telegrafs/_index.md new file mode 100644 index 0000000000..45a2278f1b --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/telegrafs/_index.md @@ -0,0 +1,108 @@ +--- +title: Telegrafs +description: API reference for Telegrafs +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-telegrafs.yaml +weight: 100 +tag: Telegrafs +isConceptual: false +menuGroup: Other +operations: + - operationId: GetTelegrafs + method: GET + path: /api/v2/telegrafs + summary: List all Telegraf configurations + tags: + - Telegrafs + - operationId: PostTelegrafs + method: POST + path: /api/v2/telegrafs + summary: Create a Telegraf configuration + tags: + - Telegrafs + - operationId: GetTelegrafsID + method: GET + path: /api/v2/telegrafs/{telegrafID} + summary: Retrieve a Telegraf configuration + tags: + - Telegrafs + - operationId: PutTelegrafsID + method: PUT + path: /api/v2/telegrafs/{telegrafID} + summary: Update a Telegraf configuration + 
tags: + - Telegrafs + - operationId: DeleteTelegrafsID + method: DELETE + path: /api/v2/telegrafs/{telegrafID} + summary: Delete a Telegraf configuration + tags: + - Telegrafs + - operationId: GetTelegrafsIDLabels + method: GET + path: /api/v2/telegrafs/{telegrafID}/labels + summary: List all labels for a Telegraf config + tags: + - Telegrafs + - operationId: PostTelegrafsIDLabels + method: POST + path: /api/v2/telegrafs/{telegrafID}/labels + summary: Add a label to a Telegraf config + tags: + - Telegrafs + - operationId: DeleteTelegrafsIDLabelsID + method: DELETE + path: /api/v2/telegrafs/{telegrafID}/labels/{labelID} + summary: Delete a label from a Telegraf config + tags: + - Telegrafs + - operationId: GetTelegrafsIDMembers + method: GET + path: /api/v2/telegrafs/{telegrafID}/members + summary: List all users with member privileges for a Telegraf config + tags: + - Telegrafs + - operationId: PostTelegrafsIDMembers + method: POST + path: /api/v2/telegrafs/{telegrafID}/members + summary: Add a member to a Telegraf config + tags: + - Telegrafs + - operationId: DeleteTelegrafsIDMembersID + method: DELETE + path: /api/v2/telegrafs/{telegrafID}/members/{userID} + summary: Remove a member from a Telegraf config + tags: + - Telegrafs + - operationId: GetTelegrafsIDOwners + method: GET + path: /api/v2/telegrafs/{telegrafID}/owners + summary: List all owners of a Telegraf configuration + tags: + - Telegrafs + - operationId: PostTelegrafsIDOwners + method: POST + path: /api/v2/telegrafs/{telegrafID}/owners + summary: Add an owner to a Telegraf configuration + tags: + - Telegrafs + - operationId: DeleteTelegrafsIDOwnersID + method: DELETE + path: /api/v2/telegrafs/{telegrafID}/owners/{userID} + summary: Remove an owner from a Telegraf config + tags: + - Telegrafs +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + 
cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/templates/_index.md b/content/influxdb3/cloud-serverless/api/templates/_index.md new file mode 100644 index 0000000000..71514c6239 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/templates/_index.md @@ -0,0 +1,101 @@ +--- +title: Templates +description: >- + Export and apply InfluxDB **templates**. + + Manage **stacks** of templated InfluxDB resources. + + + InfluxDB templates are prepackaged configurations for resources. + + Use InfluxDB templates to configure a fresh instance of InfluxDB, + + back up your dashboard configuration, or share your configuration. + + + Use the `/api/v2/templates` endpoints to export templates and apply templates. + + + **InfluxDB stacks** are stateful InfluxDB templates that let you + + add, update, and remove installed template resources over time, avoid + duplicating + + resources when applying the same or similar templates more than once, and + + apply changes to distributed instances of InfluxDB OSS or InfluxDB Cloud. + + + Use the `/api/v2/stacks` endpoints to manage installed template resources. 
+ + + +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-templates.yaml +weight: 100 +tag: Templates +isConceptual: false +menuGroup: Other +operations: + - operationId: ListStacks + method: GET + path: /api/v2/stacks + summary: List installed stacks + tags: + - Templates + - operationId: CreateStack + method: POST + path: /api/v2/stacks + summary: Create a stack + tags: + - Templates + - operationId: ReadStack + method: GET + path: /api/v2/stacks/{stack_id} + summary: Retrieve a stack + tags: + - Templates + - operationId: UpdateStack + method: PATCH + path: /api/v2/stacks/{stack_id} + summary: Update a stack + tags: + - Templates + - operationId: DeleteStack + method: DELETE + path: /api/v2/stacks/{stack_id} + summary: Delete a stack and associated resources + tags: + - Templates + - operationId: UninstallStack + method: POST + path: /api/v2/stacks/{stack_id}/uninstall + summary: Uninstall a stack + tags: + - Templates + - operationId: ApplyTemplate + method: POST + path: /api/v2/templates/apply + summary: Apply or dry-run a template + tags: + - Templates + - operationId: ExportTemplate + method: POST + path: /api/v2/templates/export + summary: Export a new template + tags: + - Templates +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/usage/_index.md b/content/influxdb3/cloud-serverless/api/usage/_index.md new file mode 100644 index 0000000000..835c467de2 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/usage/_index.md @@ -0,0 +1,30 @@ +--- +title: Usage +description: API reference 
for Usage +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-usage.yaml +weight: 100 +tag: Usage +isConceptual: false +menuGroup: Other +operations: + - operationId: GetOrgUsageID + method: GET + path: /api/v2/orgs/{orgID}/usage + summary: Retrieve usage for an organization + tags: + - Usage +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/api/variables/_index.md b/content/influxdb3/cloud-serverless/api/variables/_index.md new file mode 100644 index 0000000000..a581798af9 --- /dev/null +++ b/content/influxdb3/cloud-serverless/api/variables/_index.md @@ -0,0 +1,78 @@ +--- +title: Variables +description: API reference for Variables +type: api +layout: list +staticFilePath: >- + /openapi/influxdb-cloud-serverless/ref/tags/influxdb-cloud-serverless-ref-variables.yaml +weight: 100 +tag: Variables +isConceptual: false +menuGroup: Other +operations: + - operationId: GetVariables + method: GET + path: /api/v2/variables + summary: List all variables + tags: + - Variables + - operationId: PostVariables + method: POST + path: /api/v2/variables + summary: Create a variable + tags: + - Variables + - operationId: GetVariablesID + method: GET + path: /api/v2/variables/{variableID} + summary: Retrieve a variable + tags: + - Variables + - operationId: PutVariablesID + method: PUT + path: /api/v2/variables/{variableID} + summary: Replace a variable + tags: + - Variables + - operationId: PatchVariablesID + method: PATCH + path: /api/v2/variables/{variableID} + summary: Update a variable + tags: + - Variables + - 
operationId: DeleteVariablesID + method: DELETE + path: /api/v2/variables/{variableID} + summary: Delete a variable + tags: + - Variables + - operationId: GetVariablesIDLabels + method: GET + path: /api/v2/variables/{variableID}/labels + summary: List all labels for a variable + tags: + - Variables + - operationId: PostVariablesIDLabels + method: POST + path: /api/v2/variables/{variableID}/labels + summary: Add a label to a variable + tags: + - Variables + - operationId: DeleteVariablesIDLabelsID + method: DELETE + path: /api/v2/variables/{variableID}/labels/{labelID} + summary: Delete a label from a variable + tags: + - Variables +related: + - title: InfluxDB 3 API client libraries + href: /influxdb3/cloud-serverless/reference/client-libraries/v3/ +alt_links: + core: /influxdb3/core/api/ + enterprise: /influxdb3/enterprise/api/ + cloud-serverless: /influxdb3/cloud-serverless/api/ + cloud-dedicated: /influxdb3/cloud-dedicated/api/ + clustered: /influxdb3/clustered/api/ + v2: /influxdb/v2/api/ + cloud: /influxdb/cloud/api/ +--- diff --git a/content/influxdb3/cloud-serverless/get-started/setup.md b/content/influxdb3/cloud-serverless/get-started/setup.md index 7b303132dc..764f7ea373 100644 --- a/content/influxdb3/cloud-serverless/get-started/setup.md +++ b/content/influxdb3/cloud-serverless/get-started/setup.md @@ -395,7 +395,7 @@ influx bucket create \ To create a bucket using the InfluxDB HTTP API, send a request to the InfluxDB API `/api/v2/buckets` endpoint using the `POST` request method. 
-{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/buckets" method="post" api-ref="/influxdb3/cloud-serverless/api/#operation/PostBuckets" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/buckets" method="post" api-ref="/influxdb3/cloud-serverless/api/#post-/api/v2/buckets" >}} Include the following with your request: diff --git a/content/influxdb3/cloud-serverless/get-started/write.md b/content/influxdb3/cloud-serverless/get-started/write.md index 8ac6bcf54a..88f22e01cd 100644 --- a/content/influxdb3/cloud-serverless/get-started/write.md +++ b/content/influxdb3/cloud-serverless/get-started/write.md @@ -407,11 +407,11 @@ API endpoint. To write data to InfluxDB using the [InfluxDB v1 HTTP API](/influxdb3/cloud-serverless/reference/api/), send a request to the -[InfluxDB API `/write` endpoint](/influxdb3/cloud-serverless/api/#operation/PostLegacyWrite) +[InfluxDB API `/write` endpoint](/influxdb3/cloud-serverless/api/#post-/write) using the `POST` request method. {{% api-endpoint endpoint="https://{{< influxdb/host >}}/write" method="post" -api-ref="/influxdb3/cloud-serverless/api/#operation/PostLegacyWrite"%}} +api-ref="/influxdb3/cloud-serverless/api/#post-/write"%}} Include the following with your request: @@ -518,7 +518,7 @@ To write data to InfluxDB using the to the InfluxDB API `/api/v2/write` endpoint using the `POST` request method. 
{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/write" -method="post" api-ref="/influxdb3/cloud-serverless/api/#operation/PostWrite" >}} +method="post" api-ref="/influxdb3/cloud-serverless/api/#post-/api/v2/write" >}} Include the following with your request: diff --git a/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md b/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md index be12caec23..80a47b54b2 100644 --- a/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md +++ b/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md @@ -386,10 +386,10 @@ The output is the DBRP. {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps` API endpoint](/influxdb3/cloud-serverless/api/#operation/PostDBRP) +Use the [`/api/v2/dbrps` API endpoint](/influxdb3/cloud-serverless/api/#post-/api/v2/dbrps) to create a new DBRP mapping. -{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps" method="POST" api-ref="/influxdb3/cloud-serverless/api/#operation/PostDBRP" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps" method="POST" api-ref="/influxdb3/cloud-serverless/api/#post-/api/v2/dbrps" >}} Include the following: @@ -477,9 +477,9 @@ influx v1 dbrp list \ {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps` API endpoint](/influxdb3/cloud-serverless/api/#operation/GetDBRPs) to list DBRP mappings. +Use the [`/api/v2/dbrps` API endpoint](/influxdb3/cloud-serverless/api/#get-/api/v2/dbrps) to list DBRP mappings. -{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps" method="GET" api-ref="/influxdb3/cloud-serverless/api/#operation/GetDBRPs" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps" method="GET" api-ref="/influxdb3/cloud-serverless/api/#get-/api/v2/dbrps" >}} Include the following: @@ -586,9 +586,9 @@ The output is the DBRP. 
{{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb3/cloud-serverless/api/#operation/GetDBRPs) to update DBRP mappings. +Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb3/cloud-serverless/api/#get-/api/v2/dbrps) to update DBRP mappings. -{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps/{dbrpID}" method="PATCH" api-ref="/influxdb3/cloud-serverless/api/#operation/PatchDBRPID" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps/{dbrpID}" method="PATCH" api-ref="/influxdb3/cloud-serverless/api/#patch-/api/v2/dbrps/-dbrpID-" >}} Include the following: @@ -678,10 +678,10 @@ The output is the DBRP. {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb3/cloud-serverless/api/#operation/DeleteDBRPID) +Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb3/cloud-serverless/api/#delete-/api/v2/dbrps/-dbrpID-) to delete a DBRP mapping. -{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps/{dbrpID}" method="DELETE" api-ref="/influxdb3/cloud-serverless/api/#operation/DeleteDBRPID" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/dbrps/{dbrpID}" method="DELETE" api-ref="/influxdb3/cloud-serverless/api/#delete-/api/v2/dbrps/-dbrpID-" >}} Include the following: diff --git a/content/influxdb3/cloud-serverless/query-data/execute-queries/v1-http.md b/content/influxdb3/cloud-serverless/query-data/execute-queries/v1-http.md index a575998420..b89cc0259a 100644 --- a/content/influxdb3/cloud-serverless/query-data/execute-queries/v1-http.md +++ b/content/influxdb3/cloud-serverless/query-data/execute-queries/v1-http.md @@ -53,7 +53,7 @@ _Before you can use the v1 query API, To query using HTTP and InfluxQL, send a `GET` or `POST` request to the v1 `/query` endpoint. 
-{{< api-endpoint endpoint="https://{{< influxdb/host >}}/query" method="get" api-ref="/influxdb3/cloud-serverless/api/#operation/GetLegacyQuery" >}} +{{< api-endpoint endpoint="https://{{< influxdb/host >}}/query" method="get" api-ref="/influxdb3/cloud-serverless/api/#get-/query" >}} ### Parameters diff --git a/content/influxdb3/cloud-serverless/reference/api/_index.md b/content/influxdb3/cloud-serverless/reference/api/_index.md deleted file mode 100644 index a895050e5d..0000000000 --- a/content/influxdb3/cloud-serverless/reference/api/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: InfluxDB HTTP API -description: > - The InfluxDB HTTP API provides a programmatic interface for interactions with InfluxDB. - Access the InfluxDB API using the `/api/v2/write` or InfluxDB v1 endpoints. -menu: - influxdb3_cloud_serverless: - parent: Reference - name: InfluxDB HTTP API -weight: 104 -influxdb3/cloud-serverless/tags: [api] ---- - -The InfluxDB HTTP API provides a programmatic interface for interactions with -{{% product-name %}}, such as writing and querying data. - -Access the InfluxDB HTTP API using the `/api/v2/` or InfluxDB v1 endpoints. - -## InfluxDB v2 Compatibility API reference documentation - -InfluxDB v2 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v2-compatible -endpoints that work with {{% product-name %}} and with InfluxDB 2.x client -libraries and third-party integrations. - -## InfluxDB v1 Compatibility API reference documentation - -InfluxDB v1 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v1-compatible `/write` and `/query` endpoints that work with {{% product-name %}} and with InfluxDB 1.x client libraries and third-party integrations. 
diff --git a/content/influxdb3/cloud-serverless/reference/client-libraries/v2/javascript/nodejs/write.md b/content/influxdb3/cloud-serverless/reference/client-libraries/v2/javascript/nodejs/write.md index fbc8c1215a..4bc20d2d22 100644 --- a/content/influxdb3/cloud-serverless/reference/client-libraries/v2/javascript/nodejs/write.md +++ b/content/influxdb3/cloud-serverless/reference/client-libraries/v2/javascript/nodejs/write.md @@ -144,4 +144,4 @@ node write.js ### Response codes _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb3/cloud-serverless/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb3/cloud-serverless/api/#post-/api/v2/write)._ diff --git a/content/influxdb3/cloud-serverless/write-data/delete-data.md b/content/influxdb3/cloud-serverless/write-data/delete-data.md index ca5d50c015..73768525ae 100644 --- a/content/influxdb3/cloud-serverless/write-data/delete-data.md +++ b/content/influxdb3/cloud-serverless/write-data/delete-data.md @@ -26,4 +26,4 @@ When querying: - [Filter for tag values](/influxdb3/cloud-serverless/query-data/sql/basic-query/#query-fields-based-on-tag-values) in your version tags. - [Use time boundaries](/influxdb3/cloud-serverless/query-data/sql/basic-query/#query-data-within-time-boundaries) that exclude old data. 
-_To delete a bucket and **all** its data, use the [InfluxDB `/api/v2/buckets` API endpoint](/influxdb3/cloud-serverless/api/#operation/DeleteBucketsID)._ +_To delete a bucket and **all** its data, use the [InfluxDB `/api/v2/buckets` API endpoint](/influxdb3/cloud-serverless/api/#delete-/api/v2/buckets/-bucketID-)._ diff --git a/content/influxdb3/clustered/.vale.ini b/content/influxdb3/clustered/.vale.ini index 2ae7567c0a..c381f0cde9 100644 --- a/content/influxdb3/clustered/.vale.ini +++ b/content/influxdb3/clustered/.vale.ini @@ -14,22 +14,4 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. -Google.Units = NO -Vale.Spelling = NO -# Disable Vale.Terms - the vocabulary-based substitution rule creates too many -# false positives from URLs, file paths, and code. The accepted terms in -# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. -Vale.Terms = NO -# Disable write-good.TooWordy - flags legitimate technical terms like -# "aggregate", "expiration", "multiple", "However" that are standard in -# database documentation. -write-good.TooWordy = NO - -# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... -# Ignore full URLs like https://example.com/... -# Ignore inline code in frontmatter (description fields, etc.) -TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` \ No newline at end of file +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb3/clustered/get-started/write.md b/content/influxdb3/clustered/get-started/write.md index 19bff124ab..ab0836f4fc 100644 --- a/content/influxdb3/clustered/get-started/write.md +++ b/content/influxdb3/clustered/get-started/write.md @@ -426,10 +426,10 @@ API endpoint. 
To write data to InfluxDB using the [InfluxDB v1 HTTP API](/influxdb3/clustered/reference/api/), send a request to the -[InfluxDB API `/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite) +[InfluxDB API `/write` endpoint](/influxdb3/clustered/api/#post-/write) using the `POST` request method. -{{% api-endpoint endpoint="https://{{< influxdb/host >}}/write" method="post" api-ref="/influxdb3/clustered/api/#operation/PostLegacyWrite"%}} +{{% api-endpoint endpoint="https://{{< influxdb/host >}}/write" method="post" api-ref="/influxdb3/clustered/api/#post-/write"%}} Include the following with your request: @@ -444,7 +444,7 @@ Include the following with your request: > [!Note] > With the {{% product-name %}} -> [v1 API `/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite), +> [v1 API `/write` endpoint](/influxdb3/clustered/api/#post-/write), > `Authorization: Bearer` and `Authorization: Token` are equivalent and you can > use either scheme to pass a database token in your request. > Include the word `Bearer` or `Token`, a space, and your **token** value (all case-sensitive). @@ -538,7 +538,7 @@ To write data to InfluxDB using the to the InfluxDB API `/api/v2/write` endpoint using the `POST` request method. {{< api-endpoint endpoint="https://{{< influxdb/host >}}/api/v2/write" -method="post" api-ref="/influxdb3/clustered/api/#operation/PostWrite" >}} +method="post" api-ref="/influxdb3/clustered/api/#post-/api/v2/write" >}} Include the following with your request: diff --git a/content/influxdb3/clustered/reference/api/_index.md b/content/influxdb3/clustered/reference/api/_index.md deleted file mode 100644 index 3259b1437f..0000000000 --- a/content/influxdb3/clustered/reference/api/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: InfluxDB HTTP API -description: > - The InfluxDB HTTP API provides a programmatic interface for interactions with - InfluxDB, such as writing and querying data. 
- Access the InfluxDB API using the `/api/v2/write` or InfluxDB v1 endpoints. -menu: - influxdb3_clustered: - parent: Reference - name: InfluxDB HTTP API -weight: 127 -influxdb3/clustered/tags: [api] ---- - -The InfluxDB HTTP API provides a programmatic interface for interactions with -{{% product-name %}}, such as writing and querying data. - -Access the InfluxDB HTTP API using the `/api/v2/` or InfluxDB v1 endpoints. - -## InfluxDB v2 Compatibility API reference documentation - -InfluxDB v2 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v2-compatible -endpoints that work with {{% product-name %}} and with InfluxDB 2.x client -libraries and third-party integrations. - -## InfluxDB v1 Compatibility API reference documentation - -InfluxDB v1 API for {{% product-name %}} - -The API reference describes requests and responses for InfluxDB v1-compatible `/write` and `/query` endpoints that work with {{% product-name %}} and with InfluxDB 1.x client libraries and third-party integrations. 
diff --git a/content/influxdb3/clustered/reference/client-libraries/v2/javascript/nodejs/write.md b/content/influxdb3/clustered/reference/client-libraries/v2/javascript/nodejs/write.md index 02d020c41c..dbfa4e0220 100644 --- a/content/influxdb3/clustered/reference/client-libraries/v2/javascript/nodejs/write.md +++ b/content/influxdb3/clustered/reference/client-libraries/v2/javascript/nodejs/write.md @@ -144,4 +144,4 @@ node write.js ### Response codes _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb3/clustered/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb3/clustered/api/#post-/api/v2/write)._ diff --git a/content/influxdb3/core/.vale.ini b/content/influxdb3/core/.vale.ini index 03f6d282a3..86731aebfc 100644 --- a/content/influxdb3/core/.vale.ini +++ b/content/influxdb3/core/.vale.ini @@ -19,22 +19,4 @@ Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO -# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte -# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. -Google.Units = NO -Vale.Spelling = NO -# Disable Vale.Terms - the vocabulary-based substitution rule creates too many -# false positives from URLs, file paths, and code. The accepted terms in -# accept.txt still work for spelling checks via InfluxDataDocs.Spelling. -Vale.Terms = NO -# Disable write-good.TooWordy - flags legitimate technical terms like -# "aggregate", "expiration", "multiple", "However" that are standard in -# database documentation. -write-good.TooWordy = NO - -# Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... -# Ignore full URLs like https://example.com/... -# Ignore inline code in frontmatter (description fields, etc.) 
-TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ - https?://[^\s\)\]>"]+, \ - `[^`]+` \ No newline at end of file +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb3/core/admin/databases/create.md b/content/influxdb3/core/admin/databases/create.md index 784d8952ed..fdb45c5ba9 100644 --- a/content/influxdb3/core/admin/databases/create.md +++ b/content/influxdb3/core/admin/databases/create.md @@ -27,7 +27,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/create/database/ - - /influxdb3/core/api/v3/#operation/PostConfigureDatabase, Create database API + - /influxdb3/core/api/v3/#post-/api/v3/configure/database, Create database API - /influxdb3/core/reference/naming-restrictions/ - /influxdb3/core/reference/internals/data-retention/ - /influxdb3/explorer/manage-databases/ diff --git a/content/influxdb3/core/admin/databases/delete.md b/content/influxdb3/core/admin/databases/delete.md index 93878b9f36..d851ee95cc 100644 --- a/content/influxdb3/core/admin/databases/delete.md +++ b/content/influxdb3/core/admin/databases/delete.md @@ -18,7 +18,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/delete/database/ - - /influxdb3/core/api/v3/#operation/DeleteConfigureDatabase, Delete database API + - /influxdb3/core/api/v3/#delete-/api/v3/configure/database, Delete database API - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/delete.md --- diff --git a/content/influxdb3/core/admin/databases/list.md b/content/influxdb3/core/admin/databases/list.md index ae4a0a8dd2..681cba421b 100644 --- a/content/influxdb3/core/admin/databases/list.md +++ b/content/influxdb3/core/admin/databases/list.md @@ -17,7 +17,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/show/databases/ - - /influxdb3/core/api/v3/#operation/GetConfigureDatabase, List databases API + - /influxdb3/core/api/v3/#get-/api/v3/configure/database, List databases API - 
/influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/list.md --- diff --git a/content/influxdb3/core/admin/tokens/admin/create.md b/content/influxdb3/core/admin/tokens/admin/create.md index 498c6cdeff..95b7fbfab6 100644 --- a/content/influxdb3/core/admin/tokens/admin/create.md +++ b/content/influxdb3/core/admin/tokens/admin/create.md @@ -2,7 +2,7 @@ title: Create an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/) - or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/core/api/v3/#operation/PostCreateAdminToken) endpoint + or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/core/api/v3/#post-/api/v3/configure/token/admin) endpoint to create an [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. An admin token grants access to all actions on the server. menu: diff --git a/content/influxdb3/core/admin/tokens/admin/regenerate.md b/content/influxdb3/core/admin/tokens/admin/regenerate.md index f8e8d7ab91..c1ba33ced4 100644 --- a/content/influxdb3/core/admin/tokens/admin/regenerate.md +++ b/content/influxdb3/core/admin/tokens/admin/regenerate.md @@ -2,7 +2,7 @@ title: Regenerate an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/) - or the HTTP API [`/api/v3/configure/token/admin/regenerate`](/influxdb3/core/api/v3/#operation/PostRegenerateAdminToken) endpoint + or the HTTP API [`/api/v3/configure/token/admin/regenerate`](/influxdb3/core/api/v3/#post-/api/v3/configure/token/admin/regenerate) endpoint to regenerate an [operator token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. Regenerating an operator token deactivates the previous token. 
menu: diff --git a/content/influxdb3/core/get-started/migrate-from-influxdb-v1-v2.md b/content/influxdb3/core/get-started/migrate-from-influxdb-v1-v2.md new file mode 100644 index 0000000000..12d43ec292 --- /dev/null +++ b/content/influxdb3/core/get-started/migrate-from-influxdb-v1-v2.md @@ -0,0 +1,12 @@ +--- +title: Migrate from InfluxDB v1 or v2 +description: > + Migrate existing InfluxDB v1 or v2 workloads to InfluxDB 3 Core using + compatibility APIs and client libraries. +menu: + influxdb3_core: + name: Migrate from v1/v2 + parent: Get started +weight: 105 +source: /shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md +--- diff --git a/content/influxdb3/core/reference/api/_index.md b/content/influxdb3/core/reference/api/_index.md index 6a2200b1e5..13724ba97f 100644 --- a/content/influxdb3/core/reference/api/_index.md +++ b/content/influxdb3/core/reference/api/_index.md @@ -1,20 +1,12 @@ --- title: InfluxDB HTTP API description: > - The InfluxDB HTTP API for {{% product-name %}} provides a programmatic interface - for interactions with InfluxDB, - including writing, querying, and processing data, and managing an InfluxDB 3 - instance. -menu: - influxdb3_core: - parent: Reference - name: InfluxDB HTTP API -weight: 104 -influxdb3/core/tags: [api] -source: /shared/influxdb3-api-reference/_index.md + The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface + for interactions with InfluxDB. +# Redirect to the new location +aliases: + - /influxdb3/core/reference/api/ +redirect: /influxdb3/core/api/ --- - +This page has moved to [InfluxDB HTTP API](/influxdb3/core/api/). 
diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/database.md b/content/influxdb3/core/reference/cli/influxdb3/create/database.md
index a957f4ac7f..d77b00b268 100644
--- a/content/influxdb3/core/reference/cli/influxdb3/create/database.md
+++ b/content/influxdb3/core/reference/cli/influxdb3/create/database.md
@@ -9,7 +9,7 @@ menu:
 weight: 400
 related:
   - /influxdb3/core/admin/databases/create/
-  - /influxdb3/core/api/v3/#operation/PostConfigureDatabase, Create database API
+  - /influxdb3/core/api/v3/#post-/api/v3/configure/database, Create database API
   - /influxdb3/core/reference/internals/data-retention/
 source: /shared/influxdb3-cli/create/database.md
 ---
diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md
index b087a11e79..35fdd5835b 100644
--- a/content/influxdb3/core/reference/cli/influxdb3/serve.md
+++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md
@@ -63,7 +63,7 @@ influxdb3 serve [OPTIONS]
 | | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ |
 | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ |
 | | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ |
-| | `--azure-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-endpoint)_ |
+| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-endpoint)_ |
 | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ |
 | | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ |
 | | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ |
@@ -71,14 +71,14 
@@ influxdb3 serve [OPTIONS] | | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | | | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-disable-lifo-slot` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-event-interval` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-global-queue-interval` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-max-blocking-threads` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-max-io-events-per-tick` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-thread-keep-alive` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-thread-priority` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-type` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | +| | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ | +| | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ | +| | `--datafusion-runtime-max-blocking-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-blocking-threads)_ | +| | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | 
+| | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | +| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ | +| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ | | | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ | | | `--disable-authz` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-authz)_ | @@ -118,7 +118,7 @@ influxdb3 serve [OPTIONS] | | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ | | | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ | | | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ | -| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-telemetry-upload)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ | | | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ | | | `--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ | | | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ | diff --git a/content/influxdb3/core/reference/internals/data-retention.md 
b/content/influxdb3/core/reference/internals/data-retention.md index ffa4b98cb2..56ed71c768 100644 --- a/content/influxdb3/core/reference/internals/data-retention.md +++ b/content/influxdb3/core/reference/internals/data-retention.md @@ -12,7 +12,7 @@ influxdb3/core/tags: [internals, retention] related: - /influxdb3/core/admin/databases/create/ - /influxdb3/core/reference/cli/influxdb3/create/database/ - - /influxdb3/core/api/v3/#operation/PostConfigureDatabase, Create database API + - /influxdb3/core/api/v3/#post-/api/v3/configure/database, Create database API - /influxdb3/core/reference/glossary/#retention-period source: /shared/influxdb3-internals/data-retention.md --- diff --git a/content/influxdb3/core/write-data/client-libraries.md b/content/influxdb3/core/write-data/client-libraries.md index 2dcd72f328..50fd8df563 100644 --- a/content/influxdb3/core/write-data/client-libraries.md +++ b/content/influxdb3/core/write-data/client-libraries.md @@ -14,7 +14,7 @@ related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - /influxdb3/core/reference/client-libraries/v3/ - - /influxdb3/core/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/core/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/client-libraries.md --- diff --git a/content/influxdb3/core/write-data/http-api/_index.md b/content/influxdb3/core/write-data/http-api/_index.md index d298113997..24dffc5604 100644 --- a/content/influxdb3/core/write-data/http-api/_index.md +++ b/content/influxdb3/core/write-data/http-api/_index.md @@ -12,7 +12,7 @@ weight: 100 related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - - /influxdb3/core/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/core/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/http-api/_index.md --- diff --git 
a/content/influxdb3/core/write-data/http-api/compatibility-apis.md b/content/influxdb3/core/write-data/http-api/compatibility-apis.md index 901d0f9002..a219a41c4f 100644 --- a/content/influxdb3/core/write-data/http-api/compatibility-apis.md +++ b/content/influxdb3/core/write-data/http-api/compatibility-apis.md @@ -15,8 +15,8 @@ related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - /influxdb3/core/reference/client-libraries/v2/ - - /influxdb3/core/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint - - /influxdb3/core/api/v3/#operation/PostV1Write, /write (v1-compatible) endpoint + - /influxdb3/core/api/v3/#post-/api/v2/write, /api/v2/write (v2-compatible) endpoint + - /influxdb3/core/api/v3/#post-/write, /write (v1-compatible) endpoint source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md --- diff --git a/content/influxdb3/core/write-data/http-api/v3-write-lp.md b/content/influxdb3/core/write-data/http-api/v3-write-lp.md index ae5fa9cdac..ee976c6253 100644 --- a/content/influxdb3/core/write-data/http-api/v3-write-lp.md +++ b/content/influxdb3/core/write-data/http-api/v3-write-lp.md @@ -10,7 +10,7 @@ weight: 201 related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - - /influxdb3/core/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/core/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/http-api/v3-write-lp.md --- diff --git a/content/influxdb3/enterprise/admin/databases/create.md b/content/influxdb3/enterprise/admin/databases/create.md index cd3d3baa82..1eee597a47 100644 --- a/content/influxdb3/enterprise/admin/databases/create.md +++ b/content/influxdb3/enterprise/admin/databases/create.md @@ -28,7 +28,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/create/database/ - - /influxdb3/enterprise/api/v3/#operation/PostConfigureDatabase, Create 
database API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/database, Create database API - /influxdb3/enterprise/reference/naming-restrictions/ - /influxdb3/enterprise/reference/internals/data-retention/ - /influxdb3/explorer/manage-databases/ diff --git a/content/influxdb3/enterprise/admin/databases/delete.md b/content/influxdb3/enterprise/admin/databases/delete.md index 8371ade619..6d43341a5b 100644 --- a/content/influxdb3/enterprise/admin/databases/delete.md +++ b/content/influxdb3/enterprise/admin/databases/delete.md @@ -18,7 +18,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/delete/database/ - - /influxdb3/enterprise/api/v3/#operation/DeleteConfigureDatabase, Delete database API + - /influxdb3/enterprise/api/v3/#delete-/api/v3/configure/database, Delete database API - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/delete.md --- diff --git a/content/influxdb3/enterprise/admin/databases/list.md b/content/influxdb3/enterprise/admin/databases/list.md index ae9c4fa0b4..84e8c3e5ef 100644 --- a/content/influxdb3/enterprise/admin/databases/list.md +++ b/content/influxdb3/enterprise/admin/databases/list.md @@ -17,7 +17,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/show/databases/ - - /influxdb3/enterprise/api/v3/#operation/GetConfigureDatabase, List databases API + - /influxdb3/enterprise/api/v3/#get-/api/v3/configure/database, List databases API - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/list.md --- diff --git a/content/influxdb3/enterprise/admin/tables/create.md b/content/influxdb3/enterprise/admin/tables/create.md index ef786ca04c..4e826a525e 100644 --- a/content/influxdb3/enterprise/admin/tables/create.md +++ b/content/influxdb3/enterprise/admin/tables/create.md @@ -30,7 +30,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/create/table/ - - 
/influxdb3/enterprise/api/v3/#operation/PostConfigureTable, Create table API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/table, Create table API - /influxdb3/enterprise/reference/naming-restrictions/ - /influxdb3/enterprise/reference/internals/data-retention/ source: /shared/influxdb3-admin/tables/create.md diff --git a/content/influxdb3/enterprise/admin/tables/delete.md b/content/influxdb3/enterprise/admin/tables/delete.md index 8580ca7e32..34c4e1f13c 100644 --- a/content/influxdb3/enterprise/admin/tables/delete.md +++ b/content/influxdb3/enterprise/admin/tables/delete.md @@ -22,7 +22,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/delete/table/ - - /influxdb3/enterprise/api/v3/#operation/DeleteConfigureTable, Delete table API + - /influxdb3/enterprise/api/v3/#delete-/api/v3/configure/table, Delete table API source: /shared/influxdb3-admin/tables/delete.md --- diff --git a/content/influxdb3/enterprise/admin/tables/list.md b/content/influxdb3/enterprise/admin/tables/list.md index bc013aecee..0912596c2d 100644 --- a/content/influxdb3/enterprise/admin/tables/list.md +++ b/content/influxdb3/enterprise/admin/tables/list.md @@ -24,7 +24,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/query/ - - /influxdb3/enterprise/api/v3/#operation/GetQuerySql, Query API + - /influxdb3/enterprise/api/v3/#get-/api/v3/query_sql, Query API source: /shared/influxdb3-admin/tables/list.md --- diff --git a/content/influxdb3/enterprise/admin/tokens/admin/create.md b/content/influxdb3/enterprise/admin/tokens/admin/create.md index 9c821b4ab0..13d984dc89 100644 --- a/content/influxdb3/enterprise/admin/tokens/admin/create.md +++ b/content/influxdb3/enterprise/admin/tokens/admin/create.md @@ -2,7 +2,7 @@ title: Create an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/) - or the HTTP API 
[`/api/v3/configure/token/admin`](/influxdb3/enterprise/api/v3/#operation/PostCreateAdminToken) + or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/enterprise/api/v3/#post-/api/v3/configure/token/admin) endpoint to create an operator or named [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. An admin token grants access to all actions on the server. menu: diff --git a/content/influxdb3/enterprise/admin/tokens/resource/create.md b/content/influxdb3/enterprise/admin/tokens/resource/create.md index e7de10b617..08b61a44f7 100644 --- a/content/influxdb3/enterprise/admin/tokens/resource/create.md +++ b/content/influxdb3/enterprise/admin/tokens/resource/create.md @@ -43,7 +43,7 @@ alt_links: --- Use the [`influxdb3 create token --permission` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/) -or the [`/api/v3/configure/token` HTTP API endpoint](/influxdb3/enterprise/api/v3/#operation/PostCreateResourceToken) +or the [`/api/v3/configure/token` HTTP API endpoint](/influxdb3/enterprise/api/v3/#post-/api/v3/configure/enterprise/token) to create fine-grained permissions tokens that grant access to resources such as databases and system information. Database tokens allow for reading and writing data in your {{< product-name omit="Clustered" >}} instance. System tokens allow for reading system information and metrics for your server. diff --git a/content/influxdb3/enterprise/get-started/migrate-from-influxdb-v1-v2.md b/content/influxdb3/enterprise/get-started/migrate-from-influxdb-v1-v2.md new file mode 100644 index 0000000000..fb41ab489c --- /dev/null +++ b/content/influxdb3/enterprise/get-started/migrate-from-influxdb-v1-v2.md @@ -0,0 +1,12 @@ +--- +title: Migrate from InfluxDB v1 or v2 +description: > + Migrate existing InfluxDB v1 or v2 workloads to InfluxDB 3 Enterprise using + compatibility APIs and client libraries. 
+menu: + influxdb3_enterprise: + name: Migrate from v1/v2 + parent: Get started +weight: 105 +source: /shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md +--- diff --git a/content/influxdb3/enterprise/reference/api/_index.md b/content/influxdb3/enterprise/reference/api/_index.md index ea78867f6d..a5a831de48 100644 --- a/content/influxdb3/enterprise/reference/api/_index.md +++ b/content/influxdb3/enterprise/reference/api/_index.md @@ -1,20 +1,10 @@ --- title: InfluxDB HTTP API description: > - The InfluxDB HTTP API for {{% product-name %}} provides a programmatic interface - for interactions with InfluxDB, - including writing, querying, and processing data, and managing an InfluxDB 3 - instance. -menu: - influxdb3_enterprise: - parent: Reference - name: InfluxDB HTTP API -weight: 104 -influxdb3/enterprise/tags: [api] -source: /shared/influxdb3-api-reference/_index.md + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface + for interactions with InfluxDB. +# Redirect to the new location +redirect: /influxdb3/enterprise/api/ --- - +This page has moved to [InfluxDB HTTP API](/influxdb3/enterprise/api/). 
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/database.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/database.md index 5a1d61c7d4..8642c24cb3 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/database.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/database.md @@ -9,7 +9,7 @@ menu: weight: 400 related: - /influxdb3/enterprise/admin/databases/create/ - - /influxdb3/enterprise/api/v3/#operation/PostConfigureDatabase, Create database API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/database, Create database API - /influxdb3/enterprise/reference/internals/data-retention/ source: /shared/influxdb3-cli/create/database.md --- diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/table.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/table.md index 26c53e503d..a42396bc85 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/table.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/table.md @@ -9,7 +9,7 @@ menu: weight: 400 related: - /influxdb3/enterprise/admin/tables/create/ - - /influxdb3/enterprise/api/v3/#operation/PostConfigureTable, Create table API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/table, Create table API - /influxdb3/enterprise/reference/internals/data-retention/ source: /shared/influxdb3-cli/create/table.md --- diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 4ade8944d8..804a826ec9 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -62,7 +62,7 @@ influxdb3 serve [OPTIONS] | | `--aws-session-token` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-session-token)_ | | | `--aws-skip-signature` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#aws-skip-signature)_ | | | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ | -| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-endpoint)_ | +| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-endpoint)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | @@ -73,18 +73,19 @@ influxdb3 serve [OPTIONS] | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | | | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ | +| | `--compaction-row-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-row-limit)_ | | | `--data-dir` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#data-dir)_ | | | `--datafusion-config` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-config)_ | | | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-num-threads)_ | -| | 
`--datafusion-runtime-disable-lifo-slot` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-event-interval` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-global-queue-interval` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-max-blocking-threads` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-max-io-events-per-tick` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-thread-keep-alive` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-thread-priority` | Development-only Tokio runtime configuration | -| | `--datafusion-runtime-type` | Development-only Tokio runtime configuration | +| | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | +| | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-event-interval)_ | +| | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-global-queue-interval)_ | +| | `--datafusion-runtime-max-blocking-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-max-blocking-threads)_ | +| | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | +| | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | +| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ | +| | `--datafusion-runtime-type` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ | | | `--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ | | | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ | @@ -112,7 +113,7 @@ influxdb3 serve [OPTIONS] | | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | | | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | | | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | -| | `--num-datafusion-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-num-threads)_ | +| | `--num-datafusion-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-datafusion-threads)_ | | | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ | | | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ | | | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ | @@ -139,7 +140,7 @@ influxdb3 serve [OPTIONS] | | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ | | | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ | | | `--tcp-listener-file-path` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ | -| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-telemetry-upload)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ | | | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ | | | `--tls-cert` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ | | | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ | diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md b/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md index 289af5e08a..8600754d8c 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md @@ -8,7 +8,7 @@ menu: name: influxdb3 update database weight: 400 related: - - /influxdb3/enterprise/api/v3/#operation/PatchConfigureDatabase, Update database API + - /influxdb3/enterprise/api/v3/#patch-/api/v3/configure/database/-db-, Update database API - /influxdb3/enterprise/reference/internals/data-retention/ source: /shared/influxdb3-cli/update/database/_index.md --- diff --git a/content/influxdb3/enterprise/reference/internals/data-retention.md b/content/influxdb3/enterprise/reference/internals/data-retention.md index 9bae03474f..6050abe2ba 100644 --- a/content/influxdb3/enterprise/reference/internals/data-retention.md +++ b/content/influxdb3/enterprise/reference/internals/data-retention.md @@ -15,9 +15,9 @@ related: - /influxdb3/enterprise/reference/cli/influxdb3/create/database/ - /influxdb3/enterprise/reference/cli/influxdb3/create/table/ - /influxdb3/enterprise/reference/cli/influxdb3/update/database/ - - 
/influxdb3/enterprise/api/v3/#operation/PostConfigureDatabase, Create database API - - /influxdb3/enterprise/api/v3/#operation/PostConfigureTable, Create table API - - /influxdb3/enterprise/api/v3/#operation/PatchConfigureDatabase, Update database API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/database, Create database API + - /influxdb3/enterprise/api/v3/#post-/api/v3/configure/table, Create table API + - /influxdb3/enterprise/api/v3/#patch-/api/v3/configure/database/-db-, Update database API - /influxdb3/enterprise/reference/glossary/#retention-period source: /shared/influxdb3-internals/data-retention.md --- diff --git a/content/influxdb3/enterprise/write-data/client-libraries.md b/content/influxdb3/enterprise/write-data/client-libraries.md index 6b32cc7cb8..5133a243ad 100644 --- a/content/influxdb3/enterprise/write-data/client-libraries.md +++ b/content/influxdb3/enterprise/write-data/client-libraries.md @@ -14,7 +14,7 @@ related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - /influxdb3/enterprise/reference/client-libraries/v3/ - - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/enterprise/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/client-libraries.md --- diff --git a/content/influxdb3/enterprise/write-data/compatibility-apis.md b/content/influxdb3/enterprise/write-data/compatibility-apis.md index b811d24c02..db23edbaeb 100644 --- a/content/influxdb3/enterprise/write-data/compatibility-apis.md +++ b/content/influxdb3/enterprise/write-data/compatibility-apis.md @@ -15,8 +15,8 @@ related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - /influxdb3/enterprise/reference/client-libraries/v2/ - - /influxdb3/enterprise/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint - - /influxdb3/enterprise/api/v3/#operation/PostV1Write, /write 
(v1-compatible) endpoint + - /influxdb3/enterprise/api/v3/#post-/api/v2/write, /api/v2/write (v2-compatible) endpoint + - /influxdb3/enterprise/api/v3/#post-/write, /write (v1-compatible) endpoint source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md --- diff --git a/content/influxdb3/enterprise/write-data/http-api/_index.md b/content/influxdb3/enterprise/write-data/http-api/_index.md index da321e75fd..a9d9df41c3 100644 --- a/content/influxdb3/enterprise/write-data/http-api/_index.md +++ b/content/influxdb3/enterprise/write-data/http-api/_index.md @@ -12,7 +12,7 @@ weight: 100 related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/enterprise/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/http-api/_index.md --- diff --git a/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md b/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md index 47f6458562..e5ccde619e 100644 --- a/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md +++ b/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md @@ -15,8 +15,8 @@ related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - /influxdb3/enterprise/reference/client-libraries/v2/ - - /influxdb3/enterprise/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint - - /influxdb3/enterprise/api/v3/#operation/PostV1Write, /write (v1-compatible) endpoint + - /influxdb3/enterprise/api/v3/#post-/api/v2/write, /api/v2/write (v2-compatible) endpoint + - /influxdb3/enterprise/api/v3/#post-/write, /write (v1-compatible) endpoint source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md --- diff --git a/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md 
b/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md index 3b34fc2a39..5d5c80701b 100644 --- a/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md +++ b/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md @@ -10,7 +10,7 @@ weight: 201 related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint + - /influxdb3/enterprise/api/v3/#post-/api/v3/write_lp, /api/v3/write_lp endpoint source: /shared/influxdb3-write-guides/http-api/v3-write-lp.md --- diff --git a/content/kapacitor/v1/working/flux/_index.md b/content/kapacitor/v1/working/flux/_index.md index 4c5becb90a..28d580b8da 100644 --- a/content/kapacitor/v1/working/flux/_index.md +++ b/content/kapacitor/v1/working/flux/_index.md @@ -197,7 +197,7 @@ Update or add the following settings under `[fluxtask]` your `kapacitor.conf`: - **task-run-influxdb**: Name of the [InfluxDB configuration in your `kapacitor.conf`](/kapacitor/v1/administration/configuration/#influxdb) to use to store Flux task data. _To disable Flux task logging, set to `"none"`._ -- **task-run-bucket**: InfluxDB bucket to store Flux task data and logs in. We recommend leaving this empty. By default, data is written to the `kapacitor_fluxtask_logs` bucket. To specify another bucket to write task log data to, use the [_tasks system bucket](/influxdb/cloud/reference/internals/system-buckets/#_tasks-system-bucket) or [create a new bucket](/influxdb/cloud/admin/buckets/create-bucket/). If the specified bucket does not already exist in InfluxDB, Kapacitor attempts to create it with [`POST /api/v2/buckets`](/influxdb/v2/api/#operation/PostBuckets), in which case your API token must have permissions to create buckets in InfluxDB. For more information, see [Manage API tokens](/influxdb/v2/admin/tokens/). +- **task-run-bucket**: InfluxDB bucket to store Flux task data and logs in. 
We recommend leaving this empty. By default, data is written to the `kapacitor_fluxtask_logs` bucket. To specify another bucket to write task log data to, use the [_tasks system bucket](/influxdb/cloud/reference/internals/system-buckets/#_tasks-system-bucket) or [create a new bucket](/influxdb/cloud/admin/buckets/create-bucket/). If the specified bucket does not already exist in InfluxDB, Kapacitor attempts to create it with [`POST /api/v2/buckets`](/influxdb/v2/api/#post-/api/v2/buckets), in which case your API token must have permissions to create buckets in InfluxDB. For more information, see [Manage API tokens](/influxdb/v2/admin/tokens/). - Provide one of the following: - **task-run-org**: InfluxDB organization name. - **task-run-orgid**: InfluxDB organization ID. diff --git a/content/shared/influxdb-client-libraries-reference/v2/javascript/nodejs/write.md b/content/shared/influxdb-client-libraries-reference/v2/javascript/nodejs/write.md index 6daf3997ab..9ebfc293cf 100644 --- a/content/shared/influxdb-client-libraries-reference/v2/javascript/nodejs/write.md +++ b/content/shared/influxdb-client-libraries-reference/v2/javascript/nodejs/write.md @@ -117,4 +117,4 @@ node write.js ### Response codes _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb3/version/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb3/version/api/#post-/api/v2/write)._ diff --git a/content/shared/influxdb-v2/admin/tokens/create-token.md b/content/shared/influxdb-v2/admin/tokens/create-token.md index e9e41ba115..7071a8cfa3 100644 --- a/content/shared/influxdb-v2/admin/tokens/create-token.md +++ b/content/shared/influxdb-v2/admin/tokens/create-token.md @@ -172,13 +172,13 @@ See the [`influx auth create` documentation](/influxdb/version/reference/cli/inf Use the `/api/v2/authorizations` InfluxDB API endpoint to create a token. 
-{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/authorizations" api-ref="/influxdb/version/api/#operation/PostAuthorizations" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/authorizations" api-ref="/influxdb/version/api/#post-/api/v2/authorizations" >}} Include the following in your request: | Requirement | Include by | |:----------- |:---------- | -| API token with the [`write: authorizations`](/influxdb/version/api/#operation/PostAuthorizations) permission | Use the `Authorization` header and the {{% show-in "v2" %}}`Bearer` or {{% /show-in %}}`Token` scheme. | +| API token with the [`write: authorizations`](/influxdb/version/api/#post-/api/v2/authorizations) permission | Use the `Authorization` header and the {{% show-in "v2" %}}`Bearer` or {{% /show-in %}}`Token` scheme. | | Organization | Pass as `orgID` in the request body. | Permissions list | Pass as a `permissions` array in the request body. @@ -196,5 +196,5 @@ body. ``` See the -[`POST /api/v2/authorizations` documentation](/influxdb/version/api/#operation/PostAuthorizations) +[`POST /api/v2/authorizations` documentation](/influxdb/version/api/#post-/api/v2/authorizations) for more information about options. diff --git a/content/shared/influxdb-v2/admin/tokens/delete-token.md b/content/shared/influxdb-v2/admin/tokens/delete-token.md index 0211fe22e0..59a611ad96 100644 --- a/content/shared/influxdb-v2/admin/tokens/delete-token.md +++ b/content/shared/influxdb-v2/admin/tokens/delete-token.md @@ -52,13 +52,13 @@ influx auth delete -i 03a2bee5a9c9a000 Use the `/api/v2/authorizations` InfluxDB API endpoint to delete a token. 
-{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" api-ref="/influxdb/version/api/#operation/DeleteAuthorizationsID" >}} +{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" api-ref="/influxdb/version/api/#delete-/api/v2/authorizations/-authID-" >}} Include the following in your request: | Requirement | Include by | |:----------- |:---------- | -| API token with the [`write: authorizations`](/influxdb/version/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | +| API token with the [`write: authorizations`](/influxdb/version/api/#post-/api/v2/authorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | | Authorization ID | URL path parameter. | ```sh diff --git a/content/shared/influxdb-v2/admin/tokens/update-tokens.md b/content/shared/influxdb-v2/admin/tokens/update-tokens.md index eea0e68b78..00281b5c40 100644 --- a/content/shared/influxdb-v2/admin/tokens/update-tokens.md +++ b/content/shared/influxdb-v2/admin/tokens/update-tokens.md @@ -86,13 +86,13 @@ influx auth find --json Use the `/api/v2/authorizations` InfluxDB API endpoint to update the description and status of a token. -{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" api-ref="/influxdb/version/api/#operation/PatchAuthorizationsID" >}} +{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" api-ref="/influxdb/version/api/#patch-/api/v2/authorizations/-authID-" >}} Include the following in your request: | Requirement | Include by | |:----------- |:---------- | -| API token with the [`write: authorizations`](/influxdb/version/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. 
| +| API token with the [`write: authorizations`](/influxdb/version/api/#post-/api/v2/authorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | | Authorization ID | URL path parameter. | | Description and/or Status | Pass as `description`, `status` in the request body. | diff --git a/content/shared/influxdb-v2/admin/tokens/view-tokens.md b/content/shared/influxdb-v2/admin/tokens/view-tokens.md index 98aecb18de..d872e71f0a 100644 --- a/content/shared/influxdb-v2/admin/tokens/view-tokens.md +++ b/content/shared/influxdb-v2/admin/tokens/view-tokens.md @@ -62,13 +62,13 @@ for information about other available flags. Use the `/api/v2/authorizations` InfluxDB API endpoint to view tokens and permissions. -{{< api-endpoint method="GET" endpoint="/api/v2/authorizations" api-ref="/influxdb/version/api/#operation/GetAuthorizations" >}} +{{< api-endpoint method="GET" endpoint="/api/v2/authorizations" api-ref="/influxdb/version/api/#get-/api/v2/authorizations" >}} Include the following in your request: | Requirement | Include by | |:----------- |:---------- | -| API token with the [`read: authorizations`](/influxdb/version/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | +| API token with the [`read: authorizations`](/influxdb/version/api/#post-/api/v2/authorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | ```sh {{% get-shared-text "api/v2.0/auth/oss/tokens-view.sh" %}} @@ -78,7 +78,7 @@ Include the following in your request: To view a specific authorization and token, include the authorization ID in the URL path. 
-{{% api-endpoint method="GET" endpoint="/api/v2/authorizations/{authID}" api-ref="/influxdb/version/api/#operation/GetAuthorizationsID" %}} +{{% api-endpoint method="GET" endpoint="/api/v2/authorizations/{authID}" api-ref="/influxdb/version/api/#get-/api/v2/authorizations/-authID-" %}} ### Filter the token list diff --git a/content/shared/influxdb-v2/api-guide/api_intro.md b/content/shared/influxdb-v2/api-guide/api_intro.md index 58059ad4f6..65e7fb66dc 100644 --- a/content/shared/influxdb-v2/api-guide/api_intro.md +++ b/content/shared/influxdb-v2/api-guide/api_intro.md @@ -89,7 +89,7 @@ function listBuckets() { Before writing data you'll need to create a bucket in your InfluxDB instance. To use the API to create a bucket, send a request to the following endpoint: -{{% api-endpoint method="POST" endpoint="/api/v2/buckets" api-ref="/influxdb/version/api/v2/#operation/PostBuckets" %}} +{{% api-endpoint method="POST" endpoint="/api/v2/buckets" api-ref="/influxdb/version/api/v2/#post-/api/v2/buckets" %}} {{% code-placeholders "API_TOKEN|ORG_ID|BUCKET_NAME|RETENTION_PERIOD_SECONDS" %}} diff --git a/content/shared/influxdb-v2/api-guide/client-libraries/nodejs/write.md b/content/shared/influxdb-v2/api-guide/client-libraries/nodejs/write.md index f430765572..9ce5bbaf51 100644 --- a/content/shared/influxdb-v2/api-guide/client-libraries/nodejs/write.md +++ b/content/shared/influxdb-v2/api-guide/client-libraries/nodejs/write.md @@ -99,4 +99,4 @@ node write.js ### Response codes _For information about **InfluxDB API response codes**, see -[InfluxDB API Write documentation](/influxdb/cloud/api/#operation/PostWrite)._ +[InfluxDB API Write documentation](/influxdb/cloud/api/#post-/api/v2/write)._ diff --git a/content/shared/influxdb-v2/get-started/query.md b/content/shared/influxdb-v2/get-started/query.md index 0b80ef0dab..943898b69b 100644 --- a/content/shared/influxdb-v2/get-started/query.md +++ b/content/shared/influxdb-v2/get-started/query.md @@ -252,10 +252,10 @@ 
from(bucket: "get-started") To query data from InfluxDB using Flux and the InfluxDB HTTP API, send a request -to the InfluxDB API [`/api/v2/query` endpoint](/influxdb/version/api/#operation/PostQuery) +to the InfluxDB API [`/api/v2/query` endpoint](/influxdb/version/api/#post-/api/v2/query) using the `POST` request method. -{{< api-endpoint endpoint="http://localhost:8086/api/v2/query" method="post" api-ref="/influxdb/version/api/#operation/PostQuery" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/query" method="post" api-ref="/influxdb/version/api/#post-/api/v2/query" >}} Include the following with your request: @@ -496,7 +496,7 @@ To query data from InfluxDB using InfluxQL and the InfluxDB HTTP API, send a req to the InfluxDB API [`/query` 1.X compatibility endpoint](/influxdb/version/reference/api/influxdb-1x/query/) using the `POST` request method. -{{< api-endpoint endpoint="http://localhost:8086/query" method="post" api-ref="/influxdb/version/api/v1/#operation/PostQueryV1" >}} +{{< api-endpoint endpoint="http://localhost:8086/query" method="post" api-ref="/influxdb/version/api/v1/#post-/query" >}} Include the following with your request: diff --git a/content/shared/influxdb-v2/get-started/write.md b/content/shared/influxdb-v2/get-started/write.md index a23cf2d551..4560510997 100644 --- a/content/shared/influxdb-v2/get-started/write.md +++ b/content/shared/influxdb-v2/get-started/write.md @@ -200,7 +200,7 @@ The UI will confirm that the data has been written successfully. To write data to InfluxDB using the InfluxDB HTTP API, send a request to the InfluxDB API `/api/v2/write` endpoint using the `POST` request method. 
-{{< api-endpoint endpoint="http://localhost:8086/api/v2/write" method="post" api-ref="/influxdb/version/api/#operation/PostWrite" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/write" method="post" api-ref="/influxdb/version/api/#post-/api/v2/write" >}} Include the following with your request: diff --git a/content/shared/influxdb-v2/monitor-alert/custom-checks.md b/content/shared/influxdb-v2/monitor-alert/custom-checks.md index e8a429f5b7..94562a2b4e 100644 --- a/content/shared/influxdb-v2/monitor-alert/custom-checks.md +++ b/content/shared/influxdb-v2/monitor-alert/custom-checks.md @@ -21,7 +21,7 @@ Using a Flux task, you can create a custom check that provides a couple advantag 4. Enter the Flux script for your custom check, including the [`monitor.check`](/flux/v0/stdlib/influxdata/influxdb/monitor/check/) function. {{% note %}} -Use the [`/api/v2/checks/{checkID}/query` API endpoint](/influxdb/version/api/#operation/DeleteDashboardsIDOwnersID) +Use the [`/api/v2/checks/{checkID}/query` API endpoint](/influxdb/version/api/#get-/api/v2/checks/-checkID-/query) to see the Flux code for a check built in the UI. This can be useful for constructing custom checks. {{% /note %}} diff --git a/content/shared/influxdb-v2/process-data/manage-tasks/create-task.md b/content/shared/influxdb-v2/process-data/manage-tasks/create-task.md index 07d65232b4..34e677a393 100644 --- a/content/shared/influxdb-v2/process-data/manage-tasks/create-task.md +++ b/content/shared/influxdb-v2/process-data/manage-tasks/create-task.md @@ -117,9 +117,9 @@ option task = { {{% show-in "v2" %}} -Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/version/api/#operation/PostTasks) to create a task. +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/version/api/#post-/api/v2/tasks) to create a task. 
-{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" api-ref="/influxdb/version/api/#operation/PostTasks" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" api-ref="/influxdb/version/api/#post-/api/v2/tasks" >}} Provide the following in your API request: ##### Request headers @@ -162,10 +162,10 @@ An InfluxDB Cloud task can run either an [invokable script](/influxdb/cloud/api- With InfluxDB Cloud invokable scripts, you can manage, reuse, and invoke scripts as API endpoints. You can use tasks to pass script parameters and schedule runs. -Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#operation/PostTasks) to create a task +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#post-/api/v2/tasks) to create a task that references a script ID. -{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" api-ref="/influxdb/cloud/api/#operation/PostTasks" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" api-ref="/influxdb/cloud/api/#post-/api/v2/tasks" >}} Provide the following in your API request: @@ -243,9 +243,9 @@ Replace **`INFLUX_API_TOKEN`** with your InfluxDB API token. ### Create a task that contains a Flux script -Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#operation/PostTasks) to create a task that contains a Flux script with task options. +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#post-/api/v2/tasks) to create a task that contains a Flux script with task options. 
-{{< api-endpoint method="POST" endpoint="https://cloud2.influxdata.com/api/v2/tasks/" api-ref="/influxdb/cloud/api/#operation/PostTasks" >}} +{{< api-endpoint method="POST" endpoint="https://cloud2.influxdata.com/api/v2/tasks/" api-ref="/influxdb/cloud/api/#post-/api/v2/tasks" >}} Provide the following in your API request: diff --git a/content/shared/influxdb-v2/process-data/manage-tasks/delete-task.md b/content/shared/influxdb-v2/process-data/manage-tasks/delete-task.md index a450481a24..7c6c448f32 100644 --- a/content/shared/influxdb-v2/process-data/manage-tasks/delete-task.md +++ b/content/shared/influxdb-v2/process-data/manage-tasks/delete-task.md @@ -23,9 +23,9 @@ _To find the task ID, see [how to view tasks](/influxdb/version/process-data/man ## Delete a task using the InfluxDB API -Use the [`/tasks/TASK_ID` InfluxDB API endpoint](/influxdb/version/api/#operation/DeleteTasksID) to delete a task and all associated records (task runs, logs, and labels). +Use the [`/tasks/TASK_ID` InfluxDB API endpoint](/influxdb/version/api/#delete-/api/v2/tasks/-taskID-) to delete a task and all associated records (task runs, logs, and labels). 
-{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" api-ref="/influxdb/version/api/#operation/DeleteTasksID" >}} +{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" api-ref="/influxdb/version/api/#delete-/api/v2/tasks/-taskID-" >}} _To find the task ID, see [how to view tasks](/influxdb/version/process-data/manage-tasks/view-tasks/)_ diff --git a/content/shared/influxdb-v2/process-data/manage-tasks/run-task.md b/content/shared/influxdb-v2/process-data/manage-tasks/run-task.md index 30ffb2a91b..5427b7b5a5 100644 --- a/content/shared/influxdb-v2/process-data/manage-tasks/run-task.md +++ b/content/shared/influxdb-v2/process-data/manage-tasks/run-task.md @@ -52,13 +52,13 @@ influx task retry-failed \ ``` ## Run a task with the InfluxDB API -Use the [`/tasks/TASK_ID/runs` InfluxDB API endpoint](/influxdb/version/api/#operation/PostTasksIDRuns) +Use the [`/tasks/TASK_ID/runs` InfluxDB API endpoint](/influxdb/version/api/#post-/api/v2/tasks/-taskID-/runs) to manually start a task run. -{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs" api-ref="/influxdb/version/api/#operation/PostTasksIDRuns" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs" api-ref="/influxdb/version/api/#post-/api/v2/tasks/-taskID-/runs" >}} ### Retry failed task runs -Use the [`/tasks/TASK_ID/runs/RUN_ID/retry` InfluxDB API endpoint](/influxdb/version/api/#operation/PostTasksIDRunsIDRetry) +Use the [`/tasks/TASK_ID/runs/RUN_ID/retry` InfluxDB API endpoint](/influxdb/version/api/#post-/api/v2/tasks/-taskID-/runs/-runID-/retry) to retry a task run. 
-{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/retry" api-ref="/influxdb/version/api/#operation/PostTasksIDRunsIDRetry" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/retry" api-ref="/influxdb/version/api/#post-/api/v2/tasks/-taskID-/runs/-runID-/retry" >}} diff --git a/content/shared/influxdb-v2/process-data/manage-tasks/task-run-history.md b/content/shared/influxdb-v2/process-data/manage-tasks/task-run-history.md index 1aaf5f695e..c79bb5663b 100644 --- a/content/shared/influxdb-v2/process-data/manage-tasks/task-run-history.md +++ b/content/shared/influxdb-v2/process-data/manage-tasks/task-run-history.md @@ -49,23 +49,23 @@ To retry failed task runs, see how to [run tasks](/influxdb/version/process-data ## View logs for a task with the InfluxDB API -Use the [`/api/v2/tasks/TASK_ID/logs` InfluxDB API endpoint](/influxdb/version/api/#operation/GetTasksIDLogs) +Use the [`/api/v2/tasks/TASK_ID/logs` InfluxDB API endpoint](/influxdb/version/api/#get-/api/v2/tasks/-taskID-/logs) to view the log events for a task and exclude additional task metadata. -{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/logs" api-ref="/influxdb/version/api/#operation/GetTasksIDLogs" >}} +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/logs" api-ref="/influxdb/version/api/#get-/api/v2/tasks/-taskID-/logs" >}} ## View a task's run history with the InfluxDB API -Use the [`/tasks/TASK_ID/runs` InfluxDB API endpoint](/influxdb/version/api/#operation/GetTasksIDRuns) +Use the [`/tasks/TASK_ID/runs` InfluxDB API endpoint](/influxdb/version/api/#get-/api/v2/tasks/-taskID-/runs) to view a task's run history. 
-{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/{taskID}/runs" api-ref="/influxdb/version/api/#operation/GetTasksIDRuns" >}} +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/{taskID}/runs" api-ref="/influxdb/version/api/#get-/api/v2/tasks/-taskID-/runs" >}} ### View task run logs with the InfluxDB API To view logs associated with a run, use the -[`/api/v2/tasks/TASK_ID/runs/RUN_ID/logs` InfluxDB API endpoint](/influxdb/version/api/#operation/GetTasksIDRunsIDLogs). +[`/api/v2/tasks/TASK_ID/runs/RUN_ID/logs` InfluxDB API endpoint](/influxdb/version/api/#get-/api/v2/tasks/-taskID-/runs/-runID-/logs). -{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/logs" api-ref="/influxdb/version/api/#operation/GetTasksIDRunsIDLogs" >}} +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/logs" api-ref="/influxdb/version/api/#get-/api/v2/tasks/-taskID-/runs/-runID-/logs" >}} To retry failed task runs, see how to [run tasks](/influxdb/version/process-data/manage-tasks/run-task/). diff --git a/content/shared/influxdb-v2/process-data/manage-tasks/update-task.md b/content/shared/influxdb-v2/process-data/manage-tasks/update-task.md index 00a38fee39..932eb09207 100644 --- a/content/shared/influxdb-v2/process-data/manage-tasks/update-task.md +++ b/content/shared/influxdb-v2/process-data/manage-tasks/update-task.md @@ -61,9 +61,9 @@ influx task update -i 0343698431c35000 --status inactive ``` ## Update a task with the InfluxDB API -Use the [`/tasks/TASK_ID` InfluxDB API endpoint](/influxdb/version/api/#operation/PatchTasksID) to update properties of a task. +Use the [`/tasks/TASK_ID` InfluxDB API endpoint](/influxdb/version/api/#patch-/api/v2/tasks/-taskID-) to update properties of a task. 
-{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" api-ref="/influxdb/version/api/#operation/PatchTasksID" >}} +{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" api-ref="/influxdb/version/api/#patch-/api/v2/tasks/-taskID-" >}} In your request, pass the task ID and an object that contains the updated key-value pairs. To activate or inactivate a task, set the `status` property. diff --git a/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md b/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md index 5e8c65be11..c6f635199f 100644 --- a/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md +++ b/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md @@ -9,7 +9,7 @@ To query InfluxDB {{< current-version >}}, do one of the following: Send a Flux query request to the following endpoint: -{{% api-endpoint method="POST" endpoint="/api/v2/query" api-ref="/influxdb/version/api/#operation/PostQueryAnalyze" %}} +{{% api-endpoint method="POST" endpoint="/api/v2/query" api-ref="/influxdb/version/api/#post-/api/v2/query/analyze" %}} In your request, set the following: @@ -86,9 +86,9 @@ Replace the following with your values: To query InfluxDB {{< current-version >}} using the [InfluxQL query language](/influxdb/v2/reference/syntax/influxql/), send a request to the v1-compatible API endpoint: -{{% api-endpoint method="GET" endpoint="/query" api-ref="/influxdb/v2/api/v2/#operation/GetLegacyQuery" %}} +{{% api-endpoint method="GET" endpoint="/query" api-ref="/influxdb/v2/api/v2/#get-/query" %}} -{{% api-endpoint method="POST" endpoint="/query" api-ref="/influxdb/v2/api/v2/#operation/PostQueryV1" %}} +{{% api-endpoint method="POST" endpoint="/query" api-ref="/influxdb/v2/api/v2/#post-/query" %}} In your request, set the following: diff --git a/content/shared/influxdb-v2/query-data/flux/flux-version.md 
b/content/shared/influxdb-v2/query-data/flux/flux-version.md index 20e2079fa0..c00e7ed8b9 100644 --- a/content/shared/influxdb-v2/query-data/flux/flux-version.md +++ b/content/shared/influxdb-v2/query-data/flux/flux-version.md @@ -87,7 +87,7 @@ Table: keys: [] To return the version of Flux installed with InfluxDB using the InfluxDB API, use the [`/api/v2/query` endpoint](/influxdb/version/api/#tag/Query). -{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/query" api-ref="/influxdb/version/api/#operation/PostQuery" >}} +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/query" api-ref="/influxdb/version/api/#post-/api/v2/query" >}} Provide the following: - InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud{{% /show-in %}} host diff --git a/content/shared/influxdb-v2/query-data/influxql/dbrp.md b/content/shared/influxdb-v2/query-data/influxql/dbrp.md index b30c45681e..5efad77fb2 100644 --- a/content/shared/influxdb-v2/query-data/influxql/dbrp.md +++ b/content/shared/influxdb-v2/query-data/influxql/dbrp.md @@ -83,9 +83,9 @@ influx v1 dbrp create \ {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps` API endpoint](/influxdb/version/api/#operation/PostDBRP) to create a new DBRP mapping. +Use the [`/api/v2/dbrps` API endpoint](/influxdb/version/api/#post-/api/v2/dbrps) to create a new DBRP mapping. -{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="POST" api-ref="/influxdb/version/api/#operation/PostDBRP" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="POST" api-ref="/influxdb/version/api/#post-/api/v2/dbrps" >}} Include the following: @@ -155,9 +155,9 @@ influx v1 dbrp list --bucket-id 00oxo0oXx000x0Xo ``` {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps` API endpoint](/influxdb/version/api/#operation/GetDBRPs) to list DBRP mappings. +Use the [`/api/v2/dbrps` API endpoint](/influxdb/version/api/#get-/api/v2/dbrps) to list DBRP mappings. 
-{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="GET" api-ref="/influxdb/version/api/#operation/GetDBRPs" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="GET" api-ref="/influxdb/version/api/#get-/api/v2/dbrps" >}} Include the following: @@ -238,9 +238,9 @@ influx v1 dbrp update \ {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/version/api/#operation/GetDBRPs) to update DBRP mappings. +Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/version/api/#get-/api/v2/dbrps) to update DBRP mappings. -{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="PATCH" api-ref="/influxdb/version/api/#operation/PatchDBRPID" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="PATCH" api-ref="/influxdb/version/api/#patch-/api/v2/dbrps/-dbrpID-" >}} Include the following: @@ -306,9 +306,9 @@ influx v1 dbrp delete --id 00oxo0X0xx0XXoX0 {{% /tab-content %}} {{% tab-content %}} -Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/version/api/#operation/DeleteDBRPID) to delete a DBRP mapping. +Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/version/api/#delete-/api/v2/dbrps/-dbrpID-) to delete a DBRP mapping. 
-{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="DELETE" api-ref="/influxdb/version/api/#operation/DeleteDBRPID" >}} +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="DELETE" api-ref="/influxdb/version/api/#delete-/api/v2/dbrps/-dbrpID-" >}} Include the following: diff --git a/content/shared/influxdb-v2/write-data/delete-data.md b/content/shared/influxdb-v2/write-data/delete-data.md index 9a543133a4..9b36e26bcb 100644 --- a/content/shared/influxdb-v2/write-data/delete-data.md +++ b/content/shared/influxdb-v2/write-data/delete-data.md @@ -1,6 +1,6 @@ Use the [`influx` CLI](/influxdb/version/reference/cli/influx/) or the InfluxDB API -[`/api/v2/delete`](/influxdb/version/api/#operation/PostDelete) endpoint to delete +[`/api/v2/delete`](/influxdb/version/api/#post-/api/v2/delete) endpoint to delete data from an InfluxDB bucket. - [Delete data using the influx CLI](#delete-data-using-the-influx-cli) @@ -95,10 +95,10 @@ influx delete --bucket example-bucket \ {{% /show-in %}} ## Delete data using the API -Use the InfluxDB API [`/api/v2/delete` endpoint](/influxdb/version/api/#operation/PostDelete) +Use the InfluxDB API [`/api/v2/delete` endpoint](/influxdb/version/api/#post-/api/v2/delete) to delete points from InfluxDB. 
-{{< api-endpoint method="post" endpoint="http://localhost:8086/api/v2/delete" api-ref="/influxdb/version/api/#operation/PostDelete" >}} +{{< api-endpoint method="post" endpoint="http://localhost:8086/api/v2/delete" api-ref="/influxdb/version/api/#post-/api/v2/delete" >}} Include the following: @@ -167,6 +167,6 @@ curl --request POST http://localhost:8086/api/v2/delete?org=example-org&bucket=e {{% /show-in %}} -_For more information, see the [`/api/v2/delete` endpoint documentation](/influxdb/version/api/#operation/PostDelete)._ +_For more information, see the [`/api/v2/delete` endpoint documentation](/influxdb/version/api/#post-/api/v2/delete)._ To delete a bucket see [Delete a bucket](/influxdb/version/admin/buckets/delete-bucket/). diff --git a/content/shared/influxdb-v2/write-data/replication/replicate-data.md b/content/shared/influxdb-v2/write-data/replication/replicate-data.md index cabc178b64..17c8e8b70c 100644 --- a/content/shared/influxdb-v2/write-data/replication/replicate-data.md +++ b/content/shared/influxdb-v2/write-data/replication/replicate-data.md @@ -113,7 +113,7 @@ To create a remote connection to replicate data to, send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/remotes` endpoint: {{< keep-url >}} -{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#post-/api/v2/remotes" >}} Include the following parameters in your request: @@ -167,7 +167,7 @@ To retrieve existing connections, use the `/api/v2/remotes` endpoint with the `GET` request method: {{< keep-url >}} -{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" 
api-ref="/influxdb/version/api/#get-/api/v2/remotes" >}} Include the following parameters in your request: @@ -189,7 +189,7 @@ Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB `/api/v2/replications` endpoint to create a replication stream. {{< keep-url >}} -{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#post-/api/v2/remotes" >}} Include the following parameters in your request: @@ -250,7 +250,7 @@ information such as the current queue size, max queue size, and latest status code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint: {{< keep-url >}} -{{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" api-ref="/influxdb/version/api/#operation/GetReplications" >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" api-ref="/influxdb/version/api/#get-/api/v2/replications" >}} Include the following parameters in your request: diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index 560208c345..8db9d443c8 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -73,7 +73,7 @@ influxdb3 create distinct_cache \ To use the HTTP API to create a Distinct Value Cache, send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. 
-{{% api-endpoint method="POST" endpoint="/api/v3/configure/distinct_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureDistinctCache" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/configure/distinct_cache" api-ref="/influxdb3/version/api/v3/#post-/api/v3/configure/distinct_cache" %}} {{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}} diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 9ec48d4eea..ab589d4db5 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -36,9 +36,9 @@ WHERE To use the HTTP API to query cached data, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint and include the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query. -{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#get-/api/v3/query_sql" %}} -{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#post-/api/v3/query_sql" %}} {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index fd825711d1..47c286465e 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -72,9 +72,9 @@ In the examples above, replace the following: To use the HTTP API to query and output cache information from the system 
table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. -{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#get-/api/v3/query_sql" %}} -{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#post-/api/v3/query_sql" %}} ### Query all caches diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 5e57de0777..876c43b0fd 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -84,7 +84,7 @@ influxdb3 create last_cache \ To use the HTTP API to create a Last Value Cache, send a `POST` request to the `/api/v3/configure/last_cache` endpoint. -{{% api-endpoint method="POST" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureLastCache" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/version/api/v3/#post-/api/v3/configure/last_cache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}} diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 8f61adaa6e..4edc812a80 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -27,7 +27,7 @@ influxdb3 delete last_cache \ To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. 
-{{% api-endpoint method="DELETE" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/core/api/v3/#operation/DeleteConfigureLastCache" %}} +{{% api-endpoint method="DELETE" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/core/api/v3/#delete-/api/v3/configure/last_cache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} ```bash diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index 623e1c57fa..407316e267 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -71,9 +71,9 @@ In the examples above, replace the following: To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. -{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#get-/api/v3/query_sql" %}} -{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#post-/api/v3/query_sql" %}} ### Query all last value caches diff --git a/content/shared/influxdb3-admin/tokens/admin/create.md b/content/shared/influxdb3-admin/tokens/admin/create.md index d63014c267..0b1433200b 100644 --- a/content/shared/influxdb3-admin/tokens/admin/create.md +++ b/content/shared/influxdb3-admin/tokens/admin/create.md @@ -48,7 +48,7 @@ influxdb3 create token --admin {{% tab-content %}} Use the following endpoint to create an operator token: -{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}} +{{% api-endpoint method="POST" 
endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#post-/api/v3/configure/token/admin" %}} ```bash curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ @@ -86,7 +86,7 @@ The output contains the token string in plain text. {{% tab-content %}} Use the following endpoint to create a named admin token: -{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#post-/api/v3/configure/token/admin" %}} ```bash curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ diff --git a/content/shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md b/content/shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md new file mode 100644 index 0000000000..b0f44fa3ed --- /dev/null +++ b/content/shared/influxdb3-get-started/migrate-from-influxdb-v1-v2.md @@ -0,0 +1,29 @@ +InfluxDB 3 provides compatibility APIs and tools for migrating existing +InfluxDB v1 and v2 workloads. +Use existing client libraries and tools with minimal changes to your code. + +## Write data + +InfluxDB 3 supports v1 and v2 compatible write endpoints: + +- **`/api/v2/write`**: Compatible with InfluxDB v2 clients and tools +- **`/write`**: Compatible with InfluxDB v1 clients and tools + +Both endpoints accept line protocol and write data the same way. + +For more information, see [Use compatibility APIs to write data](/influxdb3/version/write-data/http-api/compatibility-apis/). + +## Query data + +InfluxDB 3 supports the v1 HTTP query API for InfluxQL queries: + +- **`/query`**: Compatible with InfluxDB v1 query clients + +For more information, see [Use the v1 HTTP query API](/influxdb3/version/query-data/execute-queries/influxdb-v1-api/). 
+ +## Client libraries + +Use InfluxDB v1 and v2 client libraries with {{% product-name %}}: + +- [v2 client libraries](/influxdb3/version/reference/client-libraries/v2/) +- [v1 client libraries](/influxdb3/version/reference/client-libraries/v1/) diff --git a/content/shared/influxdb3-internals/data-retention.md b/content/shared/influxdb3-internals/data-retention.md index f2a9c8c2f9..cf44bd9640 100644 --- a/content/shared/influxdb3-internals/data-retention.md +++ b/content/shared/influxdb3-internals/data-retention.md @@ -98,7 +98,7 @@ You can combine multiple duration units in a single value: ### Set database retention period -Use the [`influxdb3 create database` command](/influxdb3/version/reference/cli/influxdb3/create/database/) or the [/api/v3/configure/database](/influxdb3/version/api/v3/#operation/PostConfigureDatabase) HTTP API endpoint to create a database with a retention period: +Use the [`influxdb3 create database` command](/influxdb3/version/reference/cli/influxdb3/create/database/) or the [/api/v3/configure/database](/influxdb3/version/api/v3/#post-/api/v3/configure/database) HTTP API endpoint to create a database with a retention period: {{< code-tabs-wrapper >}} {{% code-tabs %}} @@ -180,7 +180,7 @@ Replace the following: ### Set table retention period -Use the [`influxdb3 create table` command](/influxdb3/enterprise/reference/cli/influxdb3/create/table/) or the [/api/v3/configure/table](/influxdb3/enterprise/reference/api/v3/#operation/PostConfigureTable) HTTP API endpoint to create a table with a retention period: +Use the [`influxdb3 create table` command](/influxdb3/enterprise/reference/cli/influxdb3/create/table/) or the [/api/v3/configure/table](/influxdb3/enterprise/reference/api/v3/#post-/api/v3/configure/table) HTTP API endpoint to create a table with a retention period: {{< code-tabs-wrapper >}} {{% code-tabs %}} diff --git a/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md 
b/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md index bd3fbf4a42..b20c539d79 100644 --- a/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md +++ b/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md @@ -197,7 +197,7 @@ AND room = 'Kitchen' {{% product-name %}} provides the `/api/v3/query_influxql` HTTP API endpoint for executing InfluxQL queries with parameters. -{{% api-endpoint method="POST" endpoint="/api/v3/query_influxql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQueryInfluxQL" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/query_influxql" api-ref="/influxdb3/version/api/v3/#post-/api/v3/query_influxql" %}} Send a JSON object that contains `db` (database), `q` (query), and `params` (parameter name-value pairs) properties in the request body. diff --git a/content/shared/influxdb3-query-guides/sql/parameterized-queries.md b/content/shared/influxdb3-query-guides/sql/parameterized-queries.md index 7518717bef..4d3de3e2ed 100644 --- a/content/shared/influxdb3-query-guides/sql/parameterized-queries.md +++ b/content/shared/influxdb3-query-guides/sql/parameterized-queries.md @@ -197,7 +197,7 @@ AND room = 'Kitchen' {{% product-name %}} provides the `/api/v3/query_sql` HTTP API endpoint for executing SQL queries with parameters. -{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#post-/api/v3/query_sql" %}} Send a JSON object that contains `db` (database), `q` (query), and `params` (parameter name-value pairs) properties in the request body. 
diff --git a/content/shared/influxdb3-write-guides/_index.md b/content/shared/influxdb3-write-guides/_index.md index 5d8f0724c2..400c3564a2 100644 --- a/content/shared/influxdb3-write-guides/_index.md +++ b/content/shared/influxdb3-write-guides/_index.md @@ -27,10 +27,10 @@ is the text-based format used to write data to InfluxDB. > and [client libraries](/influxdb3/version/write-data/client-libraries/). > > When bringing existing *v1* write workloads, use the {{% product-name %}} -> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#operation/PostV1Write). +> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#post-/write). > > When bringing existing *v2* write workloads, use the {{% product-name %}} -> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#operation/PostV2Write). +> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#post-/api/v2/write). > > **For Telegraf**, use the InfluxDB v1.x [`outputs.influxdb`](/telegraf/v1/output-plugins/influxdb/) or v2.x [`outputs.influxdb_v2`](/telegraf/v1/output-plugins/influxdb_v2/) output plugins. > See how to [use Telegraf to write data](/influxdb3/version/write-data/use-telegraf/). diff --git a/content/shared/influxdb3-write-guides/http-api/_index.md b/content/shared/influxdb3-write-guides/http-api/_index.md index 454185b6b8..bcbc7c49cd 100644 --- a/content/shared/influxdb3-write-guides/http-api/_index.md +++ b/content/shared/influxdb3-write-guides/http-api/_index.md @@ -10,10 +10,10 @@ Different APIs are available depending on your integration method. > and [client libraries](/influxdb3/version/write-data/client-libraries/). > > When bringing existing _v1_ write workloads, use the {{% product-name %}} -> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#operation/PostV1Write). +> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#post-/write). 
> > When bringing existing _v2_ write workloads, use the {{% product-name %}} -> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#operation/PostV2Write). +> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#post-/api/v2/write). > > **For Telegraf**, use the InfluxDB v1.x [`outputs.influxdb`](/telegraf/v1/output-plugins/influxdb/) or v2.x [`outputs.influxdb_v2`](/telegraf/v1/output-plugins/influxdb_v2/) output plugins. > See how to [use Telegraf to write data](/influxdb3/version/write-data/use-telegraf/). diff --git a/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md b/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md index dbd292032e..a6a6978364 100644 --- a/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md +++ b/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md @@ -13,10 +13,10 @@ to write points as line protocol data to {{% product-name %}}. > and [client libraries](/influxdb3/version/write-data/client-libraries/). > > When bringing existing v1 write workloads, use the {{% product-name %}} -> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#operation/PostV1Write). +> HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#post-/write). > > When bringing existing v2 write workloads, use the {{% product-name %}} -> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#operation/PostV2Write). +> HTTP API [`/api/v2/write` endpoint](/influxdb3/version/api/v3/#post-/api/v2/write). > > **For Telegraf**, use the InfluxDB v1.x [`outputs.influxdb`](/telegraf/v1/output-plugins/influxdb/) or v2.x [`outputs.influxdb_v2`](/telegraf/v1/output-plugins/influxdb_v2/) output plugins. > See how to [use Telegraf to write data](/influxdb3/version/write-data/use-telegraf/). @@ -34,7 +34,7 @@ to write points as line protocol data to {{% product-name %}}. 
The `/api/v2/write` InfluxDB v2 compatibility endpoint provides backwards compatibility with clients that can write data to InfluxDB OSS v2.x and Cloud 2 (TSM). -{{}} +{{}} ### Authenticate v2 API requests @@ -108,7 +108,7 @@ Use one of the following `precision` values in v2 API `/api/v2/write` requests: The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility with clients that can write data to InfluxDB v1.x. -{{}} +{{}} ### Authenticate v1 API requests diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 2530ed0cd1..07503f63c2 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -6,27 +6,6 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. -## v3.8.4 {date="2026-03-10"} - -### Core - -No adjustments in this release. -Core remains on v3.8.3. - -### Enterprise - -#### Security - -- **Read and write tokens can no longer delete databases**: Authorization now evaluates both the HTTP method and the request path. Previously, tokens with read or write access to a database could also issue delete requests. - -#### Bug fixes - -- **Stale compactor blocking startup**: Fixed an issue where stopped (stale) compactor entries in the catalog prevented new compactor nodes from starting. Enterprise now only considers currently running compactor nodes for conflict checks. - -- **WAL replay**: Fixed an issue where combined-mode deployments silently ignored the `--wal-replay-concurrency-limit` flag and always used serial replay (concurrency of 1). The flag is now respected. - -- Other bug fixes and performance improvements. - ## v3.8.3 {date="2026-02-24"} ### Core @@ -449,9 +428,9 @@ All Core updates are included in Enterprise. 
Additional Enterprise-specific feat ## v3.1.0 {date="2025-05-29"} -**Core**: revision `482dd8aac580c04f37e8713a8fffae89ae8bc264` +**Core**: revision 482dd8aac580c04f37e8713a8fffae89ae8bc264 -**Enterprise**: revision `2cb23cf32b67f9f0d0803e31b356813a1a151b00` +**Enterprise**: revision 2cb23cf32b67f9f0d0803e31b356813a1a151b00 ### Core diff --git a/content/telegraf/v1/aggregator-plugins/basicstats/_index.md b/content/telegraf/v1/aggregator-plugins/basicstats/_index.md index 761316c005..da433ed00d 100644 --- a/content/telegraf/v1/aggregator-plugins/basicstats/_index.md +++ b/content/telegraf/v1/aggregator-plugins/basicstats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/basicstats/README.md, Basic Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/basicstats/README.md, Basic Statistics Plugin Source --- # Basic Statistics Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/derivative/_index.md b/content/telegraf/v1/aggregator-plugins/derivative/_index.md index ac8c7622c9..17ae103695 100644 --- a/content/telegraf/v1/aggregator-plugins/derivative/_index.md +++ b/content/telegraf/v1/aggregator-plugins/derivative/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/derivative/README.md, Derivative Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/derivative/README.md, Derivative Plugin Source --- # Derivative Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/final/_index.md b/content/telegraf/v1/aggregator-plugins/final/_index.md index 50c2447772..e0b5b692b9 100644 --- 
a/content/telegraf/v1/aggregator-plugins/final/_index.md +++ b/content/telegraf/v1/aggregator-plugins/final/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/final/README.md, Final Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/final/README.md, Final Plugin Source --- # Final Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/histogram/_index.md b/content/telegraf/v1/aggregator-plugins/histogram/_index.md index 9e679991cf..2506c59217 100644 --- a/content/telegraf/v1/aggregator-plugins/histogram/_index.md +++ b/content/telegraf/v1/aggregator-plugins/histogram/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/histogram/README.md, Histogram Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/histogram/README.md, Histogram Plugin Source --- # Histogram Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/merge/_index.md b/content/telegraf/v1/aggregator-plugins/merge/_index.md index 46d645c450..1d9d594c99 100644 --- a/content/telegraf/v1/aggregator-plugins/merge/_index.md +++ b/content/telegraf/v1/aggregator-plugins/merge/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/merge/README.md, Merge Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/merge/README.md, Merge Plugin Source --- # Merge Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/minmax/_index.md 
b/content/telegraf/v1/aggregator-plugins/minmax/_index.md index 70c4bfd1bc..da6a71a6f6 100644 --- a/content/telegraf/v1/aggregator-plugins/minmax/_index.md +++ b/content/telegraf/v1/aggregator-plugins/minmax/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/minmax/README.md, Minimum-Maximum Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/minmax/README.md, Minimum-Maximum Plugin Source --- # Minimum-Maximum Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/quantile/_index.md b/content/telegraf/v1/aggregator-plugins/quantile/_index.md index b46d45cbb4..9b9776f5c8 100644 --- a/content/telegraf/v1/aggregator-plugins/quantile/_index.md +++ b/content/telegraf/v1/aggregator-plugins/quantile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/quantile/README.md, Quantile Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/quantile/README.md, Quantile Plugin Source --- # Quantile Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/starlark/_index.md b/content/telegraf/v1/aggregator-plugins/starlark/_index.md index 98a47d6833..ab6eaf7d8b 100644 --- a/content/telegraf/v1/aggregator-plugins/starlark/_index.md +++ b/content/telegraf/v1/aggregator-plugins/starlark/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/starlark/README.md, Starlark Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/starlark/README.md, 
Starlark Plugin Source --- # Starlark Aggregator Plugin diff --git a/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md b/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md index 9877f44bb5..1218ca7d1b 100644 --- a/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md +++ b/content/telegraf/v1/aggregator-plugins/valuecounter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/aggregators/valuecounter/README.md, Value Counter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/aggregators/valuecounter/README.md, Value Counter Plugin Source --- # Value Counter Aggregator Plugin diff --git a/content/telegraf/v1/input-plugins/activemq/_index.md b/content/telegraf/v1/input-plugins/activemq/_index.md index eb3db060eb..3498a4d237 100644 --- a/content/telegraf/v1/input-plugins/activemq/_index.md +++ b/content/telegraf/v1/input-plugins/activemq/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/activemq/README.md, ActiveMQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/activemq/README.md, ActiveMQ Plugin Source --- # ActiveMQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/aerospike/_index.md b/content/telegraf/v1/input-plugins/aerospike/_index.md index 377fdcacb3..da98a802fd 100644 --- a/content/telegraf/v1/input-plugins/aerospike/_index.md +++ b/content/telegraf/v1/input-plugins/aerospike/_index.md @@ -12,7 +12,7 @@ removal: v1.40.0 os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aerospike/README.md, Aerospike Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aerospike/README.md, Aerospike Plugin Source --- # Aerospike Input Plugin diff --git a/content/telegraf/v1/input-plugins/aliyuncms/_index.md b/content/telegraf/v1/input-plugins/aliyuncms/_index.md index 7d14303710..d948a4a706 100644 --- a/content/telegraf/v1/input-plugins/aliyuncms/_index.md +++ b/content/telegraf/v1/input-plugins/aliyuncms/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aliyuncms/README.md, Alibaba Cloud Monitor Service (Aliyun) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aliyuncms/README.md, Alibaba Cloud Monitor Service (Aliyun) Plugin Source --- # Alibaba Cloud Monitor Service (Aliyun) Input Plugin diff --git a/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md b/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md index fa47fb0ff7..c875d9e6f6 100644 --- a/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md +++ b/content/telegraf/v1/input-plugins/amd_rocm_smi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/amd_rocm_smi/README.md, AMD ROCm System Management Interface (SMI) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/amd_rocm_smi/README.md, AMD ROCm System Management Interface (SMI) Plugin Source --- # AMD ROCm System Management Interface (SMI) Input Plugin diff --git a/content/telegraf/v1/input-plugins/amqp_consumer/_index.md b/content/telegraf/v1/input-plugins/amqp_consumer/_index.md index cf55e8315f..1a049529a1 100644 --- a/content/telegraf/v1/input-plugins/amqp_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/amqp_consumer/_index.md @@ 
-10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/amqp_consumer/README.md, AMQP Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/amqp_consumer/README.md, AMQP Consumer Plugin Source --- # AMQP Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/apache/_index.md b/content/telegraf/v1/input-plugins/apache/_index.md index 5352b30b87..16f9df9b2d 100644 --- a/content/telegraf/v1/input-plugins/apache/_index.md +++ b/content/telegraf/v1/input-plugins/apache/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/apache/README.md, Apache Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/apache/README.md, Apache Plugin Source --- # Apache Input Plugin diff --git a/content/telegraf/v1/input-plugins/apcupsd/_index.md b/content/telegraf/v1/input-plugins/apcupsd/_index.md index 1f081d1938..49eabfd48a 100644 --- a/content/telegraf/v1/input-plugins/apcupsd/_index.md +++ b/content/telegraf/v1/input-plugins/apcupsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/apcupsd/README.md, APC UPSD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/apcupsd/README.md, APC UPSD Plugin Source --- # APC UPSD Input Plugin diff --git a/content/telegraf/v1/input-plugins/aurora/_index.md b/content/telegraf/v1/input-plugins/aurora/_index.md index d56c935f82..2aa80d69db 100644 --- a/content/telegraf/v1/input-plugins/aurora/_index.md +++ b/content/telegraf/v1/input-plugins/aurora/_index.md @@ 
-10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/aurora/README.md, Apache Aurora Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/aurora/README.md, Apache Aurora Plugin Source --- # Apache Aurora Input Plugin diff --git a/content/telegraf/v1/input-plugins/azure_monitor/_index.md b/content/telegraf/v1/input-plugins/azure_monitor/_index.md index 47a1bbbfed..525b66cdbd 100644 --- a/content/telegraf/v1/input-plugins/azure_monitor/_index.md +++ b/content/telegraf/v1/input-plugins/azure_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/azure_monitor/README.md, Azure Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/azure_monitor/README.md, Azure Monitor Plugin Source --- # Azure Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md b/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md index 0fdf846ca8..2da4ddb3ce 100644 --- a/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md +++ b/content/telegraf/v1/input-plugins/azure_storage_queue/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/azure_storage_queue/README.md, Azure Queue Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/azure_storage_queue/README.md, Azure Queue Storage Plugin Source --- # Azure Queue Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/bcache/_index.md b/content/telegraf/v1/input-plugins/bcache/_index.md 
index 04d998e4e0..ae2006ed74 100644 --- a/content/telegraf/v1/input-plugins/bcache/_index.md +++ b/content/telegraf/v1/input-plugins/bcache/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bcache/README.md, Bcache Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bcache/README.md, Bcache Plugin Source --- # Bcache Input Plugin diff --git a/content/telegraf/v1/input-plugins/beanstalkd/_index.md b/content/telegraf/v1/input-plugins/beanstalkd/_index.md index 66aa0f22a3..adbcf02017 100644 --- a/content/telegraf/v1/input-plugins/beanstalkd/_index.md +++ b/content/telegraf/v1/input-plugins/beanstalkd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/beanstalkd/README.md, Beanstalkd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/beanstalkd/README.md, Beanstalkd Plugin Source --- # Beanstalkd Input Plugin diff --git a/content/telegraf/v1/input-plugins/beat/_index.md b/content/telegraf/v1/input-plugins/beat/_index.md index 98d42d9ca6..54594f5214 100644 --- a/content/telegraf/v1/input-plugins/beat/_index.md +++ b/content/telegraf/v1/input-plugins/beat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/beat/README.md, Beat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/beat/README.md, Beat Plugin Source --- # Beat Input Plugin diff --git a/content/telegraf/v1/input-plugins/bind/_index.md b/content/telegraf/v1/input-plugins/bind/_index.md index 8bb23bbd69..5a7e2ec2f1 100644 --- 
a/content/telegraf/v1/input-plugins/bind/_index.md +++ b/content/telegraf/v1/input-plugins/bind/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bind/README.md, BIND 9 Nameserver Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bind/README.md, BIND 9 Nameserver Plugin Source --- # BIND 9 Nameserver Input Plugin diff --git a/content/telegraf/v1/input-plugins/bond/_index.md b/content/telegraf/v1/input-plugins/bond/_index.md index f4af68588b..e97304b7a6 100644 --- a/content/telegraf/v1/input-plugins/bond/_index.md +++ b/content/telegraf/v1/input-plugins/bond/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/bond/README.md, Bond Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/bond/README.md, Bond Plugin Source --- # Bond Input Plugin diff --git a/content/telegraf/v1/input-plugins/burrow/_index.md b/content/telegraf/v1/input-plugins/burrow/_index.md index 8cdf5715a4..79c6fa9cdb 100644 --- a/content/telegraf/v1/input-plugins/burrow/_index.md +++ b/content/telegraf/v1/input-plugins/burrow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/burrow/README.md, Burrow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/burrow/README.md, Burrow Plugin Source --- # Burrow Input Plugin diff --git a/content/telegraf/v1/input-plugins/ceph/_index.md b/content/telegraf/v1/input-plugins/ceph/_index.md index 4e2457853f..7b0a4054f1 100644 --- 
a/content/telegraf/v1/input-plugins/ceph/_index.md +++ b/content/telegraf/v1/input-plugins/ceph/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ceph/README.md, Ceph Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ceph/README.md, Ceph Storage Plugin Source --- # Ceph Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/cgroup/_index.md b/content/telegraf/v1/input-plugins/cgroup/_index.md index e18fb7861a..cedbcf5191 100644 --- a/content/telegraf/v1/input-plugins/cgroup/_index.md +++ b/content/telegraf/v1/input-plugins/cgroup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cgroup/README.md, Control Group Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cgroup/README.md, Control Group Plugin Source --- # Control Group Input Plugin diff --git a/content/telegraf/v1/input-plugins/chrony/_index.md b/content/telegraf/v1/input-plugins/chrony/_index.md index dde05fefea..47e700b850 100644 --- a/content/telegraf/v1/input-plugins/chrony/_index.md +++ b/content/telegraf/v1/input-plugins/chrony/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/chrony/README.md, chrony Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/chrony/README.md, chrony Plugin Source --- # chrony Input Plugin diff --git a/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md b/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md index dfde415719..9428d08979 100644 --- 
a/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md +++ b/content/telegraf/v1/input-plugins/cisco_telemetry_mdt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cisco_telemetry_mdt/README.md, Cisco Model-Driven Telemetry (MDT) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cisco_telemetry_mdt/README.md, Cisco Model-Driven Telemetry (MDT) Plugin Source --- # Cisco Model-Driven Telemetry (MDT) Input Plugin diff --git a/content/telegraf/v1/input-plugins/clickhouse/_index.md b/content/telegraf/v1/input-plugins/clickhouse/_index.md index 720a87aecf..78db381bb4 100644 --- a/content/telegraf/v1/input-plugins/clickhouse/_index.md +++ b/content/telegraf/v1/input-plugins/clickhouse/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/clickhouse/README.md, ClickHouse Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/clickhouse/README.md, ClickHouse Plugin Source --- # ClickHouse Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md b/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md index 85b9f8d224..acc51346a8 100644 --- a/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md +++ b/content/telegraf/v1/input-plugins/cloud_pubsub/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin 
Source --- # Google Cloud PubSub Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md b/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md index cb8c8fdbec..29da06e3c6 100644 --- a/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md +++ b/content/telegraf/v1/input-plugins/cloud_pubsub_push/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloud_pubsub_push/README.md, Google Cloud PubSub Push Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloud_pubsub_push/README.md, Google Cloud PubSub Push Plugin Source --- # Google Cloud PubSub Push Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloudwatch/_index.md b/content/telegraf/v1/input-plugins/cloudwatch/_index.md index dbb1717367..47d54ab4eb 100644 --- a/content/telegraf/v1/input-plugins/cloudwatch/_index.md +++ b/content/telegraf/v1/input-plugins/cloudwatch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloudwatch/README.md, Amazon CloudWatch Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloudwatch/README.md, Amazon CloudWatch Statistics Plugin Source --- # Amazon CloudWatch Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md b/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md index 3bea448608..975e1d740b 100644 --- a/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md +++ b/content/telegraf/v1/input-plugins/cloudwatch_metric_streams/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" 
related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cloudwatch_metric_streams/README.md, Amazon CloudWatch Metric Streams Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cloudwatch_metric_streams/README.md, Amazon CloudWatch Metric Streams Plugin Source --- # Amazon CloudWatch Metric Streams Input Plugin diff --git a/content/telegraf/v1/input-plugins/conntrack/_index.md b/content/telegraf/v1/input-plugins/conntrack/_index.md index b2c9190633..fbf5ab027e 100644 --- a/content/telegraf/v1/input-plugins/conntrack/_index.md +++ b/content/telegraf/v1/input-plugins/conntrack/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/conntrack/README.md, Netfilter Conntrack Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/conntrack/README.md, Netfilter Conntrack Plugin Source --- # Netfilter Conntrack Input Plugin diff --git a/content/telegraf/v1/input-plugins/consul/_index.md b/content/telegraf/v1/input-plugins/consul/_index.md index c96d4a6a3e..ab45b41fe2 100644 --- a/content/telegraf/v1/input-plugins/consul/_index.md +++ b/content/telegraf/v1/input-plugins/consul/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/consul/README.md, Hashicorp Consul Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/consul/README.md, Hashicorp Consul Plugin Source --- # Hashicorp Consul Input Plugin diff --git a/content/telegraf/v1/input-plugins/consul_agent/_index.md b/content/telegraf/v1/input-plugins/consul_agent/_index.md index 916e398181..1364562cf4 100644 --- a/content/telegraf/v1/input-plugins/consul_agent/_index.md +++ 
b/content/telegraf/v1/input-plugins/consul_agent/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/consul_agent/README.md, Hashicorp Consul Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/consul_agent/README.md, Hashicorp Consul Agent Plugin Source --- # Hashicorp Consul Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/couchbase/_index.md b/content/telegraf/v1/input-plugins/couchbase/_index.md index b6cccd27eb..6ae00773c5 100644 --- a/content/telegraf/v1/input-plugins/couchbase/_index.md +++ b/content/telegraf/v1/input-plugins/couchbase/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/couchbase/README.md, Couchbase Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/couchbase/README.md, Couchbase Plugin Source --- # Couchbase Input Plugin diff --git a/content/telegraf/v1/input-plugins/couchdb/_index.md b/content/telegraf/v1/input-plugins/couchdb/_index.md index d5fe0e4b15..5a84c73b83 100644 --- a/content/telegraf/v1/input-plugins/couchdb/_index.md +++ b/content/telegraf/v1/input-plugins/couchdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/couchdb/README.md, Apache CouchDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/couchdb/README.md, Apache CouchDB Plugin Source --- # Apache CouchDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/cpu/_index.md b/content/telegraf/v1/input-plugins/cpu/_index.md index 9cfd0a595c..290d534302 
100644 --- a/content/telegraf/v1/input-plugins/cpu/_index.md +++ b/content/telegraf/v1/input-plugins/cpu/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/cpu/README.md, CPU Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/cpu/README.md, CPU Plugin Source --- # CPU Input Plugin diff --git a/content/telegraf/v1/input-plugins/csgo/_index.md b/content/telegraf/v1/input-plugins/csgo/_index.md index 4adffe244e..4670882d3f 100644 --- a/content/telegraf/v1/input-plugins/csgo/_index.md +++ b/content/telegraf/v1/input-plugins/csgo/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/csgo/README.md, Counter-Strike Global Offensive (CSGO) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/csgo/README.md, Counter-Strike Global Offensive (CSGO) Plugin Source --- # Counter-Strike: Global Offensive (CSGO) Input Plugin diff --git a/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md b/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md index 8cf6feff3f..af43240e55 100644 --- a/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md +++ b/content/telegraf/v1/input-plugins/ctrlx_datalayer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ctrlx_datalayer/README.md, Bosch Rexroth ctrlX Data Layer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ctrlx_datalayer/README.md, Bosch Rexroth ctrlX Data Layer Plugin Source --- # Bosch Rexroth ctrlX Data Layer Input Plugin diff 
--git a/content/telegraf/v1/input-plugins/dcos/_index.md b/content/telegraf/v1/input-plugins/dcos/_index.md index 6340b5d0d2..38f7dc0003 100644 --- a/content/telegraf/v1/input-plugins/dcos/_index.md +++ b/content/telegraf/v1/input-plugins/dcos/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dcos/README.md, Mesosphere Distributed Cloud OS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dcos/README.md, Mesosphere Distributed Cloud OS Plugin Source --- # Mesosphere Distributed Cloud OS Input Plugin diff --git a/content/telegraf/v1/input-plugins/directory_monitor/_index.md b/content/telegraf/v1/input-plugins/directory_monitor/_index.md index 4a131d4a3e..83b1b80733 100644 --- a/content/telegraf/v1/input-plugins/directory_monitor/_index.md +++ b/content/telegraf/v1/input-plugins/directory_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/directory_monitor/README.md, Directory Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/directory_monitor/README.md, Directory Monitor Plugin Source --- # Directory Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/disk/_index.md b/content/telegraf/v1/input-plugins/disk/_index.md index 2f61daf2ff..49e88c18d8 100644 --- a/content/telegraf/v1/input-plugins/disk/_index.md +++ b/content/telegraf/v1/input-plugins/disk/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/disk/README.md, Disk Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/disk/README.md, Disk Plugin Source --- # Disk Input Plugin diff --git a/content/telegraf/v1/input-plugins/diskio/_index.md b/content/telegraf/v1/input-plugins/diskio/_index.md index 7a3ba1888e..b6e8e2da1a 100644 --- a/content/telegraf/v1/input-plugins/diskio/_index.md +++ b/content/telegraf/v1/input-plugins/diskio/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/diskio/README.md, DiskIO Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/diskio/README.md, DiskIO Plugin Source --- # DiskIO Input Plugin diff --git a/content/telegraf/v1/input-plugins/disque/_index.md b/content/telegraf/v1/input-plugins/disque/_index.md index f42f629372..6c9538d156 100644 --- a/content/telegraf/v1/input-plugins/disque/_index.md +++ b/content/telegraf/v1/input-plugins/disque/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/disque/README.md, Disque Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/disque/README.md, Disque Plugin Source --- # Disque Input Plugin diff --git a/content/telegraf/v1/input-plugins/dmcache/_index.md b/content/telegraf/v1/input-plugins/dmcache/_index.md index 3768a54cf5..a845854408 100644 --- a/content/telegraf/v1/input-plugins/dmcache/_index.md +++ b/content/telegraf/v1/input-plugins/dmcache/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dmcache/README.md, Device Mapper Cache Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dmcache/README.md, Device Mapper Cache Plugin Source --- # Device Mapper Cache Input Plugin diff --git a/content/telegraf/v1/input-plugins/dns_query/_index.md b/content/telegraf/v1/input-plugins/dns_query/_index.md index dba08d1a24..06d9603531 100644 --- a/content/telegraf/v1/input-plugins/dns_query/_index.md +++ b/content/telegraf/v1/input-plugins/dns_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dns_query/README.md, DNS Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dns_query/README.md, DNS Query Plugin Source --- # DNS Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/docker/_index.md b/content/telegraf/v1/input-plugins/docker/_index.md index 9493c7e137..2d5154f6c5 100644 --- a/content/telegraf/v1/input-plugins/docker/_index.md +++ b/content/telegraf/v1/input-plugins/docker/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/docker/README.md, Docker Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/docker/README.md, Docker Plugin Source --- # Docker Input Plugin diff --git a/content/telegraf/v1/input-plugins/docker_log/_index.md b/content/telegraf/v1/input-plugins/docker_log/_index.md index 155cddee71..16f41026a1 100644 --- a/content/telegraf/v1/input-plugins/docker_log/_index.md +++ b/content/telegraf/v1/input-plugins/docker_log/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/docker_log/README.md, 
Docker Log Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/docker_log/README.md, Docker Log Plugin Source --- # Docker Log Input Plugin diff --git a/content/telegraf/v1/input-plugins/dovecot/_index.md b/content/telegraf/v1/input-plugins/dovecot/_index.md index 595f35fb38..cf90b6be9d 100644 --- a/content/telegraf/v1/input-plugins/dovecot/_index.md +++ b/content/telegraf/v1/input-plugins/dovecot/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dovecot/README.md, Dovecot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dovecot/README.md, Dovecot Plugin Source --- # Dovecot Input Plugin diff --git a/content/telegraf/v1/input-plugins/dpdk/_index.md b/content/telegraf/v1/input-plugins/dpdk/_index.md index f23988ea45..8eb6440e14 100644 --- a/content/telegraf/v1/input-plugins/dpdk/_index.md +++ b/content/telegraf/v1/input-plugins/dpdk/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/dpdk/README.md, Data Plane Development Kit (DPDK) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/dpdk/README.md, Data Plane Development Kit (DPDK) Plugin Source --- # Data Plane Development Kit (DPDK) Input Plugin diff --git a/content/telegraf/v1/input-plugins/ecs/_index.md b/content/telegraf/v1/input-plugins/ecs/_index.md index b209580f72..750223b42d 100644 --- a/content/telegraf/v1/input-plugins/ecs/_index.md +++ b/content/telegraf/v1/input-plugins/ecs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ecs/README.md, 
Amazon Elastic Container Service Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ecs/README.md, Amazon Elastic Container Service Plugin Source --- # Amazon Elastic Container Service Input Plugin diff --git a/content/telegraf/v1/input-plugins/elasticsearch/_index.md b/content/telegraf/v1/input-plugins/elasticsearch/_index.md index 9720200e4d..43f0fc08b3 100644 --- a/content/telegraf/v1/input-plugins/elasticsearch/_index.md +++ b/content/telegraf/v1/input-plugins/elasticsearch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/elasticsearch/README.md, Elasticsearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/elasticsearch/README.md, Elasticsearch Plugin Source --- # Elasticsearch Input Plugin diff --git a/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md b/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md index 5a2958dbfc..aec2c44252 100644 --- a/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md +++ b/content/telegraf/v1/input-plugins/elasticsearch_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/elasticsearch_query/README.md, Elasticsearch Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/elasticsearch_query/README.md, Elasticsearch Query Plugin Source --- # Elasticsearch Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/ethtool/_index.md b/content/telegraf/v1/input-plugins/ethtool/_index.md index 876c5b753a..39c46f5035 100644 --- a/content/telegraf/v1/input-plugins/ethtool/_index.md +++ b/content/telegraf/v1/input-plugins/ethtool/_index.md @@ -10,7 +10,7 @@ 
introduced: "v1.13.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ethtool/README.md, Ethtool Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ethtool/README.md, Ethtool Plugin Source --- # Ethtool Input Plugin diff --git a/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md b/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md index 95e073e6f7..249402bed8 100644 --- a/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/eventhub_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/eventhub_consumer/README.md, Azure Event Hub Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/eventhub_consumer/README.md, Azure Event Hub Consumer Plugin Source --- # Azure Event Hub Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/exec/_index.md b/content/telegraf/v1/input-plugins/exec/_index.md index c542d6d0b6..b584b9034f 100644 --- a/content/telegraf/v1/input-plugins/exec/_index.md +++ b/content/telegraf/v1/input-plugins/exec/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/exec/README.md, Exec Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/exec/README.md, Exec Plugin Source --- # Exec Input Plugin diff --git a/content/telegraf/v1/input-plugins/execd/_index.md b/content/telegraf/v1/input-plugins/execd/_index.md index 3202d578c0..2b24f995d9 100644 --- a/content/telegraf/v1/input-plugins/execd/_index.md +++ b/content/telegraf/v1/input-plugins/execd/_index.md 
@@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/execd/README.md, Execd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/execd/README.md, Execd Plugin Source --- # Execd Input Plugin diff --git a/content/telegraf/v1/input-plugins/fail2ban/_index.md b/content/telegraf/v1/input-plugins/fail2ban/_index.md index 7d355b0385..b7fa982410 100644 --- a/content/telegraf/v1/input-plugins/fail2ban/_index.md +++ b/content/telegraf/v1/input-plugins/fail2ban/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fail2ban/README.md, Fail2ban Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fail2ban/README.md, Fail2ban Plugin Source --- # Fail2ban Input Plugin diff --git a/content/telegraf/v1/input-plugins/fibaro/_index.md b/content/telegraf/v1/input-plugins/fibaro/_index.md index 7e48c38010..98f35fbff8 100644 --- a/content/telegraf/v1/input-plugins/fibaro/_index.md +++ b/content/telegraf/v1/input-plugins/fibaro/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fibaro/README.md, Fibaro Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fibaro/README.md, Fibaro Plugin Source --- # Fibaro Input Plugin diff --git a/content/telegraf/v1/input-plugins/file/_index.md b/content/telegraf/v1/input-plugins/file/_index.md index 570bf0c16a..1900fe1508 100644 --- a/content/telegraf/v1/input-plugins/file/_index.md +++ b/content/telegraf/v1/input-plugins/file/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/file/README.md, File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/file/README.md, File Plugin Source --- # File Input Plugin diff --git a/content/telegraf/v1/input-plugins/filecount/_index.md b/content/telegraf/v1/input-plugins/filecount/_index.md index e3ab21c982..d383604230 100644 --- a/content/telegraf/v1/input-plugins/filecount/_index.md +++ b/content/telegraf/v1/input-plugins/filecount/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/filecount/README.md, Filecount Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/filecount/README.md, Filecount Plugin Source --- # Filecount Input Plugin diff --git a/content/telegraf/v1/input-plugins/filestat/_index.md b/content/telegraf/v1/input-plugins/filestat/_index.md index 445959fd45..53e55d55f5 100644 --- a/content/telegraf/v1/input-plugins/filestat/_index.md +++ b/content/telegraf/v1/input-plugins/filestat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/filestat/README.md, File statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/filestat/README.md, File statistics Plugin Source --- # File statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/fireboard/_index.md b/content/telegraf/v1/input-plugins/fireboard/_index.md index 324508952a..ae0f0b3d48 100644 --- a/content/telegraf/v1/input-plugins/fireboard/_index.md +++ b/content/telegraf/v1/input-plugins/fireboard/_index.md @@ -10,7 +10,7 @@ 
introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fireboard/README.md, Fireboard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fireboard/README.md, Fireboard Plugin Source --- # Fireboard Input Plugin diff --git a/content/telegraf/v1/input-plugins/firehose/_index.md b/content/telegraf/v1/input-plugins/firehose/_index.md index b7c7a20c4a..b8551bdc87 100644 --- a/content/telegraf/v1/input-plugins/firehose/_index.md +++ b/content/telegraf/v1/input-plugins/firehose/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/firehose/README.md, AWS Data Firehose Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/firehose/README.md, AWS Data Firehose Plugin Source --- # AWS Data Firehose Input Plugin diff --git a/content/telegraf/v1/input-plugins/fluentd/_index.md b/content/telegraf/v1/input-plugins/fluentd/_index.md index 4d8b89e73c..1a27d3b32c 100644 --- a/content/telegraf/v1/input-plugins/fluentd/_index.md +++ b/content/telegraf/v1/input-plugins/fluentd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fluentd/README.md, Fluentd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fluentd/README.md, Fluentd Plugin Source --- # Fluentd Input Plugin diff --git a/content/telegraf/v1/input-plugins/fritzbox/_index.md b/content/telegraf/v1/input-plugins/fritzbox/_index.md index 0c55e106dd..710ad6e380 100644 --- a/content/telegraf/v1/input-plugins/fritzbox/_index.md +++ 
b/content/telegraf/v1/input-plugins/fritzbox/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/fritzbox/README.md, Fritzbox Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/fritzbox/README.md, Fritzbox Plugin Source --- # Fritzbox Input Plugin diff --git a/content/telegraf/v1/input-plugins/github/_index.md b/content/telegraf/v1/input-plugins/github/_index.md index 6f2e64af10..68a54e067b 100644 --- a/content/telegraf/v1/input-plugins/github/_index.md +++ b/content/telegraf/v1/input-plugins/github/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/github/README.md, GitHub Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/github/README.md, GitHub Plugin Source --- # GitHub Input Plugin diff --git a/content/telegraf/v1/input-plugins/gnmi/_index.md b/content/telegraf/v1/input-plugins/gnmi/_index.md index ff455b9d8f..f1ba5f1244 100644 --- a/content/telegraf/v1/input-plugins/gnmi/_index.md +++ b/content/telegraf/v1/input-plugins/gnmi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/gnmi/README.md, gNMI (gRPC Network Management Interface) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/gnmi/README.md, gNMI (gRPC Network Management Interface) Plugin Source --- # gNMI (gRPC Network Management Interface) Input Plugin @@ -102,9 +102,6 @@ details on how to use them. 
## Only receive updates for the state, also suppresses receiving the initial state # updates_only = false - ## Emit a metric for "delete" messages - # emit_delete_metrics = false - ## Enforces the namespace of the first element as origin for aliases and ## response paths, required for backward compatibility. ## NOTE: Set to 'false' if possible but be aware that this might change the path tag! diff --git a/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md b/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md index 83c26d2fb5..cabb075374 100644 --- a/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md +++ b/content/telegraf/v1/input-plugins/google_cloud_storage/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/google_cloud_storage/README.md, Google Cloud Storage Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/google_cloud_storage/README.md, Google Cloud Storage Plugin Source --- # Google Cloud Storage Input Plugin diff --git a/content/telegraf/v1/input-plugins/graylog/_index.md b/content/telegraf/v1/input-plugins/graylog/_index.md index 2ce54eb3f4..c5fb9e72b3 100644 --- a/content/telegraf/v1/input-plugins/graylog/_index.md +++ b/content/telegraf/v1/input-plugins/graylog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/graylog/README.md, GrayLog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/graylog/README.md, GrayLog Plugin Source --- # GrayLog Input Plugin diff --git a/content/telegraf/v1/input-plugins/haproxy/_index.md b/content/telegraf/v1/input-plugins/haproxy/_index.md index 6a033ac835..1a45a15a68 100644 --- 
a/content/telegraf/v1/input-plugins/haproxy/_index.md +++ b/content/telegraf/v1/input-plugins/haproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/haproxy/README.md, HAProxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/haproxy/README.md, HAProxy Plugin Source --- # HAProxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/hddtemp/_index.md b/content/telegraf/v1/input-plugins/hddtemp/_index.md index a81942c1bc..693d645824 100644 --- a/content/telegraf/v1/input-plugins/hddtemp/_index.md +++ b/content/telegraf/v1/input-plugins/hddtemp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/hddtemp/README.md, HDDtemp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/hddtemp/README.md, HDDtemp Plugin Source --- # HDDtemp Input Plugin diff --git a/content/telegraf/v1/input-plugins/http/_index.md b/content/telegraf/v1/input-plugins/http/_index.md index f83815209e..11aed22063 100644 --- a/content/telegraf/v1/input-plugins/http/_index.md +++ b/content/telegraf/v1/input-plugins/http/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http/README.md, HTTP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http/README.md, HTTP Plugin Source --- # HTTP Input Plugin @@ -83,9 +83,6 @@ to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" - ## Use the local address for connecting, assigned by the OS by default - # local_address = "" - ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/http_listener_v2/_index.md b/content/telegraf/v1/input-plugins/http_listener_v2/_index.md index dac3a1ff70..49f2eacb49 100644 --- a/content/telegraf/v1/input-plugins/http_listener_v2/_index.md +++ b/content/telegraf/v1/input-plugins/http_listener_v2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http_listener_v2/README.md, HTTP Listener v2 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http_listener_v2/README.md, HTTP Listener v2 Plugin Source --- # HTTP Listener v2 Input Plugin diff --git a/content/telegraf/v1/input-plugins/http_response/_index.md b/content/telegraf/v1/input-plugins/http_response/_index.md index d1b1c14db9..5178a9a5e3 100644 --- a/content/telegraf/v1/input-plugins/http_response/_index.md +++ b/content/telegraf/v1/input-plugins/http_response/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/http_response/README.md, HTTP Response Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/http_response/README.md, HTTP Response Plugin Source --- # HTTP Response Input Plugin diff --git a/content/telegraf/v1/input-plugins/huebridge/_index.md b/content/telegraf/v1/input-plugins/huebridge/_index.md index a161383e2d..eccdcbfca4 100644 --- a/content/telegraf/v1/input-plugins/huebridge/_index.md +++ b/content/telegraf/v1/input-plugins/huebridge/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/huebridge/README.md, HueBridge Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/huebridge/README.md, HueBridge Plugin Source --- # HueBridge Input Plugin diff --git a/content/telegraf/v1/input-plugins/hugepages/_index.md b/content/telegraf/v1/input-plugins/hugepages/_index.md index 9c5d184829..dbc738ce2b 100644 --- a/content/telegraf/v1/input-plugins/hugepages/_index.md +++ b/content/telegraf/v1/input-plugins/hugepages/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/hugepages/README.md, Hugepages Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/hugepages/README.md, Hugepages Plugin Source --- # Hugepages Input Plugin diff --git a/content/telegraf/v1/input-plugins/icinga2/_index.md b/content/telegraf/v1/input-plugins/icinga2/_index.md index 5731b4edb6..ccb5c70091 100644 --- a/content/telegraf/v1/input-plugins/icinga2/_index.md +++ b/content/telegraf/v1/input-plugins/icinga2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/icinga2/README.md, Icinga2 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/icinga2/README.md, Icinga2 Plugin Source --- # Icinga2 Input Plugin diff --git a/content/telegraf/v1/input-plugins/infiniband/_index.md b/content/telegraf/v1/input-plugins/infiniband/_index.md index e49548c6bd..30801ba3db 100644 --- a/content/telegraf/v1/input-plugins/infiniband/_index.md +++ b/content/telegraf/v1/input-plugins/infiniband/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: 
"linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/infiniband/README.md, InfiniBand Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/infiniband/README.md, InfiniBand Plugin Source --- # InfiniBand Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb/_index.md b/content/telegraf/v1/input-plugins/influxdb/_index.md index 7f4aea245c..083b0f6db8 100644 --- a/content/telegraf/v1/input-plugins/influxdb/_index.md +++ b/content/telegraf/v1/input-plugins/influxdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb/README.md, InfluxDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb/README.md, InfluxDB Plugin Source --- # InfluxDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb_listener/_index.md b/content/telegraf/v1/input-plugins/influxdb_listener/_index.md index 7ee64afc35..4dad0abca2 100644 --- a/content/telegraf/v1/input-plugins/influxdb_listener/_index.md +++ b/content/telegraf/v1/input-plugins/influxdb_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb_listener/README.md, InfluxDB Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb_listener/README.md, InfluxDB Listener Plugin Source --- # InfluxDB Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md b/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md index acf23499f8..dbd52f76cb 100644 --- a/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md +++ 
b/content/telegraf/v1/input-plugins/influxdb_v2_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/influxdb_v2_listener/README.md, InfluxDB V2 Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/influxdb_v2_listener/README.md, InfluxDB V2 Listener Plugin Source --- # InfluxDB V2 Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_baseband/_index.md b/content/telegraf/v1/input-plugins/intel_baseband/_index.md index bec0e91282..85c5317681 100644 --- a/content/telegraf/v1/input-plugins/intel_baseband/_index.md +++ b/content/telegraf/v1/input-plugins/intel_baseband/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_baseband/README.md, Intel Baseband Accelerator Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_baseband/README.md, Intel Baseband Accelerator Plugin Source --- # Intel Baseband Accelerator Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_dlb/_index.md b/content/telegraf/v1/input-plugins/intel_dlb/_index.md index b8413787ec..91f126c89c 100644 --- a/content/telegraf/v1/input-plugins/intel_dlb/_index.md +++ b/content/telegraf/v1/input-plugins/intel_dlb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_dlb/README.md, Intel® Dynamic Load Balancer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_dlb/README.md, Intel® Dynamic Load Balancer Plugin Source --- # Intel® Dynamic Load Balancer Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/intel_pmt/_index.md b/content/telegraf/v1/input-plugins/intel_pmt/_index.md index 5ee77cf4f3..dfb76375c8 100644 --- a/content/telegraf/v1/input-plugins/intel_pmt/_index.md +++ b/content/telegraf/v1/input-plugins/intel_pmt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_pmt/README.md, Intel® Platform Monitoring Technology Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_pmt/README.md, Intel® Platform Monitoring Technology Plugin Source --- # Intel® Platform Monitoring Technology Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_pmu/_index.md b/content/telegraf/v1/input-plugins/intel_pmu/_index.md index c52e3aacef..9d8a39fefd 100644 --- a/content/telegraf/v1/input-plugins/intel_pmu/_index.md +++ b/content/telegraf/v1/input-plugins/intel_pmu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_pmu/README.md, Intel Performance Monitoring Unit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_pmu/README.md, Intel Performance Monitoring Unit Plugin Source --- # Intel Performance Monitoring Unit Plugin diff --git a/content/telegraf/v1/input-plugins/intel_powerstat/_index.md b/content/telegraf/v1/input-plugins/intel_powerstat/_index.md index f1c45121bf..36d8a5f3b0 100644 --- a/content/telegraf/v1/input-plugins/intel_powerstat/_index.md +++ b/content/telegraf/v1/input-plugins/intel_powerstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_powerstat/README.md, Intel PowerStat Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_powerstat/README.md, Intel PowerStat Plugin Source --- # Intel PowerStat Input Plugin diff --git a/content/telegraf/v1/input-plugins/intel_rdt/_index.md b/content/telegraf/v1/input-plugins/intel_rdt/_index.md index c390395523..979618b315 100644 --- a/content/telegraf/v1/input-plugins/intel_rdt/_index.md +++ b/content/telegraf/v1/input-plugins/intel_rdt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/intel_rdt/README.md, Intel RDT Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/intel_rdt/README.md, Intel RDT Plugin Source --- # Intel RDT Input Plugin diff --git a/content/telegraf/v1/input-plugins/internal/_index.md b/content/telegraf/v1/input-plugins/internal/_index.md index b60595fc28..86a9048fa5 100644 --- a/content/telegraf/v1/input-plugins/internal/_index.md +++ b/content/telegraf/v1/input-plugins/internal/_index.md @@ -10,7 +10,7 @@ introduced: "v1.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/internal/README.md, Telegraf Internal Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/internal/README.md, Telegraf Internal Plugin Source --- # Telegraf Internal Input Plugin diff --git a/content/telegraf/v1/input-plugins/internet_speed/_index.md b/content/telegraf/v1/input-plugins/internet_speed/_index.md index a22e8f7d8d..34b13ba045 100644 --- a/content/telegraf/v1/input-plugins/internet_speed/_index.md +++ b/content/telegraf/v1/input-plugins/internet_speed/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/internet_speed/README.md, Internet Speed Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/internet_speed/README.md, Internet Speed Monitor Plugin Source --- # Internet Speed Monitor Input Plugin diff --git a/content/telegraf/v1/input-plugins/interrupts/_index.md b/content/telegraf/v1/input-plugins/interrupts/_index.md index 3b20515cb0..a49e96b490 100644 --- a/content/telegraf/v1/input-plugins/interrupts/_index.md +++ b/content/telegraf/v1/input-plugins/interrupts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/interrupts/README.md, Interrupts Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/interrupts/README.md, Interrupts Plugin Source --- # Interrupts Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md b/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md index efba924e51..916bacf3a2 100644 --- a/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md +++ b/content/telegraf/v1/input-plugins/ipmi_sensor/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipmi_sensor/README.md, IPMI Sensor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipmi_sensor/README.md, IPMI Sensor Plugin Source --- # IPMI Sensor Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipset/_index.md b/content/telegraf/v1/input-plugins/ipset/_index.md index dc649f4df3..526813c129 100644 --- a/content/telegraf/v1/input-plugins/ipset/_index.md +++ b/content/telegraf/v1/input-plugins/ipset/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: 
"linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipset/README.md, Ipset Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipset/README.md, Ipset Plugin Source --- # Ipset Input Plugin diff --git a/content/telegraf/v1/input-plugins/iptables/_index.md b/content/telegraf/v1/input-plugins/iptables/_index.md index ebad367e7b..36efd03fa6 100644 --- a/content/telegraf/v1/input-plugins/iptables/_index.md +++ b/content/telegraf/v1/input-plugins/iptables/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/iptables/README.md, Iptables Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/iptables/README.md, Iptables Plugin Source --- # Iptables Input Plugin diff --git a/content/telegraf/v1/input-plugins/ipvs/_index.md b/content/telegraf/v1/input-plugins/ipvs/_index.md index af3bcb998f..52809fee57 100644 --- a/content/telegraf/v1/input-plugins/ipvs/_index.md +++ b/content/telegraf/v1/input-plugins/ipvs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ipvs/README.md, IPVS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ipvs/README.md, IPVS Plugin Source --- # IPVS Input Plugin diff --git a/content/telegraf/v1/input-plugins/jenkins/_index.md b/content/telegraf/v1/input-plugins/jenkins/_index.md index 18a8b1814f..26ec6e26b6 100644 --- a/content/telegraf/v1/input-plugins/jenkins/_index.md +++ b/content/telegraf/v1/input-plugins/jenkins/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jenkins/README.md, Jenkins Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jenkins/README.md, Jenkins Plugin Source --- # Jenkins Input Plugin diff --git a/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md b/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md index d9abc046d8..467d74043f 100644 --- a/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md +++ b/content/telegraf/v1/input-plugins/jolokia2_agent/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jolokia2_agent/README.md, Jolokia2 Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jolokia2_agent/README.md, Jolokia2 Agent Plugin Source --- # Jolokia2 Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md b/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md index f0b842deb7..816f1e7617 100644 --- a/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md +++ b/content/telegraf/v1/input-plugins/jolokia2_proxy/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jolokia2_proxy/README.md, Jolokia2 Proxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jolokia2_proxy/README.md, Jolokia2 Proxy Plugin Source --- # Jolokia2 Proxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md b/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md index 1a42ea3017..2a15f8462f 100644 --- a/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md +++ 
b/content/telegraf/v1/input-plugins/jti_openconfig_telemetry/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/jti_openconfig_telemetry/README.md, Juniper Telemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/jti_openconfig_telemetry/README.md, Juniper Telemetry Plugin Source --- # Juniper Telemetry Input Plugin diff --git a/content/telegraf/v1/input-plugins/kafka_consumer/_index.md b/content/telegraf/v1/input-plugins/kafka_consumer/_index.md index 0ce0ddef7c..344cc8c447 100644 --- a/content/telegraf/v1/input-plugins/kafka_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/kafka_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kafka_consumer/README.md, Apache Kafka Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kafka_consumer/README.md, Apache Kafka Consumer Plugin Source --- # Apache Kafka Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/kapacitor/_index.md b/content/telegraf/v1/input-plugins/kapacitor/_index.md index 96ec47145c..dd7e693895 100644 --- a/content/telegraf/v1/input-plugins/kapacitor/_index.md +++ b/content/telegraf/v1/input-plugins/kapacitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kapacitor/README.md, Kapacitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kapacitor/README.md, Kapacitor Plugin Source --- # Kapacitor Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/kernel/_index.md b/content/telegraf/v1/input-plugins/kernel/_index.md index 99ae684782..885f3b581d 100644 --- a/content/telegraf/v1/input-plugins/kernel/_index.md +++ b/content/telegraf/v1/input-plugins/kernel/_index.md @@ -10,7 +10,7 @@ introduced: "v0.11.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kernel/README.md, Kernel Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kernel/README.md, Kernel Plugin Source --- # Kernel Input Plugin diff --git a/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md b/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md index 91c30e94b4..c60667001f 100644 --- a/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md +++ b/content/telegraf/v1/input-plugins/kernel_vmstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kernel_vmstat/README.md, Kernel VM Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kernel_vmstat/README.md, Kernel VM Statistics Plugin Source --- # Kernel VM Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/kibana/_index.md b/content/telegraf/v1/input-plugins/kibana/_index.md index 81aba85e64..51b2370f7f 100644 --- a/content/telegraf/v1/input-plugins/kibana/_index.md +++ b/content/telegraf/v1/input-plugins/kibana/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kibana/README.md, Kibana Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kibana/README.md, Kibana Plugin Source --- # Kibana Input Plugin diff --git 
a/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md b/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md index fd5ae57afd..bcedef7581 100644 --- a/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/kinesis_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kinesis_consumer/README.md, Kinesis Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kinesis_consumer/README.md, Kinesis Consumer Plugin Source --- # Kinesis Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/knx_listener/_index.md b/content/telegraf/v1/input-plugins/knx_listener/_index.md index f8e3b64c1e..d570e22853 100644 --- a/content/telegraf/v1/input-plugins/knx_listener/_index.md +++ b/content/telegraf/v1/input-plugins/knx_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/knx_listener/README.md, KNX Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/knx_listener/README.md, KNX Plugin Source --- # KNX Input Plugin diff --git a/content/telegraf/v1/input-plugins/kube_inventory/_index.md b/content/telegraf/v1/input-plugins/kube_inventory/_index.md index 109a1f4a9d..509a3c0e5b 100644 --- a/content/telegraf/v1/input-plugins/kube_inventory/_index.md +++ b/content/telegraf/v1/input-plugins/kube_inventory/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kube_inventory/README.md, Kubernetes Inventory Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kube_inventory/README.md, Kubernetes Inventory Plugin Source --- # Kubernetes Inventory Input Plugin diff --git a/content/telegraf/v1/input-plugins/kubernetes/_index.md b/content/telegraf/v1/input-plugins/kubernetes/_index.md index ca819f3cd4..ab57f0864c 100644 --- a/content/telegraf/v1/input-plugins/kubernetes/_index.md +++ b/content/telegraf/v1/input-plugins/kubernetes/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/kubernetes/README.md, Kubernetes Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/kubernetes/README.md, Kubernetes Plugin Source --- # Kubernetes Input Plugin diff --git a/content/telegraf/v1/input-plugins/lanz/_index.md b/content/telegraf/v1/input-plugins/lanz/_index.md index 142a6d4ac4..4b36f65972 100644 --- a/content/telegraf/v1/input-plugins/lanz/_index.md +++ b/content/telegraf/v1/input-plugins/lanz/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lanz/README.md, Arista LANZ Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lanz/README.md, Arista LANZ Consumer Plugin Source --- # Arista LANZ Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/ldap/_index.md b/content/telegraf/v1/input-plugins/ldap/_index.md index e21452871f..5ff26f8f4b 100644 --- a/content/telegraf/v1/input-plugins/ldap/_index.md +++ b/content/telegraf/v1/input-plugins/ldap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ldap/README.md, LDAP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ldap/README.md, LDAP Plugin Source --- # LDAP Input Plugin diff --git a/content/telegraf/v1/input-plugins/leofs/_index.md b/content/telegraf/v1/input-plugins/leofs/_index.md index 79aa4366f1..2755a1882e 100644 --- a/content/telegraf/v1/input-plugins/leofs/_index.md +++ b/content/telegraf/v1/input-plugins/leofs/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/leofs/README.md, LeoFS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/leofs/README.md, LeoFS Plugin Source --- # LeoFS Input Plugin diff --git a/content/telegraf/v1/input-plugins/libvirt/_index.md b/content/telegraf/v1/input-plugins/libvirt/_index.md index e751aced37..642cad6578 100644 --- a/content/telegraf/v1/input-plugins/libvirt/_index.md +++ b/content/telegraf/v1/input-plugins/libvirt/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/libvirt/README.md, Libvirt Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/libvirt/README.md, Libvirt Plugin Source --- # Libvirt Input Plugin diff --git a/content/telegraf/v1/input-plugins/linux_cpu/_index.md b/content/telegraf/v1/input-plugins/linux_cpu/_index.md index 74a2d13f56..ce032c4bbd 100644 --- a/content/telegraf/v1/input-plugins/linux_cpu/_index.md +++ b/content/telegraf/v1/input-plugins/linux_cpu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/linux_cpu/README.md, Linux CPU Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/linux_cpu/README.md, Linux CPU Plugin Source --- # Linux CPU Input Plugin diff --git a/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md b/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md index 01c498cce7..3caf08b81f 100644 --- a/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md +++ b/content/telegraf/v1/input-plugins/linux_sysctl_fs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/linux_sysctl_fs/README.md, Linux Sysctl Filesystem Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/linux_sysctl_fs/README.md, Linux Sysctl Filesystem Plugin Source --- # Linux Sysctl Filesystem Input Plugin diff --git a/content/telegraf/v1/input-plugins/logql/_index.md b/content/telegraf/v1/input-plugins/logql/_index.md index d612a27a91..f75b79c5fe 100644 --- a/content/telegraf/v1/input-plugins/logql/_index.md +++ b/content/telegraf/v1/input-plugins/logql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/logql/README.md, LogQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/logql/README.md, LogQL Plugin Source --- # LogQL Input Plugin @@ -69,9 +69,6 @@ more details on how to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" - ## Use the local address for connecting, assigned by the OS by default - # local_address = "" - ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/logstash/_index.md b/content/telegraf/v1/input-plugins/logstash/_index.md index c9340e7689..9833ac5f8b 100644 --- a/content/telegraf/v1/input-plugins/logstash/_index.md +++ b/content/telegraf/v1/input-plugins/logstash/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/logstash/README.md, Logstash Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/logstash/README.md, Logstash Plugin Source --- # Logstash Input Plugin diff --git a/content/telegraf/v1/input-plugins/lustre2/_index.md b/content/telegraf/v1/input-plugins/lustre2/_index.md index 718ffa91db..e33ff9b63d 100644 --- a/content/telegraf/v1/input-plugins/lustre2/_index.md +++ b/content/telegraf/v1/input-plugins/lustre2/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lustre2/README.md, Lustre Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lustre2/README.md, Lustre Plugin Source --- # Lustre Input Plugin diff --git a/content/telegraf/v1/input-plugins/lvm/_index.md b/content/telegraf/v1/input-plugins/lvm/_index.md index 8cab33dd5a..878fbd368c 100644 --- a/content/telegraf/v1/input-plugins/lvm/_index.md +++ b/content/telegraf/v1/input-plugins/lvm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/lvm/README.md, Logical Volume Manager Plugin Source 
+ - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/lvm/README.md, Logical Volume Manager Plugin Source --- # Logical Volume Manager Input Plugin diff --git a/content/telegraf/v1/input-plugins/mailchimp/_index.md b/content/telegraf/v1/input-plugins/mailchimp/_index.md index 6fcee34322..41b2884dd8 100644 --- a/content/telegraf/v1/input-plugins/mailchimp/_index.md +++ b/content/telegraf/v1/input-plugins/mailchimp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.4" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mailchimp/README.md, Mailchimp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mailchimp/README.md, Mailchimp Plugin Source --- # Mailchimp Input Plugin diff --git a/content/telegraf/v1/input-plugins/marklogic/_index.md b/content/telegraf/v1/input-plugins/marklogic/_index.md index 96cf441bc9..3d6a548381 100644 --- a/content/telegraf/v1/input-plugins/marklogic/_index.md +++ b/content/telegraf/v1/input-plugins/marklogic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/marklogic/README.md, MarkLogic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/marklogic/README.md, MarkLogic Plugin Source --- # MarkLogic Input Plugin diff --git a/content/telegraf/v1/input-plugins/mavlink/_index.md b/content/telegraf/v1/input-plugins/mavlink/_index.md index 19a4f24b8e..e196a3125a 100644 --- a/content/telegraf/v1/input-plugins/mavlink/_index.md +++ b/content/telegraf/v1/input-plugins/mavlink/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mavlink/README.md, MavLink Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mavlink/README.md, MavLink Plugin Source --- # MavLink Input Plugin diff --git a/content/telegraf/v1/input-plugins/mcrouter/_index.md b/content/telegraf/v1/input-plugins/mcrouter/_index.md index 6f68324985..f99c44faaf 100644 --- a/content/telegraf/v1/input-plugins/mcrouter/_index.md +++ b/content/telegraf/v1/input-plugins/mcrouter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mcrouter/README.md, Mcrouter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mcrouter/README.md, Mcrouter Plugin Source --- # Mcrouter Input Plugin diff --git a/content/telegraf/v1/input-plugins/mdstat/_index.md b/content/telegraf/v1/input-plugins/mdstat/_index.md index 6317b5d78b..885960c940 100644 --- a/content/telegraf/v1/input-plugins/mdstat/_index.md +++ b/content/telegraf/v1/input-plugins/mdstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mdstat/README.md, MD RAID Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mdstat/README.md, MD RAID Statistics Plugin Source --- # MD RAID Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/mem/_index.md b/content/telegraf/v1/input-plugins/mem/_index.md index d8cc65783a..5b457d5f0f 100644 --- a/content/telegraf/v1/input-plugins/mem/_index.md +++ b/content/telegraf/v1/input-plugins/mem/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mem/README.md, Memory Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mem/README.md, Memory Plugin Source --- # Memory Input Plugin diff --git a/content/telegraf/v1/input-plugins/memcached/_index.md b/content/telegraf/v1/input-plugins/memcached/_index.md index 1653714fb3..15d6ca9508 100644 --- a/content/telegraf/v1/input-plugins/memcached/_index.md +++ b/content/telegraf/v1/input-plugins/memcached/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/memcached/README.md, Memcached Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/memcached/README.md, Memcached Plugin Source --- # Memcached Input Plugin diff --git a/content/telegraf/v1/input-plugins/mesos/_index.md b/content/telegraf/v1/input-plugins/mesos/_index.md index f1b2c504d0..270ffa2881 100644 --- a/content/telegraf/v1/input-plugins/mesos/_index.md +++ b/content/telegraf/v1/input-plugins/mesos/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mesos/README.md, Apache Mesos Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mesos/README.md, Apache Mesos Plugin Source --- # Apache Mesos Input Plugin diff --git a/content/telegraf/v1/input-plugins/minecraft/_index.md b/content/telegraf/v1/input-plugins/minecraft/_index.md index 3909fa3618..412c8aaa44 100644 --- a/content/telegraf/v1/input-plugins/minecraft/_index.md +++ b/content/telegraf/v1/input-plugins/minecraft/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/minecraft/README.md, Minecraft Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/minecraft/README.md, Minecraft Plugin Source --- # Minecraft Input Plugin diff --git a/content/telegraf/v1/input-plugins/mock/_index.md b/content/telegraf/v1/input-plugins/mock/_index.md index 97d7c726ab..54399ab981 100644 --- a/content/telegraf/v1/input-plugins/mock/_index.md +++ b/content/telegraf/v1/input-plugins/mock/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mock/README.md, Mock Data Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mock/README.md, Mock Data Plugin Source --- # Mock Data Input Plugin diff --git a/content/telegraf/v1/input-plugins/modbus/_index.md b/content/telegraf/v1/input-plugins/modbus/_index.md index 02291a3509..82f6191c17 100644 --- a/content/telegraf/v1/input-plugins/modbus/_index.md +++ b/content/telegraf/v1/input-plugins/modbus/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/modbus/README.md, Modbus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/modbus/README.md, Modbus Plugin Source --- diff --git a/content/telegraf/v1/input-plugins/mongodb/_index.md b/content/telegraf/v1/input-plugins/mongodb/_index.md index 6083e33c54..19c83f65f9 100644 --- a/content/telegraf/v1/input-plugins/mongodb/_index.md +++ b/content/telegraf/v1/input-plugins/mongodb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mongodb/README.md, MongoDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mongodb/README.md, MongoDB Plugin Source --- # MongoDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/monit/_index.md b/content/telegraf/v1/input-plugins/monit/_index.md index 7b24ecc47f..49be217a66 100644 --- a/content/telegraf/v1/input-plugins/monit/_index.md +++ b/content/telegraf/v1/input-plugins/monit/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/monit/README.md, Monit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/monit/README.md, Monit Plugin Source --- # Monit Input Plugin diff --git a/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md b/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md index 310bde179b..cccb517f3f 100644 --- a/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/mqtt_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mqtt_consumer/README.md, MQTT Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mqtt_consumer/README.md, MQTT Consumer Plugin Source --- # MQTT Consumer Input Plugin @@ -105,12 +105,6 @@ to use them. ## Connection timeout for initial connection in seconds # connection_timeout = "30s" - ## Maximum interval between reconnection attempts after a connection loss. - ## The MQTT library uses exponential backoff starting at 1 second up to this - ## ceiling. 
The library default is 10 minutes, which can cause long delays - ## before message flow resumes after a network outage. - # max_reconnect_interval = "30s" - ## Interval and ping timeout for keep-alive messages ## The sum of those options defines when a connection loss is detected. ## Note: The keep-alive interval needs to be greater or equal one second and diff --git a/content/telegraf/v1/input-plugins/multifile/_index.md b/content/telegraf/v1/input-plugins/multifile/_index.md index fd3581f29e..0d84a53615 100644 --- a/content/telegraf/v1/input-plugins/multifile/_index.md +++ b/content/telegraf/v1/input-plugins/multifile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/multifile/README.md, Multifile Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/multifile/README.md, Multifile Plugin Source --- # Multifile Input Plugin diff --git a/content/telegraf/v1/input-plugins/mysql/_index.md b/content/telegraf/v1/input-plugins/mysql/_index.md index 3b35d5f10e..5ffac05059 100644 --- a/content/telegraf/v1/input-plugins/mysql/_index.md +++ b/content/telegraf/v1/input-plugins/mysql/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/mysql/README.md, MySQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/mysql/README.md, MySQL Plugin Source --- # MySQL Input Plugin @@ -262,17 +262,7 @@ measurement name. 
## Metrics * Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUSES` - * wsrep_evs_repl_latency - a complex field containing multiple values is split - into separate fields - * wsrep_evs_repl_latency_min(float, seconds) - * wsrep_evs_repl_latency_avg(float, seconds) - * wsrep_evs_repl_latency_max(float, seconds) - * wsrep_evs_repl_latency_stdev(float, seconds) - * wsrep_evs_repl_latency_sample_size(float, number) * Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES` - * wsrep_provider_options - a complex field containing multiple values is split - into separate fields - * gcache_size(int, bytes) * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when the single-source replication is on. If the multi-source replication is set, then everything works differently, this metric does not work with multi-source diff --git a/content/telegraf/v1/input-plugins/nats/_index.md b/content/telegraf/v1/input-plugins/nats/_index.md index 5bb2106532..9732b8caa1 100644 --- a/content/telegraf/v1/input-plugins/nats/_index.md +++ b/content/telegraf/v1/input-plugins/nats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nats/README.md, NATS Server Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nats/README.md, NATS Server Monitoring Plugin Source --- # NATS Server Monitoring Input Plugin diff --git a/content/telegraf/v1/input-plugins/nats_consumer/_index.md b/content/telegraf/v1/input-plugins/nats_consumer/_index.md index f3fc6c6800..400c43a54d 100644 --- a/content/telegraf/v1/input-plugins/nats_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/nats_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ 
- - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nats_consumer/README.md, NATS Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nats_consumer/README.md, NATS Consumer Plugin Source --- # NATS Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/neoom_beaam/_index.md b/content/telegraf/v1/input-plugins/neoom_beaam/_index.md index 601fe0054a..6a817c629e 100644 --- a/content/telegraf/v1/input-plugins/neoom_beaam/_index.md +++ b/content/telegraf/v1/input-plugins/neoom_beaam/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/neoom_beaam/README.md, Neoom Beaam Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/neoom_beaam/README.md, Neoom Beaam Plugin Source --- # Neoom Beaam Input Plugin diff --git a/content/telegraf/v1/input-plugins/neptune_apex/_index.md b/content/telegraf/v1/input-plugins/neptune_apex/_index.md index 9362837fcf..0ad9cb2da4 100644 --- a/content/telegraf/v1/input-plugins/neptune_apex/_index.md +++ b/content/telegraf/v1/input-plugins/neptune_apex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/neptune_apex/README.md, Neptune Apex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/neptune_apex/README.md, Neptune Apex Plugin Source --- # Neptune Apex Input Plugin diff --git a/content/telegraf/v1/input-plugins/net/_index.md b/content/telegraf/v1/input-plugins/net/_index.md index 22490b1854..0292ac5f21 100644 --- a/content/telegraf/v1/input-plugins/net/_index.md +++ b/content/telegraf/v1/input-plugins/net/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, 
linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/net/README.md, Network Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/net/README.md, Network Plugin Source --- # Network Input Plugin diff --git a/content/telegraf/v1/input-plugins/net_response/_index.md b/content/telegraf/v1/input-plugins/net_response/_index.md index 29bdaf2941..c0cae121a8 100644 --- a/content/telegraf/v1/input-plugins/net_response/_index.md +++ b/content/telegraf/v1/input-plugins/net_response/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/net_response/README.md, Network Response Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/net_response/README.md, Network Response Plugin Source --- # Network Response Input Plugin diff --git a/content/telegraf/v1/input-plugins/netflow/_index.md b/content/telegraf/v1/input-plugins/netflow/_index.md index 95ff91fd48..c116ace4b0 100644 --- a/content/telegraf/v1/input-plugins/netflow/_index.md +++ b/content/telegraf/v1/input-plugins/netflow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/netflow/README.md, Netflow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/netflow/README.md, Netflow Plugin Source --- # Netflow Input Plugin diff --git a/content/telegraf/v1/input-plugins/netstat/_index.md b/content/telegraf/v1/input-plugins/netstat/_index.md index 6a338a4cd0..982e2aa6ed 100644 --- a/content/telegraf/v1/input-plugins/netstat/_index.md +++ b/content/telegraf/v1/input-plugins/netstat/_index.md @@ -10,7 +10,7 @@ introduced: 
"v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/netstat/README.md, Network Connection Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/netstat/README.md, Network Connection Statistics Plugin Source --- # Network Connection Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/nfsclient/_index.md b/content/telegraf/v1/input-plugins/nfsclient/_index.md index c2881bfa33..a55540f63e 100644 --- a/content/telegraf/v1/input-plugins/nfsclient/_index.md +++ b/content/telegraf/v1/input-plugins/nfsclient/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nfsclient/README.md, Network Filesystem Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nfsclient/README.md, Network Filesystem Plugin Source --- # Network Filesystem Input Plugin diff --git a/content/telegraf/v1/input-plugins/nftables/_index.md b/content/telegraf/v1/input-plugins/nftables/_index.md index aa8cb9a4e7..4c44121ee6 100644 --- a/content/telegraf/v1/input-plugins/nftables/_index.md +++ b/content/telegraf/v1/input-plugins/nftables/_index.md @@ -10,13 +10,17 @@ introduced: "v1.37.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nftables/README.md, Nftables Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nftables/README.md, Nftables Plugin Source --- # Nftables Plugin This plugin gathers packets and bytes counters for rules within -Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) firewall, as well as set element counts. 
+Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) firewall. + +> [!IMPORTANT] +> Rules are identified by the associated comment so those **comments have to be unique**! +> Rules without comment are ignored. **Introduced in:** Telegraf v1.37.0 **Tags:** network, system @@ -39,16 +43,11 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for ## Use the specified binary which will be looked-up in PATH # binary = "nft" - ## Use sudo for command execution, can be restricted to - ## "nft --json list table" + ## Use sudo for command execution, can be restricted to "nft --json list table" # use_sudo = false - ## Tables to monitor (may use "family table" format, e.g., "inet filter") + ## Tables to monitor containing both a counter and comment declaration # tables = [ "filter" ] - - ## Kinds of objects to monitor: "counters" (named counters), "sets", - ## (named sets), "anonymous-counters" (on commented rules). - # include = ["anonymous-counters"] ``` Since telegraf will fork a process to run nftables, `AmbientCapabilities` is @@ -59,32 +58,11 @@ required to transmit the capabilities bounding set to the forked process. 
You may edit your sudo configuration with the following: ```sudo -telegraf ALL=(root) NOPASSWD: /usr/bin/nft --json list table * +telegraf ALL=(root) NOPASSWD: /usr/bin/nft * ``` ## Metrics -Counters (when `counters` included): - -* nftables - * tags: - * table - * counter - * fields: - * pkts (integer, count) - * bytes (integer, bytes) - -Sets (when `sets` included): - -* nftables - * tags: - * table - * set - * field: - * count (integer, count) - -Anonymous counters on commented rules (when `anonymous-counters` included): - * nftables * tags: * table @@ -97,8 +75,6 @@ Anonymous counters on commented rules (when `anonymous-counters` included): ## Example Output ```text -> nftables,host=my_hostname,counter=my_counter,table=filter bytes=48968i,pkts=48i 1757367516000000000 -> nftables,host=my_hostname,set=my_set,table=filter count=10i 1757367516000000000 > nftables,chain=incoming,host=my_hostname,rule=comment_val_1,table=filter bytes=66435845i,pkts=133882i 1757367516000000000 -> nftables,chain=outgoing,host=my_hostname,rule=comment_val_2,table=filter bytes=25596512i,pkts=145129i 1757367516000000000 +> nftables,chain=outgoing,host=my_hostname,rule=comment_val2,table=filter bytes=25596512i,pkts=145129i 1757367516000000000 ``` diff --git a/content/telegraf/v1/input-plugins/nginx/_index.md b/content/telegraf/v1/input-plugins/nginx/_index.md index a856f1f410..8f11618bc7 100644 --- a/content/telegraf/v1/input-plugins/nginx/_index.md +++ b/content/telegraf/v1/input-plugins/nginx/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx/README.md, Nginx Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx/README.md, Nginx Plugin Source --- # Nginx Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_plus/_index.md 
b/content/telegraf/v1/input-plugins/nginx_plus/_index.md index 19039c2ad0..1c5cd2ca45 100644 --- a/content/telegraf/v1/input-plugins/nginx_plus/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_plus/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_plus/README.md, Nginx Plus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_plus/README.md, Nginx Plus Plugin Source --- # Nginx Plus Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md b/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md index 219e7dad27..01e79f6264 100644 --- a/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_plus_api/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_plus_api/README.md, Nginx Plus API Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_plus_api/README.md, Nginx Plus API Plugin Source --- # Nginx Plus API Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_sts/_index.md b/content/telegraf/v1/input-plugins/nginx_sts/_index.md index 8cedbcc79a..76ea47a8ff 100644 --- a/content/telegraf/v1/input-plugins/nginx_sts/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_sts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_sts/README.md, Nginx Stream Server Traffic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_sts/README.md, Nginx Stream Server 
Traffic Plugin Source --- # Nginx Stream Server Traffic Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md b/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md index a03f93b093..3d4d56fed2 100644 --- a/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_upstream_check/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_upstream_check/README.md, Nginx Upstream Check Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_upstream_check/README.md, Nginx Upstream Check Plugin Source --- # Nginx Upstream Check Input Plugin diff --git a/content/telegraf/v1/input-plugins/nginx_vts/_index.md b/content/telegraf/v1/input-plugins/nginx_vts/_index.md index 4542d2f7e7..4836ad4e97 100644 --- a/content/telegraf/v1/input-plugins/nginx_vts/_index.md +++ b/content/telegraf/v1/input-plugins/nginx_vts/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nginx_vts/README.md, Nginx Virtual Host Traffic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nginx_vts/README.md, Nginx Virtual Host Traffic Plugin Source --- # Nginx Virtual Host Traffic Input Plugin diff --git a/content/telegraf/v1/input-plugins/nomad/_index.md b/content/telegraf/v1/input-plugins/nomad/_index.md index c657b90a7b..09b15b6bd9 100644 --- a/content/telegraf/v1/input-plugins/nomad/_index.md +++ b/content/telegraf/v1/input-plugins/nomad/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nomad/README.md, Hashicorp Nomad Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nomad/README.md, Hashicorp Nomad Plugin Source --- # Hashicorp Nomad Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsd/_index.md b/content/telegraf/v1/input-plugins/nsd/_index.md index fb09170566..77ae15cce4 100644 --- a/content/telegraf/v1/input-plugins/nsd/_index.md +++ b/content/telegraf/v1/input-plugins/nsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsd/README.md, NLnet Labs Name Server Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsd/README.md, NLnet Labs Name Server Daemon Plugin Source --- # NLnet Labs Name Server Daemon Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsdp/_index.md b/content/telegraf/v1/input-plugins/nsdp/_index.md index c2de1c995c..25ce409d41 100644 --- a/content/telegraf/v1/input-plugins/nsdp/_index.md +++ b/content/telegraf/v1/input-plugins/nsdp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.34.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsdp/README.md, Netgear Switch Discovery Protocol Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsdp/README.md, Netgear Switch Discovery Protocol Plugin Source --- # Netgear Switch Discovery Protocol Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsq/_index.md b/content/telegraf/v1/input-plugins/nsq/_index.md index 549983ba03..7c0213c166 100644 --- a/content/telegraf/v1/input-plugins/nsq/_index.md +++ b/content/telegraf/v1/input-plugins/nsq/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: 
"freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsq/README.md, NSQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsq/README.md, NSQ Plugin Source --- # NSQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/nsq_consumer/_index.md b/content/telegraf/v1/input-plugins/nsq_consumer/_index.md index a87436ea2b..6edb934d1d 100644 --- a/content/telegraf/v1/input-plugins/nsq_consumer/_index.md +++ b/content/telegraf/v1/input-plugins/nsq_consumer/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nsq_consumer/README.md, NSQ Consumer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nsq_consumer/README.md, NSQ Consumer Plugin Source --- # NSQ Consumer Input Plugin diff --git a/content/telegraf/v1/input-plugins/nstat/_index.md b/content/telegraf/v1/input-plugins/nstat/_index.md index 3e74a8a81f..3d026d2046 100644 --- a/content/telegraf/v1/input-plugins/nstat/_index.md +++ b/content/telegraf/v1/input-plugins/nstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nstat/README.md, Kernel Network Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nstat/README.md, Kernel Network Statistics Plugin Source --- # Kernel Network Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/ntpq/_index.md b/content/telegraf/v1/input-plugins/ntpq/_index.md index ac1115ce83..a530c1ced7 100644 --- a/content/telegraf/v1/input-plugins/ntpq/_index.md +++ b/content/telegraf/v1/input-plugins/ntpq/_index.md @@ -10,7 +10,7 @@ 
introduced: "v0.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ntpq/README.md, Network Time Protocol Query Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ntpq/README.md, Network Time Protocol Query Plugin Source --- # Network Time Protocol Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/nvidia_smi/_index.md b/content/telegraf/v1/input-plugins/nvidia_smi/_index.md index d35bdbbfe4..b7eccd9dfd 100644 --- a/content/telegraf/v1/input-plugins/nvidia_smi/_index.md +++ b/content/telegraf/v1/input-plugins/nvidia_smi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/nvidia_smi/README.md, Nvidia System Management Interface (SMI) Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/nvidia_smi/README.md, Nvidia System Management Interface (SMI) Plugin Source --- # Nvidia System Management Interface (SMI) Input Plugin diff --git a/content/telegraf/v1/input-plugins/opcua/_index.md b/content/telegraf/v1/input-plugins/opcua/_index.md index 6100b0eaf4..feb631d027 100644 --- a/content/telegraf/v1/input-plugins/opcua/_index.md +++ b/content/telegraf/v1/input-plugins/opcua/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opcua/README.md, OPC UA Client Reader Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opcua/README.md, OPC UA Client Reader Plugin Source --- # OPC UA Client Reader Input Plugin @@ -115,34 +115,21 @@ to use them. 
## Node ID configuration ## name - field name to use in the output - ## id - OPC UA node ID string (e.g., "ns=0;i=2262" or "nsu=http://...;s=Name") ## namespace - OPC UA namespace of the node (integer value 0 thru 3) ## namespace_uri - OPC UA namespace URI (alternative to namespace for stable references) ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) ## identifier - OPC UA ID (tag as shown in opcua browser) ## default_tags - extra tags to be added to the output metric (optional) ## - ## Use EITHER 'id' OR the combination of 'namespace/namespace_uri' + 'identifier_type' + 'identifier' + ## Note: Specify either 'namespace' or 'namespace_uri', not both. ## Use either the inline notation or the bracketed notation, not both. - ## Inline notation using id string (recommended for simplicity) - # nodes = [ - # {name="ProductUri", id="ns=0;i=2262"}, - # {name="ServerState", id="ns=0;i=2259"}, - # ] - - ## Inline notation using individual fields (default_tags not supported yet) + ## Inline notation (default_tags not supported yet) # nodes = [ # {name="", namespace="", identifier_type="", identifier=""}, # ] - ## Bracketed notation using id string - # [[inputs.opcua.nodes]] - # name = "ProductUri" - # id = "ns=0;i=2262" - # default_tags = { tag1 = "value1", tag2 = "value2" } - - ## Bracketed notation using individual fields + ## Bracketed notation # [[inputs.opcua.nodes]] # name = "node1" # namespace = "" @@ -277,29 +264,13 @@ An OPC UA node ID may resemble: "ns=3;s=Temperature". 
In this example: `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 -### Using `id` String (Recommended) - -You can specify nodes using the standard OPC UA node ID string format directly: +To gather data from this node enter the following line into the 'nodes' +property above: ```text -{name="temp", id="ns=3;s=Temperature"}, +{field_name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` -This is simpler and matches the format shown in OPC UA browsers. - -### Using Individual Fields - -Alternatively, you can specify each component separately: - -```text -{name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, -``` - -> [!NOTE] -> Use either `id` OR the combination of -> `namespace`/`namespace_uri` + `identifier_type` + `identifier`. -> Do not mix both formats for the same node. - This node configuration produces a metric like this: ```text @@ -330,8 +301,7 @@ OPC UA supports two ways to specify namespaces: 2. **Namespace URI** (`namespace_uri`): A string URI that uniquely identifies the namespace. This is more stable across server restarts but requires the - plugin to fetch the namespace array from the server to resolve the URI to an - index. + plugin to fetch the namespace array from the server to resolve the URI to an index. 
**When to use namespace index:** diff --git a/content/telegraf/v1/input-plugins/opcua_listener/_index.md b/content/telegraf/v1/input-plugins/opcua_listener/_index.md index 043f03a557..184cd84591 100644 --- a/content/telegraf/v1/input-plugins/opcua_listener/_index.md +++ b/content/telegraf/v1/input-plugins/opcua_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.25.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opcua_listener/README.md, OPC UA Client Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opcua_listener/README.md, OPC UA Client Listener Plugin Source --- # OPC UA Client Listener Input Plugin @@ -137,7 +137,6 @@ to use them. # ## Node ID configuration ## name - field name to use in the output - ## id - OPC UA node ID string (e.g., "ns=0;i=2262" or "nsu=http://...;s=Name") ## namespace - OPC UA namespace of the node (integer value 0 thru 3) ## namespace_uri - OPC UA namespace URI (alternative to namespace for stable references) ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) @@ -145,7 +144,7 @@ to use them. ## default_tags - extra tags to be added to the output metric (optional) ## monitoring_params - additional settings for the monitored node (optional) ## - ## Use EITHER 'id' OR the combination of 'namespace/namespace_uri' + 'identifier_type' + 'identifier' + ## Note: Specify either 'namespace' or 'namespace_uri', not both. ## ## Monitoring parameters ## sampling_interval - interval at which the server should check for data @@ -179,25 +178,13 @@ to use them. ## ## Use either the inline notation or the bracketed notation, not both. 
# - ## Inline notation using id string (recommended for simplicity) - # nodes = [ - # {name="ProductUri", id="ns=0;i=2262"}, - # {name="ServerState", id="ns=0;i=2259"} - # ] - # - ## Inline notation using individual fields (default_tags and monitoring_params not supported yet) + ## Inline notation (default_tags and monitoring_params not supported yet) # nodes = [ # {name="node1", namespace="", identifier_type="", identifier=""}, # {name="node2", namespace="", identifier_type="", identifier=""} # ] # - ## Bracketed notation using id string - # [[inputs.opcua_listener.nodes]] - # name = "ProductUri" - # id = "ns=0;i=2262" - # default_tags = { tag1 = "value1", tag2 = "value2" } - - ## Bracketed notation using individual fields + ## Bracketed notation # [[inputs.opcua_listener.nodes]] # name = "node1" # namespace = "" @@ -298,13 +285,12 @@ to use them. # ## Multiple event groups are allowed. - ## Event nodes support both 'id' string format and individual fields. # [[inputs.opcua_listener.events]] # ## Polling interval for data collection # # sampling_interval = "10s" # ## Size of the notification queue # # queue_size = 10 - # ## Node parameter defaults for node definitions below (used when id is not specified) + # ## Node parameter defaults for node definitions below # # namespace = "" # # identifier_type = "" # ## Specifies OPCUA Event sources to filter on @@ -313,22 +299,17 @@ to use them. # fields = ["Severity", "Message", "Time"] # # ## Type or level of events to capture from the monitored nodes. - # ## Use 'id' string OR individual fields (namespace/identifier_type/identifier) # [inputs.opcua_listener.events.event_type_node] - # id = "ns=0;i=2041" - # # Or use individual fields: - # # namespace = "" - # # identifier_type = "" - # # identifier = "" + # namespace = "" + # identifier_type = "" + # identifier = "" # # ## Nodes to monitor for event notifications associated with the defined - # ## event type. Use 'id' string OR individual fields. 
+ # ## event type # [[inputs.opcua_listener.events.node_ids]] - # id = "ns=2;s=EventSource1" - # # Or use individual fields: - # # namespace = "" - # # identifier_type = "" - # # identifier = "" + # namespace = "" + # identifier_type = "" + # identifier = "" ## Enable workarounds required by some devices to work correctly # [inputs.opcua_listener.workarounds] @@ -347,29 +328,13 @@ An OPC UA node ID may resemble: "ns=3;s=Temperature". In this example: `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 -#### Using `id` String (Recommended) - -You can specify nodes using the standard OPC UA node ID string format directly: - -```text -{name="temp", id="ns=3;s=Temperature"}, -``` - -This is simpler and matches the format shown in OPC UA browsers. - -#### Using Individual Fields - -Alternatively, you can specify each component separately: +To gather data from this node enter the following line into the 'nodes' +property above: ```text {name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` -> [!NOTE] -> Use either `id` OR the combination of -> `namespace`/`namespace_uri` + `identifier_type` + `identifier`. -> Do not mix both formats for the same node. - This node configuration produces a metric like this: ```text @@ -400,8 +365,7 @@ OPC UA supports two ways to specify namespaces: 2. **Namespace URI** (`namespace_uri`): A string URI that uniquely identifies the namespace. This is more stable across server restarts but requires the - plugin to fetch the namespace array from the server to resolve the URI to an - index. + plugin to fetch the namespace array from the server to resolve the URI to an index. 
**When to use namespace index:** diff --git a/content/telegraf/v1/input-plugins/openldap/_index.md b/content/telegraf/v1/input-plugins/openldap/_index.md index 1dc9a64b4b..8c9250aa72 100644 --- a/content/telegraf/v1/input-plugins/openldap/_index.md +++ b/content/telegraf/v1/input-plugins/openldap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openldap/README.md, OpenLDAP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openldap/README.md, OpenLDAP Plugin Source --- # OpenLDAP Input Plugin diff --git a/content/telegraf/v1/input-plugins/openntpd/_index.md b/content/telegraf/v1/input-plugins/openntpd/_index.md index 180ca5a469..ac3c750555 100644 --- a/content/telegraf/v1/input-plugins/openntpd/_index.md +++ b/content/telegraf/v1/input-plugins/openntpd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openntpd/README.md, OpenNTPD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openntpd/README.md, OpenNTPD Plugin Source --- # OpenNTPD Input Plugin diff --git a/content/telegraf/v1/input-plugins/opensearch_query/_index.md b/content/telegraf/v1/input-plugins/opensearch_query/_index.md index 19c6d994c8..52f21aee04 100644 --- a/content/telegraf/v1/input-plugins/opensearch_query/_index.md +++ b/content/telegraf/v1/input-plugins/opensearch_query/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opensearch_query/README.md, OpenSearch Query Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opensearch_query/README.md, OpenSearch Query Plugin Source --- # OpenSearch Query Input Plugin diff --git a/content/telegraf/v1/input-plugins/opensmtpd/_index.md b/content/telegraf/v1/input-plugins/opensmtpd/_index.md index 9548403962..aaae6b5207 100644 --- a/content/telegraf/v1/input-plugins/opensmtpd/_index.md +++ b/content/telegraf/v1/input-plugins/opensmtpd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opensmtpd/README.md, OpenSMTPD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opensmtpd/README.md, OpenSMTPD Plugin Source --- # OpenSMTPD Input Plugin diff --git a/content/telegraf/v1/input-plugins/openstack/_index.md b/content/telegraf/v1/input-plugins/openstack/_index.md index b98b8f1521..32bc6d5fcb 100644 --- a/content/telegraf/v1/input-plugins/openstack/_index.md +++ b/content/telegraf/v1/input-plugins/openstack/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openstack/README.md, OpenStack Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openstack/README.md, OpenStack Plugin Source --- # OpenStack Input Plugin diff --git a/content/telegraf/v1/input-plugins/opentelemetry/_index.md b/content/telegraf/v1/input-plugins/opentelemetry/_index.md index 4b2cafc7f5..b6e8f1031f 100644 --- a/content/telegraf/v1/input-plugins/opentelemetry/_index.md +++ b/content/telegraf/v1/input-plugins/opentelemetry/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/opentelemetry/README.md, OpenTelemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/opentelemetry/README.md, OpenTelemetry Plugin Source --- # OpenTelemetry Input Plugin diff --git a/content/telegraf/v1/input-plugins/openweathermap/_index.md b/content/telegraf/v1/input-plugins/openweathermap/_index.md index 88e7eaf729..7d46b6649b 100644 --- a/content/telegraf/v1/input-plugins/openweathermap/_index.md +++ b/content/telegraf/v1/input-plugins/openweathermap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/openweathermap/README.md, OpenWeatherMap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/openweathermap/README.md, OpenWeatherMap Plugin Source --- # OpenWeatherMap Input Plugin diff --git a/content/telegraf/v1/input-plugins/p4runtime/_index.md b/content/telegraf/v1/input-plugins/p4runtime/_index.md index f0b79c03f2..7069ad147c 100644 --- a/content/telegraf/v1/input-plugins/p4runtime/_index.md +++ b/content/telegraf/v1/input-plugins/p4runtime/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/p4runtime/README.md, P4 Runtime Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/p4runtime/README.md, P4 Runtime Plugin Source --- # P4 Runtime Input Plugin diff --git a/content/telegraf/v1/input-plugins/passenger/_index.md b/content/telegraf/v1/input-plugins/passenger/_index.md index eec5673444..91113aa6ca 100644 --- a/content/telegraf/v1/input-plugins/passenger/_index.md +++ b/content/telegraf/v1/input-plugins/passenger/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/passenger/README.md, Passenger Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/passenger/README.md, Passenger Plugin Source --- # Passenger Input Plugin diff --git a/content/telegraf/v1/input-plugins/pf/_index.md b/content/telegraf/v1/input-plugins/pf/_index.md index 1c49189946..36ef96c819 100644 --- a/content/telegraf/v1/input-plugins/pf/_index.md +++ b/content/telegraf/v1/input-plugins/pf/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/pf/README.md, PF Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/pf/README.md, PF Plugin Source --- # PF Input Plugin diff --git a/content/telegraf/v1/input-plugins/pgbouncer/_index.md b/content/telegraf/v1/input-plugins/pgbouncer/_index.md index 802117955e..ae370ba515 100644 --- a/content/telegraf/v1/input-plugins/pgbouncer/_index.md +++ b/content/telegraf/v1/input-plugins/pgbouncer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/pgbouncer/README.md, PgBouncer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/pgbouncer/README.md, PgBouncer Plugin Source --- # PgBouncer Input Plugin diff --git a/content/telegraf/v1/input-plugins/phpfpm/_index.md b/content/telegraf/v1/input-plugins/phpfpm/_index.md index 20673fe7c2..627d72de85 100644 --- a/content/telegraf/v1/input-plugins/phpfpm/_index.md +++ b/content/telegraf/v1/input-plugins/phpfpm/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.10" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/phpfpm/README.md, PHP-FPM Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/phpfpm/README.md, PHP-FPM Plugin Source --- # PHP-FPM Input Plugin diff --git a/content/telegraf/v1/input-plugins/ping/_index.md b/content/telegraf/v1/input-plugins/ping/_index.md index 040760f29c..cd53f7175f 100644 --- a/content/telegraf/v1/input-plugins/ping/_index.md +++ b/content/telegraf/v1/input-plugins/ping/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.8" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ping/README.md, Ping Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ping/README.md, Ping Plugin Source --- # Ping Input Plugin diff --git a/content/telegraf/v1/input-plugins/postfix/_index.md b/content/telegraf/v1/input-plugins/postfix/_index.md index 9b62f77021..ad19afa873 100644 --- a/content/telegraf/v1/input-plugins/postfix/_index.md +++ b/content/telegraf/v1/input-plugins/postfix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postfix/README.md, Postfix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postfix/README.md, Postfix Plugin Source --- # Postfix Input Plugin diff --git a/content/telegraf/v1/input-plugins/postgresql/_index.md b/content/telegraf/v1/input-plugins/postgresql/_index.md index 13647079a0..ae1746d5c0 100644 --- a/content/telegraf/v1/input-plugins/postgresql/_index.md +++ b/content/telegraf/v1/input-plugins/postgresql/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postgresql/README.md, PostgreSQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postgresql/README.md, PostgreSQL Plugin Source --- # PostgreSQL Input Plugin diff --git a/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md b/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md index 8d0975b8ad..647cfea008 100644 --- a/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md +++ b/content/telegraf/v1/input-plugins/postgresql_extensible/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/postgresql_extensible/README.md, PostgreSQL Extensible Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/postgresql_extensible/README.md, PostgreSQL Extensible Plugin Source --- # PostgreSQL Extensible Input Plugin diff --git a/content/telegraf/v1/input-plugins/powerdns/_index.md b/content/telegraf/v1/input-plugins/powerdns/_index.md index 2f6e7357a5..a749f1f63f 100644 --- a/content/telegraf/v1/input-plugins/powerdns/_index.md +++ b/content/telegraf/v1/input-plugins/powerdns/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/powerdns/README.md, PowerDNS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/powerdns/README.md, PowerDNS Plugin Source --- # PowerDNS Input Plugin diff --git a/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md b/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md index 1f40df3aa3..f1236b748c 100644 --- a/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md +++ 
b/content/telegraf/v1/input-plugins/powerdns_recursor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/powerdns_recursor/README.md, PowerDNS Recursor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/powerdns_recursor/README.md, PowerDNS Recursor Plugin Source --- # PowerDNS Recursor Input Plugin diff --git a/content/telegraf/v1/input-plugins/processes/_index.md b/content/telegraf/v1/input-plugins/processes/_index.md index 4cfd56d601..da0f0b6003 100644 --- a/content/telegraf/v1/input-plugins/processes/_index.md +++ b/content/telegraf/v1/input-plugins/processes/_index.md @@ -10,7 +10,7 @@ introduced: "v0.11.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/processes/README.md, Processes Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/processes/README.md, Processes Plugin Source --- # Processes Input Plugin diff --git a/content/telegraf/v1/input-plugins/procstat/_index.md b/content/telegraf/v1/input-plugins/procstat/_index.md index 6c053924ab..92abefd334 100644 --- a/content/telegraf/v1/input-plugins/procstat/_index.md +++ b/content/telegraf/v1/input-plugins/procstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/procstat/README.md, Procstat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/procstat/README.md, Procstat Plugin Source --- # Procstat Input Plugin diff --git a/content/telegraf/v1/input-plugins/prometheus/_index.md b/content/telegraf/v1/input-plugins/prometheus/_index.md index b50c5041e6..f7fd966e59 100644 --- 
a/content/telegraf/v1/input-plugins/prometheus/_index.md +++ b/content/telegraf/v1/input-plugins/prometheus/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/prometheus/README.md, Prometheus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/prometheus/README.md, Prometheus Plugin Source --- # Prometheus Input Plugin diff --git a/content/telegraf/v1/input-plugins/promql/_index.md b/content/telegraf/v1/input-plugins/promql/_index.md index b1170b421c..73a9b52dca 100644 --- a/content/telegraf/v1/input-plugins/promql/_index.md +++ b/content/telegraf/v1/input-plugins/promql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/promql/README.md, PromQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/promql/README.md, PromQL Plugin Source --- # PromQL Input Plugin @@ -66,9 +66,6 @@ more details on how to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" - ## Use the local address for connecting, assigned by the OS by default - # local_address = "" - ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/input-plugins/proxmox/_index.md b/content/telegraf/v1/input-plugins/proxmox/_index.md index c8c0e2276a..b299a9458e 100644 --- a/content/telegraf/v1/input-plugins/proxmox/_index.md +++ b/content/telegraf/v1/input-plugins/proxmox/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/proxmox/README.md, Proxmox Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/proxmox/README.md, Proxmox Plugin Source --- # Proxmox Input Plugin diff --git a/content/telegraf/v1/input-plugins/puppetagent/_index.md b/content/telegraf/v1/input-plugins/puppetagent/_index.md index 89f732da61..b8e55563ef 100644 --- a/content/telegraf/v1/input-plugins/puppetagent/_index.md +++ b/content/telegraf/v1/input-plugins/puppetagent/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/puppetagent/README.md, Puppet Agent Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/puppetagent/README.md, Puppet Agent Plugin Source --- # Puppet Agent Input Plugin diff --git a/content/telegraf/v1/input-plugins/rabbitmq/_index.md b/content/telegraf/v1/input-plugins/rabbitmq/_index.md index f6ed7c171e..fe5adcd70d 100644 --- a/content/telegraf/v1/input-plugins/rabbitmq/_index.md +++ b/content/telegraf/v1/input-plugins/rabbitmq/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/rabbitmq/README.md, RabbitMQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/rabbitmq/README.md, RabbitMQ Plugin Source --- # RabbitMQ Input Plugin diff --git a/content/telegraf/v1/input-plugins/radius/_index.md b/content/telegraf/v1/input-plugins/radius/_index.md index 1850d08aef..2209af5a83 100644 --- a/content/telegraf/v1/input-plugins/radius/_index.md +++ b/content/telegraf/v1/input-plugins/radius/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/radius/README.md, Radius Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/radius/README.md, Radius Plugin Source --- # Radius Input Plugin diff --git a/content/telegraf/v1/input-plugins/raindrops/_index.md b/content/telegraf/v1/input-plugins/raindrops/_index.md index a31e3bf26c..9afb8c9dcd 100644 --- a/content/telegraf/v1/input-plugins/raindrops/_index.md +++ b/content/telegraf/v1/input-plugins/raindrops/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/raindrops/README.md, Raindrops Middleware Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/raindrops/README.md, Raindrops Middleware Plugin Source --- # Raindrops Middleware Input Plugin diff --git a/content/telegraf/v1/input-plugins/ras/_index.md b/content/telegraf/v1/input-plugins/ras/_index.md index a13cdfaf5a..abfc63847d 100644 --- a/content/telegraf/v1/input-plugins/ras/_index.md +++ b/content/telegraf/v1/input-plugins/ras/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ras/README.md, RAS Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ras/README.md, RAS Daemon Plugin Source --- # RAS Daemon Input Plugin diff --git a/content/telegraf/v1/input-plugins/ravendb/_index.md b/content/telegraf/v1/input-plugins/ravendb/_index.md index a2ec18ced0..6d1e98f1da 100644 --- a/content/telegraf/v1/input-plugins/ravendb/_index.md +++ b/content/telegraf/v1/input-plugins/ravendb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/ravendb/README.md, RavenDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/ravendb/README.md, RavenDB Plugin Source --- # RavenDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/redfish/_index.md b/content/telegraf/v1/input-plugins/redfish/_index.md index 069fbacd8f..31685bb8fd 100644 --- a/content/telegraf/v1/input-plugins/redfish/_index.md +++ b/content/telegraf/v1/input-plugins/redfish/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redfish/README.md, Redfish Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redfish/README.md, Redfish Plugin Source --- # Redfish Input Plugin diff --git a/content/telegraf/v1/input-plugins/redis/_index.md b/content/telegraf/v1/input-plugins/redis/_index.md index 9472c4ae38..fc322e18b1 100644 --- a/content/telegraf/v1/input-plugins/redis/_index.md +++ b/content/telegraf/v1/input-plugins/redis/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redis/README.md, Redis Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redis/README.md, Redis Plugin Source --- # Redis Input Plugin diff --git a/content/telegraf/v1/input-plugins/redis_sentinel/_index.md b/content/telegraf/v1/input-plugins/redis_sentinel/_index.md index ef77c13a0c..5d36437b6b 100644 --- a/content/telegraf/v1/input-plugins/redis_sentinel/_index.md +++ b/content/telegraf/v1/input-plugins/redis_sentinel/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/redis_sentinel/README.md, Redis Sentinel Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/redis_sentinel/README.md, Redis Sentinel Plugin Source --- # Redis Sentinel Input Plugin diff --git a/content/telegraf/v1/input-plugins/rethinkdb/_index.md b/content/telegraf/v1/input-plugins/rethinkdb/_index.md index 3d9881c4d5..d7cf4bb9fd 100644 --- a/content/telegraf/v1/input-plugins/rethinkdb/_index.md +++ b/content/telegraf/v1/input-plugins/rethinkdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/rethinkdb/README.md, RethinkDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/rethinkdb/README.md, RethinkDB Plugin Source --- # RethinkDB Input Plugin diff --git a/content/telegraf/v1/input-plugins/riak/_index.md b/content/telegraf/v1/input-plugins/riak/_index.md index 0531a8c986..0de9f52854 100644 --- a/content/telegraf/v1/input-plugins/riak/_index.md +++ b/content/telegraf/v1/input-plugins/riak/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.4" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/riak/README.md, Riak Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/riak/README.md, Riak Plugin Source --- # Riak Input Plugin diff --git a/content/telegraf/v1/input-plugins/riemann_listener/_index.md b/content/telegraf/v1/input-plugins/riemann_listener/_index.md index 62929360ea..806bb1dc97 100644 --- a/content/telegraf/v1/input-plugins/riemann_listener/_index.md +++ b/content/telegraf/v1/input-plugins/riemann_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/riemann_listener/README.md, Riemann Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/riemann_listener/README.md, Riemann Listener Plugin Source --- # Riemann Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/s7comm/_index.md b/content/telegraf/v1/input-plugins/s7comm/_index.md index 11776766af..1e3daa8e43 100644 --- a/content/telegraf/v1/input-plugins/s7comm/_index.md +++ b/content/telegraf/v1/input-plugins/s7comm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/s7comm/README.md, Siemens S7 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/s7comm/README.md, Siemens S7 Plugin Source --- # Siemens S7 Input Plugin diff --git a/content/telegraf/v1/input-plugins/salesforce/_index.md b/content/telegraf/v1/input-plugins/salesforce/_index.md index 990a581932..2fd239403c 100644 --- a/content/telegraf/v1/input-plugins/salesforce/_index.md +++ b/content/telegraf/v1/input-plugins/salesforce/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" 
os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/salesforce/README.md, Salesforce Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/salesforce/README.md, Salesforce Plugin Source --- # Salesforce Input Plugin diff --git a/content/telegraf/v1/input-plugins/sensors/_index.md b/content/telegraf/v1/input-plugins/sensors/_index.md index f2da8bc248..41c6aa27ef 100644 --- a/content/telegraf/v1/input-plugins/sensors/_index.md +++ b/content/telegraf/v1/input-plugins/sensors/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sensors/README.md, LM Sensors Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sensors/README.md, LM Sensors Plugin Source --- # LM Sensors Input Plugin diff --git a/content/telegraf/v1/input-plugins/sflow/_index.md b/content/telegraf/v1/input-plugins/sflow/_index.md index 36de6d5de1..867e44d41e 100644 --- a/content/telegraf/v1/input-plugins/sflow/_index.md +++ b/content/telegraf/v1/input-plugins/sflow/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sflow/README.md, SFlow Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sflow/README.md, SFlow Plugin Source --- # SFlow Input Plugin diff --git a/content/telegraf/v1/input-plugins/sip/_index.md b/content/telegraf/v1/input-plugins/sip/_index.md deleted file mode 100644 index 9cabab6c7c..0000000000 --- a/content/telegraf/v1/input-plugins/sip/_index.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -description: "Telegraf plugin for collecting metrics from SIP" -menu: - telegraf_v1_ref: - parent: 
input_plugins_reference - name: SIP - identifier: input-sip -tags: [SIP, "input-plugins", "configuration", "network"] -introduced: "v1.38.0" -os_support: "freebsd, linux, macos, solaris, windows" -related: - - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sip/README.md, SIP Plugin Source ---- - -# SIP Input Plugin - -This plugin gathers metrics about the health and availability of -[SIP (Session Initiation Protocol)](https://datatracker.ietf.org/doc/html/rfc3261) servers such as PBX systems, SIP -proxies, registrars, and VoIP service providers. It sends SIP requests -(typically OPTIONS) and measures response times and status codes. - -**Introduced in:** Telegraf v1.38.0 -**Tags:** network -**OS support:** all - -[sip]: https://datatracker.ietf.org/doc/html/rfc3261 - -## Global configuration options - -Plugins support additional global and plugin configuration settings for tasks -such as modifying metrics, tags, and fields, creating aliases, and configuring -plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for more details. - -[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins - -## Secret-store support - -This plugin supports secrets from secret-stores for the `username` and -`password` option. -See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how -to use them. 
- -[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets - -## Configuration - -```toml @sample.conf -# SIP (Session Initiation Protocol) health check plugin -[[inputs.sip]] - ## SIP server address to monitor - ## Format: sip://host[:port] or sips://host[:port] - ## sip:// - Standard SIP (default port 5060) - ## sips:// - Secure SIP with TLS (default port 5061) - server = "sip://sip.example.com:5060" - - ## Transport protocol - ## Valid values: udp, tcp, ws, wss - # transport = "udp" - - ## SIP method to use for health checks - ## Valid values: OPTIONS, INVITE, MESSAGE - # method = "OPTIONS" - - ## Request timeout - # timeout = "5s" - - ## From user as it appears in SIP header - # from_user = "telegraf" - - ## From domain (domain part of From header) - ## If not specified, uses the server hostname - # from_domain = "" - - ## To user as it appears in SIP header - ## If not specified, uses the same value as from_user - # to_user = "" - - ## Local address to use for outgoing requests - # local_address = "" - - ## SIP digest authentication credentials - ## Leave empty to use no authentication - # username = "" - # password = "" - - ## Optional TLS Config (only used for sips:// URLs or transport=tls/wss) - ## Set to true/false to enforce TLS being enabled/disabled. If not set, - ## enable TLS only if any of the other options are specified. 
- # tls_enable = - ## Trusted root certificates for server - # tls_ca = "/path/to/cafile" - ## Used for TLS client certificate authentication - # tls_cert = "/path/to/certfile" - ## Used for TLS client certificate authentication - # tls_key = "/path/to/keyfile" - ## Password for the key file if it is encrypted - # tls_key_pwd = "" - ## Send the specified TLS server name via SNI - # tls_server_name = "kubernetes.example.com" - ## Minimal TLS version to accept by the client - # tls_min_version = "TLS12" - ## List of ciphers to accept, by default all secure ciphers will be accepted - ## See https://pkg.go.dev/crypto/tls#pkg-constants for supported values. - ## Use "all", "secure" and "insecure" to add all support ciphers, secure - ## suites or insecure suites respectively. - # tls_cipher_suites = ["secure"] - ## Renegotiation method, "never", "once" or "freely" - # tls_renegotiation_method = "never" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -``` - -### SIP Methods - -The plugin supports the following SIP methods: - -- **OPTIONS** (recommended): Standard SIP method for health checks. Queries - server capabilities without establishing a session. -- **INVITE**: Initiates a session. Use with caution as it may create call - records. -- **MESSAGE**: Sends an instant message. Useful for testing messaging - infrastructure. - -## Troubleshooting - -### Permission Issues - -Some SIP implementations may require specific network permissions. If you -encounter permission errors, ensure Telegraf has appropriate network access. 
- -### Firewall Configuration - -Ensure that: - -- Outbound connections to SIP ports (typically 5060/5061) are allowed -- If using UDP, firewall allows UDP packets -- Return traffic is permitted for the transaction - -### Timeout Issues - -If experiencing frequent timeouts: - -- Increase the `timeout` value -- Verify network connectivity to the SIP server -- Check if the SIP server is configured to respond to the chosen method -- Ensure the correct transport protocol is selected - -### Response Codes - -Different SIP servers may respond with different status codes to OPTIONS requests: - -- `200 OK` - Server is operational and responding -- `404 Not Found` - User or resource doesn't exist (may still indicate healthy server) -- `401 Unauthorized` / `407 Proxy Authentication Required` - Authentication required - -## Metrics - -- sip - - tags: - - source (the SIP server address) - - method (the SIP method used, lowercase: options, invite, message) - - transport (the transport protocol: udp, tcp, ws, wss) - - status_code (the SIP response status code, e.g., "200", "404"; not always present, e.g. on timeout) - - fields: - - response_time_s (float, seconds) - Time taken to receive response - (for timeouts, this equals the configured timeout value) - - result (string) - The outcome of the request: the SIP reason phrase when - a response is received (e.g. 
"OK", "Not Found", "Unauthorized"), or a - sentinel value when no valid response is received (`Timeout`, `Error`, - `No Response`) - - server_agent (string, optional) - Value of the `Server` header from the - SIP response, identifying the remote server software - -## Example Output - -```text -sip,host=telegraf-host,method=options,source=sip://sip.example.com:5060,status_code=200,transport=udp response_time_s=0.023,result="OK" 1640000000000000000 -sip,host=telegraf-host,method=options,source=sip://unreachable.example.com:5060,transport=udp response_time_s=5.0,result="Timeout" 1640000000000000000 -sip,host=telegraf-host,method=options,source=sip://sip.provider.com:5060,status_code=404,transport=udp response_time_s=0.045,result="Not Found" 1640000000000000000 -sip,host=telegraf-host,method=options,source=sips://secure.voip.example.com:5061,status_code=200,transport=tcp response_time_s=0.067,result="OK",server_agent="Asterisk PBX 18.15.0" 1640000000000000000 -``` diff --git a/content/telegraf/v1/input-plugins/slab/_index.md b/content/telegraf/v1/input-plugins/slab/_index.md index 8e699cb75e..4d4c4fbc41 100644 --- a/content/telegraf/v1/input-plugins/slab/_index.md +++ b/content/telegraf/v1/input-plugins/slab/_index.md @@ -10,7 +10,7 @@ introduced: "v1.23.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/slab/README.md, Slab Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/slab/README.md, Slab Plugin Source --- # Slab Input Plugin diff --git a/content/telegraf/v1/input-plugins/slurm/_index.md b/content/telegraf/v1/input-plugins/slurm/_index.md index 7b3d6a352d..ea6b981db2 100644 --- a/content/telegraf/v1/input-plugins/slurm/_index.md +++ b/content/telegraf/v1/input-plugins/slurm/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/slurm/README.md, SLURM Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/slurm/README.md, SLURM Plugin Source --- # SLURM Input Plugin diff --git a/content/telegraf/v1/input-plugins/smart/_index.md b/content/telegraf/v1/input-plugins/smart/_index.md index 2eff623735..65f776c90b 100644 --- a/content/telegraf/v1/input-plugins/smart/_index.md +++ b/content/telegraf/v1/input-plugins/smart/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/smart/README.md, S.M.A.R.T. Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/smart/README.md, S.M.A.R.T. Plugin Source --- # S.M.A.R.T. Input Plugin @@ -225,25 +225,14 @@ smartctl --scan -d nvme - serial_no - wwn - fields: - - available_spare (NVMe) - - available_spare_threshold (NVMe) - - critical_temperature_time (NVMe) - - critical_warning (NVMe) - - error_log_entries (NVMe) - exit_status - health_ok - - media_errors (NVMe) - media_wearout_indicator - percent_lifetime_remain - - percentage_used (NVMe) - - power_cycle_count - - power_on_hours - read_error_rate - - seek_error_rate + - seek_error - temp_c - udma_crc_errors - - unsafe_shutdowns (NVMe) - - warning_temperature_time (NVMe) - wear_leveling_count - smart_attribute: diff --git a/content/telegraf/v1/input-plugins/smartctl/_index.md b/content/telegraf/v1/input-plugins/smartctl/_index.md index 36c1a65b5f..36f6a30823 100644 --- a/content/telegraf/v1/input-plugins/smartctl/_index.md +++ b/content/telegraf/v1/input-plugins/smartctl/_index.md @@ -10,7 +10,7 @@ introduced: "v1.31.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/smartctl/README.md, smartctl JSON 
Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/smartctl/README.md, smartctl JSON Plugin Source --- # smartctl JSON Input Plugin diff --git a/content/telegraf/v1/input-plugins/snmp/_index.md b/content/telegraf/v1/input-plugins/snmp/_index.md index 5e7f7433aa..7c3c6d8dc5 100644 --- a/content/telegraf/v1/input-plugins/snmp/_index.md +++ b/content/telegraf/v1/input-plugins/snmp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/snmp/README.md, SNMP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/snmp/README.md, SNMP Plugin Source --- # SNMP Input Plugin diff --git a/content/telegraf/v1/input-plugins/snmp_trap/_index.md b/content/telegraf/v1/input-plugins/snmp_trap/_index.md index 0c785bae63..527317f4bb 100644 --- a/content/telegraf/v1/input-plugins/snmp_trap/_index.md +++ b/content/telegraf/v1/input-plugins/snmp_trap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/snmp_trap/README.md, SNMP Trap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/snmp_trap/README.md, SNMP Trap Plugin Source --- # SNMP Trap Input Plugin diff --git a/content/telegraf/v1/input-plugins/socket_listener/_index.md b/content/telegraf/v1/input-plugins/socket_listener/_index.md index 18c9a3bcc1..f3886afe0c 100644 --- a/content/telegraf/v1/input-plugins/socket_listener/_index.md +++ b/content/telegraf/v1/input-plugins/socket_listener/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/socket_listener/README.md, Socket Listener Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/socket_listener/README.md, Socket Listener Plugin Source --- # Socket Listener Input Plugin diff --git a/content/telegraf/v1/input-plugins/socketstat/_index.md b/content/telegraf/v1/input-plugins/socketstat/_index.md index eb978f037b..74cafe45a6 100644 --- a/content/telegraf/v1/input-plugins/socketstat/_index.md +++ b/content/telegraf/v1/input-plugins/socketstat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/socketstat/README.md, Socket Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/socketstat/README.md, Socket Statistics Plugin Source --- # Socket Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/solr/_index.md b/content/telegraf/v1/input-plugins/solr/_index.md index 7bba310eee..c2e13acc4d 100644 --- a/content/telegraf/v1/input-plugins/solr/_index.md +++ b/content/telegraf/v1/input-plugins/solr/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/solr/README.md, Apache Solr Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/solr/README.md, Apache Solr Plugin Source --- # Apache Solr Input Plugin diff --git a/content/telegraf/v1/input-plugins/sql/_index.md b/content/telegraf/v1/input-plugins/sql/_index.md index 0d4919ebd0..dca95f6827 100644 --- a/content/telegraf/v1/input-plugins/sql/_index.md +++ b/content/telegraf/v1/input-plugins/sql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sql/README.md, SQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sql/README.md, SQL Plugin Source --- # SQL Input Plugin diff --git a/content/telegraf/v1/input-plugins/sqlserver/_index.md b/content/telegraf/v1/input-plugins/sqlserver/_index.md index 848d689e08..8248ab2132 100644 --- a/content/telegraf/v1/input-plugins/sqlserver/_index.md +++ b/content/telegraf/v1/input-plugins/sqlserver/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sqlserver/README.md, Microsoft SQL Server Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sqlserver/README.md, Microsoft SQL Server Plugin Source --- # Microsoft SQL Server Input Plugin diff --git a/content/telegraf/v1/input-plugins/stackdriver/_index.md b/content/telegraf/v1/input-plugins/stackdriver/_index.md index e4dd9dfa16..f3171a4b07 100644 --- a/content/telegraf/v1/input-plugins/stackdriver/_index.md +++ b/content/telegraf/v1/input-plugins/stackdriver/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/stackdriver/README.md, Stackdriver Google Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/stackdriver/README.md, Stackdriver Google Cloud Monitoring Plugin Source --- # Stackdriver Google Cloud Monitoring Input Plugin diff --git a/content/telegraf/v1/input-plugins/statsd/_index.md b/content/telegraf/v1/input-plugins/statsd/_index.md index 7bb86749a7..66fc897847 100644 --- a/content/telegraf/v1/input-plugins/statsd/_index.md +++ 
b/content/telegraf/v1/input-plugins/statsd/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/statsd/README.md, StatsD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/statsd/README.md, StatsD Plugin Source --- # StatsD Input Plugin @@ -88,7 +88,7 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for metric_separator = "_" ## Parses extensions to statsd in the datadog statsd format - ## currently supports metrics, datadog tags, events, and service checks. + ## currently supports metrics and datadog tags. ## http://docs.datadoghq.com/guides/dogstatsd/ datadog_extensions = false @@ -268,39 +268,6 @@ metric type: given time interval, a Distribution metric sends all the raw data during a time interval. -### Datadog Service Checks - -When `datadog_extensions` is enabled, the plugin also supports -[Datadog service checks](https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/) in the format: - -```text -_sc|||d:|h:|#:|m: -``` - -- `` - service check name (required) -- `` - 0=OK, 1=Warning, 2=Critical, 3=Unknown (required) -- `d:` - optional Unix timestamp -- `h:` - optional hostname override -- `#` - optional tags (same format as metrics) -- `m:` - optional message - -Example: - -```shell -echo "_sc|my.service.check|0|#env:prod|m:Service is healthy" | nc -u -w1 127.0.0.1 8125 -``` - -Service checks produce a metric with measurement name `statsd_service_check`: - -- **Tags:** - - `check_name`: The service check name - - `source`: Hostname (from `h:` field or default) - - Plus any custom tags from the `#` section -- **Fields:** - - `status` (int): Status code (0-3) - - `status_text` (string): "ok", "warning", "critical", or "unknown" - - `message` (string): Optional message from `m:` field - ## Plugin arguments 
- **protocol** string: Protocol used in listener - tcp or udp options @@ -326,7 +293,7 @@ measurements and tags. [dogstatsd format](http://docs.datadoghq.com/guides/dogstatsd/) - **datadog_extensions** boolean: Enable parsing of DataDog's extensions to [dogstatsd format](http://docs.datadoghq.com/guides/dogstatsd/) - including events and service checks + and more - **datadog_distributions** boolean: Enable parsing of the Distribution metric in [DataDog's distribution format](https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) - **datadog_keep_container_tag** boolean: Keep or drop the container id as tag. @@ -338,7 +305,6 @@ measurements and tags. [dogstatsd_format]: http://docs.datadoghq.com/guides/dogstatsd/ [dogstatsd_distri_format]: https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition -[dogstatsd_service_checks]: https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/ ## Statsd bucket -> InfluxDB line-protocol Templates diff --git a/content/telegraf/v1/input-plugins/supervisor/_index.md b/content/telegraf/v1/input-plugins/supervisor/_index.md index 0308383d94..6e6fb1b6aa 100644 --- a/content/telegraf/v1/input-plugins/supervisor/_index.md +++ b/content/telegraf/v1/input-plugins/supervisor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/supervisor/README.md, Supervisor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/supervisor/README.md, Supervisor Plugin Source --- # Supervisor Input Plugin diff --git a/content/telegraf/v1/input-plugins/suricata/_index.md b/content/telegraf/v1/input-plugins/suricata/_index.md index 00db0d4512..be8e55b8ec 100644 --- a/content/telegraf/v1/input-plugins/suricata/_index.md +++ b/content/telegraf/v1/input-plugins/suricata/_index.md @@ -10,7 
+10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/suricata/README.md, Suricata Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/suricata/README.md, Suricata Plugin Source --- # Suricata Input Plugin diff --git a/content/telegraf/v1/input-plugins/swap/_index.md b/content/telegraf/v1/input-plugins/swap/_index.md index 6316dac314..057c20d26c 100644 --- a/content/telegraf/v1/input-plugins/swap/_index.md +++ b/content/telegraf/v1/input-plugins/swap/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/swap/README.md, Swap Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/swap/README.md, Swap Plugin Source --- # Swap Input Plugin diff --git a/content/telegraf/v1/input-plugins/synproxy/_index.md b/content/telegraf/v1/input-plugins/synproxy/_index.md index b958653fa6..7293550a48 100644 --- a/content/telegraf/v1/input-plugins/synproxy/_index.md +++ b/content/telegraf/v1/input-plugins/synproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/synproxy/README.md, Synproxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/synproxy/README.md, Synproxy Plugin Source --- # Synproxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/syslog/_index.md b/content/telegraf/v1/input-plugins/syslog/_index.md index fa16c20855..77fc71a6c4 100644 --- a/content/telegraf/v1/input-plugins/syslog/_index.md +++ b/content/telegraf/v1/input-plugins/syslog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/syslog/README.md, Syslog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/syslog/README.md, Syslog Plugin Source --- # Syslog Input Plugin diff --git a/content/telegraf/v1/input-plugins/sysstat/_index.md b/content/telegraf/v1/input-plugins/sysstat/_index.md index 5dc88528ff..b61176ef97 100644 --- a/content/telegraf/v1/input-plugins/sysstat/_index.md +++ b/content/telegraf/v1/input-plugins/sysstat/_index.md @@ -10,7 +10,7 @@ introduced: "v0.12.1" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/sysstat/README.md, System Performance Statistics Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/sysstat/README.md, System Performance Statistics Plugin Source --- # System Performance Statistics Input Plugin diff --git a/content/telegraf/v1/input-plugins/system/_index.md b/content/telegraf/v1/input-plugins/system/_index.md index e8f6ddac2a..1f221504a7 100644 --- a/content/telegraf/v1/input-plugins/system/_index.md +++ b/content/telegraf/v1/input-plugins/system/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.6" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/system/README.md, System Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/system/README.md, System Plugin Source --- # System Input Plugin diff --git a/content/telegraf/v1/input-plugins/systemd_units/_index.md b/content/telegraf/v1/input-plugins/systemd_units/_index.md index d68b0d64c4..f873708c57 100644 --- a/content/telegraf/v1/input-plugins/systemd_units/_index.md +++ b/content/telegraf/v1/input-plugins/systemd_units/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "linux" 
related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/systemd_units/README.md, Systemd-Units Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/systemd_units/README.md, Systemd-Units Plugin Source --- # Systemd-Units Input Plugin diff --git a/content/telegraf/v1/input-plugins/tacacs/_index.md b/content/telegraf/v1/input-plugins/tacacs/_index.md index 085b3de279..5a74c92a85 100644 --- a/content/telegraf/v1/input-plugins/tacacs/_index.md +++ b/content/telegraf/v1/input-plugins/tacacs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tacacs/README.md, Tacacs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tacacs/README.md, Tacacs Plugin Source --- # Tacacs Input Plugin diff --git a/content/telegraf/v1/input-plugins/tail/_index.md b/content/telegraf/v1/input-plugins/tail/_index.md index 1c358c6925..f98ecd8c2e 100644 --- a/content/telegraf/v1/input-plugins/tail/_index.md +++ b/content/telegraf/v1/input-plugins/tail/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.2" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tail/README.md, Tail Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tail/README.md, Tail Plugin Source --- # Tail Input Plugin diff --git a/content/telegraf/v1/input-plugins/teamspeak/_index.md b/content/telegraf/v1/input-plugins/teamspeak/_index.md index 3834d03e13..d266fb446c 100644 --- a/content/telegraf/v1/input-plugins/teamspeak/_index.md +++ b/content/telegraf/v1/input-plugins/teamspeak/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/teamspeak/README.md, Teamspeak Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/teamspeak/README.md, Teamspeak Plugin Source --- # Teamspeak Input Plugin diff --git a/content/telegraf/v1/input-plugins/temp/_index.md b/content/telegraf/v1/input-plugins/temp/_index.md index e0708cdffb..38c22bdde7 100644 --- a/content/telegraf/v1/input-plugins/temp/_index.md +++ b/content/telegraf/v1/input-plugins/temp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "linux, macos, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/temp/README.md, Temperature Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/temp/README.md, Temperature Plugin Source --- # Temperature Input Plugin diff --git a/content/telegraf/v1/input-plugins/tengine/_index.md b/content/telegraf/v1/input-plugins/tengine/_index.md index 773aa9b61e..a96613537d 100644 --- a/content/telegraf/v1/input-plugins/tengine/_index.md +++ b/content/telegraf/v1/input-plugins/tengine/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tengine/README.md, Tengine Web Server Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tengine/README.md, Tengine Web Server Plugin Source --- # Tengine Web Server Input Plugin diff --git a/content/telegraf/v1/input-plugins/timex/_index.md b/content/telegraf/v1/input-plugins/timex/_index.md index 2e8c68d74a..919bb051e7 100644 --- a/content/telegraf/v1/input-plugins/timex/_index.md +++ b/content/telegraf/v1/input-plugins/timex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/timex/README.md, Timex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/timex/README.md, Timex Plugin Source --- # Timex Input Plugin diff --git a/content/telegraf/v1/input-plugins/tomcat/_index.md b/content/telegraf/v1/input-plugins/tomcat/_index.md index fd55d5ca4d..202aba94eb 100644 --- a/content/telegraf/v1/input-plugins/tomcat/_index.md +++ b/content/telegraf/v1/input-plugins/tomcat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/tomcat/README.md, Apache Tomcat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/tomcat/README.md, Apache Tomcat Plugin Source --- # Apache Tomcat Input Plugin diff --git a/content/telegraf/v1/input-plugins/trig/_index.md b/content/telegraf/v1/input-plugins/trig/_index.md index aaadc3279b..e193a3a743 100644 --- a/content/telegraf/v1/input-plugins/trig/_index.md +++ b/content/telegraf/v1/input-plugins/trig/_index.md @@ -10,7 +10,7 @@ introduced: "v0.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/trig/README.md, Trig Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/trig/README.md, Trig Plugin Source --- # Trig Input Plugin diff --git a/content/telegraf/v1/input-plugins/turbostat/_index.md b/content/telegraf/v1/input-plugins/turbostat/_index.md index b3d9a9e504..1184c74f8e 100644 --- a/content/telegraf/v1/input-plugins/turbostat/_index.md +++ b/content/telegraf/v1/input-plugins/turbostat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.36.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/turbostat/README.md, Turbostat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/turbostat/README.md, Turbostat Plugin Source --- # Turbostat Input Plugin diff --git a/content/telegraf/v1/input-plugins/twemproxy/_index.md b/content/telegraf/v1/input-plugins/twemproxy/_index.md index d1d1d4710c..6ef031b45e 100644 --- a/content/telegraf/v1/input-plugins/twemproxy/_index.md +++ b/content/telegraf/v1/input-plugins/twemproxy/_index.md @@ -10,7 +10,7 @@ introduced: "v0.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/twemproxy/README.md, Twemproxy Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/twemproxy/README.md, Twemproxy Plugin Source --- # Twemproxy Input Plugin diff --git a/content/telegraf/v1/input-plugins/unbound/_index.md b/content/telegraf/v1/input-plugins/unbound/_index.md index cbcc346bdd..426dbee024 100644 --- a/content/telegraf/v1/input-plugins/unbound/_index.md +++ b/content/telegraf/v1/input-plugins/unbound/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/unbound/README.md, Unbound Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/unbound/README.md, Unbound Plugin Source --- # Unbound Input Plugin diff --git a/content/telegraf/v1/input-plugins/upsd/_index.md b/content/telegraf/v1/input-plugins/upsd/_index.md index 0de57dba93..a5a573957e 100644 --- a/content/telegraf/v1/input-plugins/upsd/_index.md +++ b/content/telegraf/v1/input-plugins/upsd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/upsd/README.md, UPSD Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/upsd/README.md, UPSD Plugin Source --- # UPSD Input Plugin diff --git a/content/telegraf/v1/input-plugins/uwsgi/_index.md b/content/telegraf/v1/input-plugins/uwsgi/_index.md index 0aac387b46..d6ed91769f 100644 --- a/content/telegraf/v1/input-plugins/uwsgi/_index.md +++ b/content/telegraf/v1/input-plugins/uwsgi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/uwsgi/README.md, uWSGI Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/uwsgi/README.md, uWSGI Plugin Source --- # uWSGI Input Plugin diff --git a/content/telegraf/v1/input-plugins/varnish/_index.md b/content/telegraf/v1/input-plugins/varnish/_index.md index 0eeeed2483..e5b415b6e3 100644 --- a/content/telegraf/v1/input-plugins/varnish/_index.md +++ b/content/telegraf/v1/input-plugins/varnish/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/varnish/README.md, Varnish Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/varnish/README.md, Varnish Plugin Source --- # Varnish Input Plugin diff --git a/content/telegraf/v1/input-plugins/vault/_index.md b/content/telegraf/v1/input-plugins/vault/_index.md index 7ba6462ece..06e495e6a8 100644 --- a/content/telegraf/v1/input-plugins/vault/_index.md +++ b/content/telegraf/v1/input-plugins/vault/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/vault/README.md, Hashicorp Vault Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/vault/README.md, Hashicorp Vault Plugin Source --- # Hashicorp Vault Input Plugin diff --git a/content/telegraf/v1/input-plugins/vsphere/_index.md b/content/telegraf/v1/input-plugins/vsphere/_index.md index 9913b6f841..49cee33881 100644 --- a/content/telegraf/v1/input-plugins/vsphere/_index.md +++ b/content/telegraf/v1/input-plugins/vsphere/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/vsphere/README.md, VMware vSphere Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/vsphere/README.md, VMware vSphere Plugin Source --- # VMware vSphere Input Plugin diff --git a/content/telegraf/v1/input-plugins/webhooks/_index.md b/content/telegraf/v1/input-plugins/webhooks/_index.md index e653be2f06..17799c32b5 100644 --- a/content/telegraf/v1/input-plugins/webhooks/_index.md +++ b/content/telegraf/v1/input-plugins/webhooks/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/webhooks/README.md, Webhooks Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/webhooks/README.md, Webhooks Plugin Source --- # Webhooks Input Plugin diff --git a/content/telegraf/v1/input-plugins/whois/_index.md b/content/telegraf/v1/input-plugins/whois/_index.md index cdaea29ebb..637d9b1f81 100644 --- a/content/telegraf/v1/input-plugins/whois/_index.md +++ b/content/telegraf/v1/input-plugins/whois/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/whois/README.md, WHOIS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/whois/README.md, WHOIS Plugin Source --- # WHOIS Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_eventlog/_index.md b/content/telegraf/v1/input-plugins/win_eventlog/_index.md index 7c7cfb8201..d35eff5e8c 100644 --- a/content/telegraf/v1/input-plugins/win_eventlog/_index.md +++ b/content/telegraf/v1/input-plugins/win_eventlog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_eventlog/README.md, Windows Eventlog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_eventlog/README.md, Windows Eventlog Plugin Source --- # Windows Eventlog Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_perf_counters/_index.md b/content/telegraf/v1/input-plugins/win_perf_counters/_index.md index 4255193186..da5128b3b9 100644 --- a/content/telegraf/v1/input-plugins/win_perf_counters/_index.md +++ b/content/telegraf/v1/input-plugins/win_perf_counters/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.2" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_perf_counters/README.md, Windows Performance Counters Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_perf_counters/README.md, Windows Performance Counters Plugin Source --- # Windows Performance Counters Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_services/_index.md b/content/telegraf/v1/input-plugins/win_services/_index.md index 1360ba626b..230c4eb38a 100644 --- a/content/telegraf/v1/input-plugins/win_services/_index.md +++ b/content/telegraf/v1/input-plugins/win_services/_index.md 
@@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_services/README.md, Windows Services Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_services/README.md, Windows Services Plugin Source --- # Windows Services Input Plugin diff --git a/content/telegraf/v1/input-plugins/win_wmi/_index.md b/content/telegraf/v1/input-plugins/win_wmi/_index.md index 509fcdb912..5570241efc 100644 --- a/content/telegraf/v1/input-plugins/win_wmi/_index.md +++ b/content/telegraf/v1/input-plugins/win_wmi/_index.md @@ -10,7 +10,7 @@ introduced: "v1.26.0" os_support: "windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/win_wmi/README.md, Windows Management Instrumentation Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/win_wmi/README.md, Windows Management Instrumentation Plugin Source --- # Windows Management Instrumentation Input Plugin diff --git a/content/telegraf/v1/input-plugins/wireguard/_index.md b/content/telegraf/v1/input-plugins/wireguard/_index.md index 2b4cf5b318..56a81facd6 100644 --- a/content/telegraf/v1/input-plugins/wireguard/_index.md +++ b/content/telegraf/v1/input-plugins/wireguard/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/wireguard/README.md, Wireguard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/wireguard/README.md, Wireguard Plugin Source --- # Wireguard Input Plugin diff --git a/content/telegraf/v1/input-plugins/wireless/_index.md b/content/telegraf/v1/input-plugins/wireless/_index.md index 5d4035ea8f..e4004b99ee 100644 --- a/content/telegraf/v1/input-plugins/wireless/_index.md +++ 
b/content/telegraf/v1/input-plugins/wireless/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/wireless/README.md, Wireless Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/wireless/README.md, Wireless Plugin Source --- # Wireless Input Plugin diff --git a/content/telegraf/v1/input-plugins/x509_cert/_index.md b/content/telegraf/v1/input-plugins/x509_cert/_index.md index 71476f6c98..2ba100c2bf 100644 --- a/content/telegraf/v1/input-plugins/x509_cert/_index.md +++ b/content/telegraf/v1/input-plugins/x509_cert/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/x509_cert/README.md, x509 Certificate Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/x509_cert/README.md, x509 Certificate Plugin Source --- # x509 Certificate Input Plugin diff --git a/content/telegraf/v1/input-plugins/xtremio/_index.md b/content/telegraf/v1/input-plugins/xtremio/_index.md index 9a7ce75f99..820b40f849 100644 --- a/content/telegraf/v1/input-plugins/xtremio/_index.md +++ b/content/telegraf/v1/input-plugins/xtremio/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/xtremio/README.md, Dell EMC XtremIO Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/xtremio/README.md, Dell EMC XtremIO Plugin Source --- # Dell EMC XtremIO Input Plugin diff --git a/content/telegraf/v1/input-plugins/zfs/_index.md b/content/telegraf/v1/input-plugins/zfs/_index.md index d054658c81..7c0db823ef 100644 --- a/content/telegraf/v1/input-plugins/zfs/_index.md 
+++ b/content/telegraf/v1/input-plugins/zfs/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zfs/README.md, ZFS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zfs/README.md, ZFS Plugin Source --- # ZFS Input Plugin diff --git a/content/telegraf/v1/input-plugins/zipkin/_index.md b/content/telegraf/v1/input-plugins/zipkin/_index.md index 43c8473f92..416597381e 100644 --- a/content/telegraf/v1/input-plugins/zipkin/_index.md +++ b/content/telegraf/v1/input-plugins/zipkin/_index.md @@ -10,7 +10,7 @@ introduced: "v1.4.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zipkin/README.md, Zipkin Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zipkin/README.md, Zipkin Plugin Source --- # Zipkin Input Plugin diff --git a/content/telegraf/v1/input-plugins/zookeeper/_index.md b/content/telegraf/v1/input-plugins/zookeeper/_index.md index 847a88b3ed..6b6995a65d 100644 --- a/content/telegraf/v1/input-plugins/zookeeper/_index.md +++ b/content/telegraf/v1/input-plugins/zookeeper/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/inputs/zookeeper/README.md, Apache Zookeeper Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/inputs/zookeeper/README.md, Apache Zookeeper Plugin Source --- # Apache Zookeeper Input Plugin diff --git a/content/telegraf/v1/output-plugins/amon/_index.md b/content/telegraf/v1/output-plugins/amon/_index.md index 30fb0b5e43..24c5f09e99 100644 --- a/content/telegraf/v1/output-plugins/amon/_index.md +++ 
b/content/telegraf/v1/output-plugins/amon/_index.md @@ -12,7 +12,7 @@ removal: v1.40.0 os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/amon/README.md, Amon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/amon/README.md, Amon Plugin Source --- # Amon Output Plugin diff --git a/content/telegraf/v1/output-plugins/amqp/_index.md b/content/telegraf/v1/output-plugins/amqp/_index.md index 007cff3f8f..11b2b54d6d 100644 --- a/content/telegraf/v1/output-plugins/amqp/_index.md +++ b/content/telegraf/v1/output-plugins/amqp/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/amqp/README.md, AMQP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/amqp/README.md, AMQP Plugin Source --- # AMQP Output Plugin diff --git a/content/telegraf/v1/output-plugins/application_insights/_index.md b/content/telegraf/v1/output-plugins/application_insights/_index.md index b00a7af99c..206dca9758 100644 --- a/content/telegraf/v1/output-plugins/application_insights/_index.md +++ b/content/telegraf/v1/output-plugins/application_insights/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/application_insights/README.md, Azure Application Insights Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/application_insights/README.md, Azure Application Insights Plugin Source --- # Azure Application Insights Output Plugin diff --git a/content/telegraf/v1/output-plugins/arc/_index.md b/content/telegraf/v1/output-plugins/arc/_index.md index 
0d3ef9d2b7..231a332f86 100644 --- a/content/telegraf/v1/output-plugins/arc/_index.md +++ b/content/telegraf/v1/output-plugins/arc/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/arc/README.md, Arc Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/arc/README.md, Arc Plugin Source --- # Arc Output Plugin diff --git a/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md b/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md index 04647c41e1..06c44c9113 100644 --- a/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md +++ b/content/telegraf/v1/output-plugins/azure_data_explorer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/azure_data_explorer/README.md, Azure Data Explorer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/azure_data_explorer/README.md, Azure Data Explorer Plugin Source --- # Azure Data Explorer Output Plugin diff --git a/content/telegraf/v1/output-plugins/azure_monitor/_index.md b/content/telegraf/v1/output-plugins/azure_monitor/_index.md index 6c2c587bf6..7484cef9aa 100644 --- a/content/telegraf/v1/output-plugins/azure_monitor/_index.md +++ b/content/telegraf/v1/output-plugins/azure_monitor/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/azure_monitor/README.md, Azure Monitor Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/azure_monitor/README.md, Azure Monitor Plugin Source --- # Azure Monitor Output 
Plugin diff --git a/content/telegraf/v1/output-plugins/bigquery/_index.md b/content/telegraf/v1/output-plugins/bigquery/_index.md index 81e032ce84..ddf55e6cc4 100644 --- a/content/telegraf/v1/output-plugins/bigquery/_index.md +++ b/content/telegraf/v1/output-plugins/bigquery/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/bigquery/README.md, Google BigQuery Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/bigquery/README.md, Google BigQuery Plugin Source --- # Google BigQuery Output Plugin diff --git a/content/telegraf/v1/output-plugins/clarify/_index.md b/content/telegraf/v1/output-plugins/clarify/_index.md index 03a277e44b..359e9799dc 100644 --- a/content/telegraf/v1/output-plugins/clarify/_index.md +++ b/content/telegraf/v1/output-plugins/clarify/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/clarify/README.md, Clarify Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/clarify/README.md, Clarify Plugin Source --- # Clarify Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md b/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md index 1bfe0bccc5..193e4fbc47 100644 --- a/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md +++ b/content/telegraf/v1/output-plugins/cloud_pubsub/_index.md @@ -10,7 +10,7 @@ introduced: "v1.10.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloud_pubsub/README.md, Google Cloud PubSub Plugin Source --- # Google Cloud PubSub Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloudwatch/_index.md b/content/telegraf/v1/output-plugins/cloudwatch/_index.md index 0b956edea6..5ae20f45f3 100644 --- a/content/telegraf/v1/output-plugins/cloudwatch/_index.md +++ b/content/telegraf/v1/output-plugins/cloudwatch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloudwatch/README.md, Amazon CloudWatch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloudwatch/README.md, Amazon CloudWatch Plugin Source --- # Amazon CloudWatch Output Plugin diff --git a/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md b/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md index 007c3cd87d..2e11a5175f 100644 --- a/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md +++ b/content/telegraf/v1/output-plugins/cloudwatch_logs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cloudwatch_logs/README.md, Amazon CloudWatch Logs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cloudwatch_logs/README.md, Amazon CloudWatch Logs Plugin Source --- # Amazon CloudWatch Logs Output Plugin diff --git a/content/telegraf/v1/output-plugins/cratedb/_index.md b/content/telegraf/v1/output-plugins/cratedb/_index.md index 145464efdc..1a724ff001 100644 --- a/content/telegraf/v1/output-plugins/cratedb/_index.md +++ b/content/telegraf/v1/output-plugins/cratedb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, 
windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/cratedb/README.md, CrateDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/cratedb/README.md, CrateDB Plugin Source --- # CrateDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/datadog/_index.md b/content/telegraf/v1/output-plugins/datadog/_index.md index 2574e93402..8eabf235f8 100644 --- a/content/telegraf/v1/output-plugins/datadog/_index.md +++ b/content/telegraf/v1/output-plugins/datadog/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.6" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/datadog/README.md, Datadog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/datadog/README.md, Datadog Plugin Source --- # Datadog Output Plugin diff --git a/content/telegraf/v1/output-plugins/discard/_index.md b/content/telegraf/v1/output-plugins/discard/_index.md index b587e37d99..8eba9fdc94 100644 --- a/content/telegraf/v1/output-plugins/discard/_index.md +++ b/content/telegraf/v1/output-plugins/discard/_index.md @@ -10,7 +10,7 @@ introduced: "v1.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/discard/README.md, Discard Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/discard/README.md, Discard Plugin Source --- # Discard Output Plugin diff --git a/content/telegraf/v1/output-plugins/dynatrace/_index.md b/content/telegraf/v1/output-plugins/dynatrace/_index.md index 17dd1fa964..eafe072bb1 100644 --- a/content/telegraf/v1/output-plugins/dynatrace/_index.md +++ b/content/telegraf/v1/output-plugins/dynatrace/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, 
solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/dynatrace/README.md, Dynatrace Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/dynatrace/README.md, Dynatrace Plugin Source --- # Dynatrace Output Plugin diff --git a/content/telegraf/v1/output-plugins/elasticsearch/_index.md b/content/telegraf/v1/output-plugins/elasticsearch/_index.md index c8f39f92dd..f1108f349c 100644 --- a/content/telegraf/v1/output-plugins/elasticsearch/_index.md +++ b/content/telegraf/v1/output-plugins/elasticsearch/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/elasticsearch/README.md, Elasticsearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/elasticsearch/README.md, Elasticsearch Plugin Source --- # Elasticsearch Output Plugin diff --git a/content/telegraf/v1/output-plugins/event_hubs/_index.md b/content/telegraf/v1/output-plugins/event_hubs/_index.md index d8cfa9cb8b..36df6804ec 100644 --- a/content/telegraf/v1/output-plugins/event_hubs/_index.md +++ b/content/telegraf/v1/output-plugins/event_hubs/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/event_hubs/README.md, Azure Event Hubs Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/event_hubs/README.md, Azure Event Hubs Plugin Source --- # Azure Event Hubs Output Plugin diff --git a/content/telegraf/v1/output-plugins/exec/_index.md b/content/telegraf/v1/output-plugins/exec/_index.md index 6b9fc2a952..b5bd947a2c 100644 --- a/content/telegraf/v1/output-plugins/exec/_index.md +++ 
b/content/telegraf/v1/output-plugins/exec/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/exec/README.md, Executable Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/exec/README.md, Executable Plugin Source --- # Executable Output Plugin diff --git a/content/telegraf/v1/output-plugins/execd/_index.md b/content/telegraf/v1/output-plugins/execd/_index.md index 0958fd4b1b..3a49d678a4 100644 --- a/content/telegraf/v1/output-plugins/execd/_index.md +++ b/content/telegraf/v1/output-plugins/execd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/execd/README.md, Executable Daemon Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/execd/README.md, Executable Daemon Plugin Source --- # Executable Daemon Output Plugin diff --git a/content/telegraf/v1/output-plugins/file/_index.md b/content/telegraf/v1/output-plugins/file/_index.md index 38ce613c9b..3145396de0 100644 --- a/content/telegraf/v1/output-plugins/file/_index.md +++ b/content/telegraf/v1/output-plugins/file/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.3" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/file/README.md, File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/file/README.md, File Plugin Source --- # File Output Plugin diff --git a/content/telegraf/v1/output-plugins/graphite/_index.md b/content/telegraf/v1/output-plugins/graphite/_index.md index ec2cb65585..2261e32cb3 100644 --- a/content/telegraf/v1/output-plugins/graphite/_index.md +++ 
b/content/telegraf/v1/output-plugins/graphite/_index.md @@ -10,7 +10,7 @@ introduced: "v0.10.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/graphite/README.md, Graphite Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/graphite/README.md, Graphite Plugin Source --- # Graphite Output Plugin diff --git a/content/telegraf/v1/output-plugins/graylog/_index.md b/content/telegraf/v1/output-plugins/graylog/_index.md index 98e2c1b3f9..f94fe15a6f 100644 --- a/content/telegraf/v1/output-plugins/graylog/_index.md +++ b/content/telegraf/v1/output-plugins/graylog/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/graylog/README.md, Graylog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/graylog/README.md, Graylog Plugin Source --- # Graylog Output Plugin diff --git a/content/telegraf/v1/output-plugins/groundwork/_index.md b/content/telegraf/v1/output-plugins/groundwork/_index.md index 1308320bb6..1fb8abd099 100644 --- a/content/telegraf/v1/output-plugins/groundwork/_index.md +++ b/content/telegraf/v1/output-plugins/groundwork/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/groundwork/README.md, GroundWork Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/groundwork/README.md, GroundWork Plugin Source --- # GroundWork Output Plugin diff --git a/content/telegraf/v1/output-plugins/health/_index.md b/content/telegraf/v1/output-plugins/health/_index.md index 66890f70d4..c317642306 100644 --- 
a/content/telegraf/v1/output-plugins/health/_index.md +++ b/content/telegraf/v1/output-plugins/health/_index.md @@ -10,7 +10,7 @@ introduced: "v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/health/README.md, Health Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/health/README.md, Health Plugin Source --- # Health Output Plugin @@ -60,9 +60,6 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" - ## HTTP status code reported during startup i.e. before any write was called - # default_status = 200 - ## Maximum expected time between metrics being written ## Enforces an unhealthy state if there was no new metric seen for at least ## the specified time. The check is disabled by default and only used if a diff --git a/content/telegraf/v1/output-plugins/heartbeat/_index.md b/content/telegraf/v1/output-plugins/heartbeat/_index.md index f507c8c176..c93d4b6f57 100644 --- a/content/telegraf/v1/output-plugins/heartbeat/_index.md +++ b/content/telegraf/v1/output-plugins/heartbeat/_index.md @@ -10,7 +10,7 @@ introduced: "v1.37.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/heartbeat/README.md, Heartbeat Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/heartbeat/README.md, Heartbeat Plugin Source --- # Heartbeat Output Plugin @@ -61,42 +61,8 @@ to use them. 
## hostname -- hostname of the instance running Telegraf ## statistics -- number of metrics, logged errors and warnings, etc ## configs -- redacted list of configs loaded by this instance - ## logs -- detailed log-entries for this instance - ## status -- result of the status condition evaluation # include = ["hostname"] - ## Logging information filtering, only applies if "logs" is added to "include" - # [outputs.heartbeat.logs] - # ## Number of log entries to send (unlimited by default) - # ## In case more log-entries are available entries with higher log levels - # ## and more recent entries are preferred. - # # limit = 0 - # - # ## Minimum log-level for sending the entry - # # level = "error" - - ## Logical conditions to determine the agent status, only applies if "status" - ## is included in the message - # [outputs.heartbeat.status] - # ## Conditions to signal the given status as CEL programs returning a - # ## boolean. Conditions are evaluated in the order below until a program - # ## evaluates to "true". - # # ok = "false" - # # warn = "false" - # # fail = "false" - # - # ## Evaluation order of the conditions above; available: "ok", "warn", "fail" - # # order = ["ok", "warn", "fail"] - # - # ## Default status used if none of the conditions above matches - # ## available: "ok", "warn", "fail", "undefined" - # # default = "ok" - # - # ## If set, send this initial status before the first write, otherwise - # ## compute the status from the conditions and default above. - # ## available: "ok", "warn", "fail", "undefined", "" - # # initial = "" - ## Additional HTTP headers # [outputs.heartbeat.headers] # User-Agent = "telegraf" @@ -137,143 +103,4 @@ configuration directory while a new configuration is added or removed. > information. However, sensitive information might still be contained in the > URL or the path sent. Use with care! -### Logging information - -When including `logs` in the message the actual log _messages_ are included. 
-This comprises the log messages of _all_ plugins _and_ the agent itself being -logged _after_ the `Connect` function of this plugin was called, i.e. you will -not see any initialization or configuration errors in the heartbeat messages! -You can limit the messages sent within the optional `outputs.heartbeat.logs` -section where you can limit the messages by log-`level` or limit the number -of messages included using the `limit` setting. - -> [!WARNING] -> As the amount of log messages can be high, especially when configuring a low -> level such as `info` the resulting heartbeat messages might be large. Restrict -> the included messages by choosing a higher log-level and/or by using a limit! -When including `logs` in the message the number of errors and warnings logged -in this Telegraf instance are included in the heartbeat message. This comprises -_all_ log messages of all plugins and the agent itself logged _after_ the -`Connect` function of this plugin was called, i.e. you will not see any -initialization or configuration errors in the heartbeat messages! - -For getting the actual log _messages_ you can include `log-details`. Via the -optional `outputs.heartbeat.status` you can limit the messages by log-`level` -or limit the number included using the `limit` setting. - -> [!WARNING] -> As the amount of log messages can be high, especially when configuring low -> level such as `info` the resulting heartbeat messages might be large. Use the -> `log-details` option with care if network traffic is a limiting factor and -> restrict the included messages to high levels and use a limit! - -When setting the `level` option only messages with this or more severe levels -are included. - -The `limit` setting allows to specify the maximum number of log-messages -included in the heartbeat message. If the number of log-messages exceeds the -given limit they are selected by the most severe level and most recent messages -first. 
-given limit they are selected by most severe and most recent messages first. - -### Status information - -By including `status` the message will contain the status of the Telegraf -instance as configured via the `outputs.heartbeat.status` section. - -This section allows to set an `initial` state used as long as no flush was -performed by Telegraf. If `initial` is not configured or empty, the status -expressions are evaluated also before the first flush. - -The `ok`, `warn` and `fail` settings allow to specify [CEL expressions](https://cel.dev) -evaluating to a boolean value. Available information for the expressions are -listed below. The first expression evaluating to `true` defines the status. -The `order` parameter allows to customize the evaluation order. - -> [!NOTE] -> If an expression is omitted in the `order` setting it will __not__ be -> evaluated! - -The status defined via `default` is used in case none of the status expressions -evaluate to true. - -For defining expressions you can use the following variables - -- `metrics` (int) -- number of metrics arriving at this plugin -- `log_errors` (int) -- number of errors logged -- `log_warnings` (int) -- number of warnings logged -- `last_update` (time) -- time of last successful heartbeat message, can be used - to e.g. 
calculate rates -- `agent` (map) -- agent statistics, see below -- `inputs` (map) -- input plugin statistics, see below -- `outputs` (map) -- output plugin statistics, see below - -The `agent` statistics variable is a `map` with information matching the -`internal_agent` metric of the [internal input plugin](/telegraf/v1/plugins/#input-internal): - -- `metrics_written` (int) -- number of metrics written in total by all outputs -- `metrics_rejected` (int) -- number of metrics rejected in total by all outputs -- `metrics_dropped` (int) -- number of metrics dropped in total by all outputs -- `metrics_gathered` (int) -- number of metrics collected in total by all inputs -- `gather_errors` (int) -- number of errors during collection by all inputs -- `gather_timeouts` (int) -- number of collection timeouts by all inputs - -The `inputs` statistics variable is a `map` with the key denoting the plugin -type (e.g. `cpu` for `inputs.cpu`) and the value being list of plugin -statistics. Each entry in the list corresponds to an input plugin instance with -information matching the `internal_gather` metric of the -[internal input plugin](/telegraf/v1/plugins/#input-internal): - -- `id` (string) -- unique plugin identifier -- `alias` (string) -- alias set for the plugin; only exists if alias - is defined -- `errors` (int) -- collection errors for this plugin instance -- `metrics_gathered` (int) -- number of metrics collected -- `gather_time_ns` (int) -- time used to gather the metrics in nanoseconds -- `gather_timeouts` (int) -- number of timeouts during metric collection -- `startup_errors` (int) -- number of times the plugin failed to start - -The `outputs` statistics variable is a `map` with the key denoting the plugin -type (e.g. `influxdb` for `outputs.influxdb`) and the value being list of plugin -statistics. 
Each entry in the list corresponds to an output plugin instance with -information matching the `internal_write` metric of the -[internal input plugin](/telegraf/v1/plugins/#input-internal): - -- `id` (string) -- unique plugin identifier -- `alias` (string) -- alias set for the plugin; only exists if alias - is defined -- `errors` (int) -- write errors for this plugin instance -- `metrics_filtered` (int) -- number of metrics filtered by the output -- `write_time_ns` (int) -- time used to write the metrics in nanoseconds -- `startup_errors` (int) -- number of times the plugin failed to start -- `metrics_added` (int) -- number of metrics added to the output buffer -- `metrics_written` (int) -- number of metrics written to the output -- `metrics_rejected` (int) -- number of metrics rejected by the service or - serialization -- `metrics_dropped` (int) -- number of metrics dropped e.g. due to buffer - fullness -- `buffer_size` (int) -- current number of metrics currently in the output - buffer for the plugin instance -- `buffer_limit` (int) -- capacity of the output buffer; irrelevant for - disk-based buffers -- `buffer_fullness` (float) -- current ratio of metrics in the buffer to - capacity; can be greater than one (i.e. `> 100%`) - for disk-based buffers - -If not stated otherwise, all variables are accumulated since the last successful -heartbeat message. 
- -The following functions are available: - -- `encoding` functions of the [CEL encoder library](https://github.com/google/cel-go/blob/master/ext/README.md#encoders) -- `math` functions of the [CEL math library](https://github.com/google/cel-go/blob/master/ext/README.md#math) -- `string` functions of the [CEL strings library](https://github.com/google/cel-go/blob/master/ext/README.md#strings) -- `now` function for getting the current time - [schema]: /plugins/outputs/heartbeat/schema_v1.json -[internal_plugin]: /plugins/inputs/internal/README.md - -[cel]: https://cel.dev -[cel_encoder]: https://github.com/google/cel-go/blob/master/ext/README.md#encoders -[cel_math]: https://github.com/google/cel-go/blob/master/ext/README.md#math -[cel_strings]: https://github.com/google/cel-go/blob/master/ext/README.md#strings diff --git a/content/telegraf/v1/output-plugins/http/_index.md b/content/telegraf/v1/output-plugins/http/_index.md index c39dd87b17..4d5c5d68a4 100644 --- a/content/telegraf/v1/output-plugins/http/_index.md +++ b/content/telegraf/v1/output-plugins/http/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/http/README.md, HTTP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/http/README.md, HTTP Plugin Source --- # HTTP Output Plugin @@ -69,9 +69,6 @@ to use them. 
# max_idle_conn_per_host = 0 # response_timeout = "0s" - ## Use the local address for connecting, assigned by the OS by default - # local_address = "" - ## Optional proxy settings # use_system_proxy = false # http_proxy_url = "" diff --git a/content/telegraf/v1/output-plugins/influxdb/_index.md b/content/telegraf/v1/output-plugins/influxdb/_index.md index b574679c6c..27d66099f5 100644 --- a/content/telegraf/v1/output-plugins/influxdb/_index.md +++ b/content/telegraf/v1/output-plugins/influxdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb/README.md, InfluxDB v1.x Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/influxdb/README.md, InfluxDB v1.x Plugin Source --- # InfluxDB v1.x Output Plugin diff --git a/content/telegraf/v1/output-plugins/influxdb_v2/_index.md b/content/telegraf/v1/output-plugins/influxdb_v2/_index.md index dfbec8759e..be5f1e062c 100644 --- a/content/telegraf/v1/output-plugins/influxdb_v2/_index.md +++ b/content/telegraf/v1/output-plugins/influxdb_v2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb_v2/README.md, InfluxDB v2.x Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/influxdb_v2/README.md, InfluxDB v2.x Plugin Source --- # InfluxDB v2.x Output Plugin @@ -42,7 +42,7 @@ more details on how to use them. ## Configuration ```toml @sample.conf -# Configuration for sending metrics to InfluxDB 2.x +# Configuration for sending metrics to InfluxDB 2.0 [[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. 
## diff --git a/content/telegraf/v1/output-plugins/influxdb_v3/_index.md b/content/telegraf/v1/output-plugins/influxdb_v3/_index.md deleted file mode 100644 index 3ba99085df..0000000000 --- a/content/telegraf/v1/output-plugins/influxdb_v3/_index.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: "Telegraf plugin for sending metrics to InfluxDB v3.x" -menu: - telegraf_v1_ref: - parent: output_plugins_reference - name: InfluxDB v3.x - identifier: output-influxdb_v3 -tags: [InfluxDB v3.x, "output-plugins", "configuration", "datastore"] -introduced: "v1.38.0" -os_support: "freebsd, linux, macos, solaris, windows" -related: - - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/influxdb_v3/README.md, InfluxDB v3.x Plugin Source ---- - -# InfluxDB v3.x Output Plugin - -This plugin writes metrics to a [InfluxDB v3.x](https://docs.influxdata.com) Core or Enterprise -instance via the HTTP API. - -**Introduced in:** Telegraf v1.38.0 -**Tags:** datastore -**OS support:** all - -[influxdb_v3]: https://docs.influxdata.com - -## Global configuration options - -Plugins support additional global and plugin configuration settings for tasks -such as modifying metrics, tags, and fields, creating aliases, and configuring -plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for more details. - -[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins - -## Secret-store support - -This plugin supports secrets from secret-stores for the `token` option. -See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how -to use them. - -[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets - -## Configuration - -```toml @sample.conf -# Configuration for sending metrics to InfluxDB 3.x Core and Enterprise -[[outputs.influxdb_v3]] - ## Multiple URLs can be specified but only ONE of them will be selected - ## randomly in each interval for writing. 
If endpoints are unavailable another - ## one will be used until all are exhausted or the write succeeds. - urls = ["http://127.0.0.1:8181"] - - ## Token for authentication - token = "" - - ## Destination database to write into - database = "" - - ## The value of this tag will be used to determine the database. If this - ## tag is not set the 'database' option is used as the default. - # database_tag = "" - - ## If true, the database tag will not be added to the metric - # exclude_database_tag = false - - ## Wait for WAL persistence to complete synchronization - ## Setting this to false reduces latency but increases the risk of data loss. - ## See https://docs.influxdata.com/influxdb3/enterprise/write-data/http-api/v3-write-lp/#use-no_sync-for-immediate-write-responses - # sync = true - - ## Enable or disable conversion of unsigned integer fields to signed integers - ## This is useful if existing data exist as signed integers e.g. from previous - ## versions of InfluxDB. - # convert_uint_to_int = false - - ## Omit the timestamp of the metrics when sending to allow InfluxDB to set the - ## timestamp of the data during ingestion. You likely want this to be false - ## to submit the metric timestamp - # omit_timestamp = false - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, available values are "gzip", - ## "none" and "identity" - # content_encoding = "gzip" - - ## Amount of time allowed to complete the HTTP request - # timeout = "5s" - - ## HTTP connection settings - # idle_conn_timeout = "0s" - # max_idle_conn = 0 - # max_idle_conn_per_host = 0 - # response_timeout = "0s" - - ## Use the local address for connecting, assigned by the OS by default - # local_address = "" - - ## Optional proxy settings - # use_system_proxy = false - # http_proxy_url = "" - - ## Optional TLS settings - ## Set to true/false to enforce TLS being enabled/disabled. If not set, - ## enable TLS only if any of the other options are specified. 
- # tls_enable = - ## Trusted root certificates for server - # tls_ca = "/path/to/cafile" - ## Used for TLS client certificate authentication - # tls_cert = "/path/to/certfile" - ## Used for TLS client certificate authentication - # tls_key = "/path/to/keyfile" - ## Password for the key file if it is encrypted - # tls_key_pwd = "" - ## Send the specified TLS server name via SNI - # tls_server_name = "kubernetes.example.com" - ## Minimal TLS version to accept by the client - # tls_min_version = "TLS12" - ## List of ciphers to accept, by default all secure ciphers will be accepted - ## See https://pkg.go.dev/crypto/tls#pkg-constants for supported values. - ## Use "all", "secure" and "insecure" to add all support ciphers, secure - ## suites or insecure suites respectively. - # tls_cipher_suites = ["secure"] - ## Renegotiation method, "never", "once" or "freely" - # tls_renegotiation_method = "never" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. 
- # client_id = "clientid" - # client_secret = "secret" - # token_url = "https://indentityprovider/oauth2/v1/token" - # audience = "" - # scopes = ["urn:opc:idm:__myscopes__"] - - ## Optional Cookie authentication - # cookie_auth_url = "https://localhost/authMe" - # cookie_auth_method = "POST" - # cookie_auth_username = "username" - # cookie_auth_password = "pa$$word" - # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } - # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' - ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie - # cookie_auth_renewal = "0s" -``` diff --git a/content/telegraf/v1/output-plugins/inlong/_index.md b/content/telegraf/v1/output-plugins/inlong/_index.md index 1568c504b6..139eb91305 100644 --- a/content/telegraf/v1/output-plugins/inlong/_index.md +++ b/content/telegraf/v1/output-plugins/inlong/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/inlong/README.md, Inlong Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/inlong/README.md, Inlong Plugin Source --- # Inlong Output Plugin diff --git a/content/telegraf/v1/output-plugins/instrumental/_index.md b/content/telegraf/v1/output-plugins/instrumental/_index.md index ebc6fdb2a8..285ab02c9b 100644 --- a/content/telegraf/v1/output-plugins/instrumental/_index.md +++ b/content/telegraf/v1/output-plugins/instrumental/_index.md @@ -10,7 +10,7 @@ introduced: "v0.13.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/instrumental/README.md, Instrumental Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/instrumental/README.md, Instrumental 
Plugin Source --- # Instrumental Output Plugin diff --git a/content/telegraf/v1/output-plugins/iotdb/_index.md b/content/telegraf/v1/output-plugins/iotdb/_index.md index 45776967eb..e617d99b83 100644 --- a/content/telegraf/v1/output-plugins/iotdb/_index.md +++ b/content/telegraf/v1/output-plugins/iotdb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/iotdb/README.md, Apache IoTDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/iotdb/README.md, Apache IoTDB Plugin Source --- # Apache IoTDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/kafka/_index.md b/content/telegraf/v1/output-plugins/kafka/_index.md index d04a678479..88c5a79740 100644 --- a/content/telegraf/v1/output-plugins/kafka/_index.md +++ b/content/telegraf/v1/output-plugins/kafka/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.7" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/kafka/README.md, Kafka Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/kafka/README.md, Kafka Plugin Source --- # Kafka Output Plugin diff --git a/content/telegraf/v1/output-plugins/kinesis/_index.md b/content/telegraf/v1/output-plugins/kinesis/_index.md index 53526180e8..43f0c87c27 100644 --- a/content/telegraf/v1/output-plugins/kinesis/_index.md +++ b/content/telegraf/v1/output-plugins/kinesis/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.5" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/kinesis/README.md, Amazon Kinesis Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/kinesis/README.md, Amazon Kinesis 
Plugin Source --- # Amazon Kinesis Output Plugin diff --git a/content/telegraf/v1/output-plugins/librato/_index.md b/content/telegraf/v1/output-plugins/librato/_index.md index a77ad163bc..5743b3848b 100644 --- a/content/telegraf/v1/output-plugins/librato/_index.md +++ b/content/telegraf/v1/output-plugins/librato/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/librato/README.md, Librato Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/librato/README.md, Librato Plugin Source --- # Librato Output Plugin diff --git a/content/telegraf/v1/output-plugins/logzio/_index.md b/content/telegraf/v1/output-plugins/logzio/_index.md index 9f7e4044c5..66bcc79f89 100644 --- a/content/telegraf/v1/output-plugins/logzio/_index.md +++ b/content/telegraf/v1/output-plugins/logzio/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/logzio/README.md, Logz.io Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/logzio/README.md, Logz.io Plugin Source --- # Logz.io Output Plugin diff --git a/content/telegraf/v1/output-plugins/loki/_index.md b/content/telegraf/v1/output-plugins/loki/_index.md index 2d1b16ddb8..fa99386f72 100644 --- a/content/telegraf/v1/output-plugins/loki/_index.md +++ b/content/telegraf/v1/output-plugins/loki/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/loki/README.md, Grafana Loki Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/loki/README.md, Grafana Loki Plugin 
Source --- # Grafana Loki Output Plugin diff --git a/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md b/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md index 54a32c47c1..4607efbda4 100644 --- a/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md +++ b/content/telegraf/v1/output-plugins/microsoft_fabric/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/microsoft_fabric/README.md, Microsoft Fabric Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/microsoft_fabric/README.md, Microsoft Fabric Plugin Source --- # Microsoft Fabric Output Plugin diff --git a/content/telegraf/v1/output-plugins/mongodb/_index.md b/content/telegraf/v1/output-plugins/mongodb/_index.md index 58f485a763..412701a0bb 100644 --- a/content/telegraf/v1/output-plugins/mongodb/_index.md +++ b/content/telegraf/v1/output-plugins/mongodb/_index.md @@ -10,7 +10,7 @@ introduced: "v1.21.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/mongodb/README.md, MongoDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/mongodb/README.md, MongoDB Plugin Source --- # MongoDB Output Plugin @@ -49,47 +49,43 @@ to use them. 
```toml @sample.conf # A plugin that can transmit logs to mongodb [[outputs.mongodb]] - ## Connection string - ## example: dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" + # connection string examples for mongodb dsn = "mongodb://localhost:27017" + # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" - ## Overrides serverSelectionTimeoutMS in DSN if set + # overrides serverSelectionTimeoutMS in dsn if set # timeout = "30s" - ## Authentication method, available options are NONE, PLAIN, SCRAM, X509 + # default authentication, optional # authentication = "NONE" - # ## for SCRAM-SHA-256 authentication - # # authentication = "SCRAM" - # # username = "root" - # # password = "***" - - # ## for PLAIN authentication (e.g., LDAP) - # ## IMPORTANT: PLAIN authentication sends credentials in plaintext during the - # ## authentication handshake. Always use TLS to encrypt credentials in transit. - # # authentication = "PLAIN" - # # username = "myuser" - # # password = "***" - - # ## X509 based certificate authentication - # # authentication = "X509" - # # tls_ca = "ca.pem" - # # tls_key = "client.pem" - # # # tls_key_pwd = "changeme" # required for encrypted tls_key - # # insecure_skip_verify = false - - ## Database to store measurements and time series collections + # for SCRAM-SHA-256 authentication + # authentication = "SCRAM" + # username = "root" + # password = "***" + + ## for PLAIN authentication (e.g., LDAP) + ## IMPORTANT: PLAIN authentication sends credentials in plaintext during the + ## authentication handshake. Always use TLS to encrypt credentials in transit. 
+ # authentication = "PLAIN" + # username = "myuser" + # password = "***" + + # for x509 certificate authentication + # authentication = "X509" + # tls_ca = "ca.pem" + # tls_key = "client.pem" + # # tls_key_pwd = "changeme" # required for encrypted tls_key + # insecure_skip_verify = false + + # database to store measurements and time series collections # database = "telegraf" - ## Granularity can be seconds, minutes, or hours. - ## Configuring this value will be based on your input collection frequency - ## see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection + # granularity can be seconds, minutes, or hours. + # configuring this value will be based on your input collection frequency. + # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection # granularity = "seconds" - ## TTL to automatically expire documents from the measurement collections. + # optionally set a TTL to automatically expire documents from the measurement collections. # ttl = "360h" - - ## If true, write multiple metrics for the same collection in a batched - ## fashion. Otherwise, write each metric individually. 
- # write_batch = false ``` diff --git a/content/telegraf/v1/output-plugins/mqtt/_index.md b/content/telegraf/v1/output-plugins/mqtt/_index.md index 66ec6f64e4..90a55b3a5c 100644 --- a/content/telegraf/v1/output-plugins/mqtt/_index.md +++ b/content/telegraf/v1/output-plugins/mqtt/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/mqtt/README.md, MQTT Producer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/mqtt/README.md, MQTT Producer Plugin Source --- # MQTT Producer Output Plugin diff --git a/content/telegraf/v1/output-plugins/nats/_index.md b/content/telegraf/v1/output-plugins/nats/_index.md index 546b1f3c3e..a58f54476e 100644 --- a/content/telegraf/v1/output-plugins/nats/_index.md +++ b/content/telegraf/v1/output-plugins/nats/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nats/README.md, NATS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nats/README.md, NATS Plugin Source --- # NATS Output Plugin diff --git a/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md b/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md index 290f93b835..8e55aad07a 100644 --- a/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md +++ b/content/telegraf/v1/output-plugins/nebius_cloud_monitoring/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nebius_cloud_monitoring/README.md, Nebius Cloud Monitoring Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nebius_cloud_monitoring/README.md, Nebius Cloud Monitoring Plugin Source --- # Nebius Cloud Monitoring Output Plugin diff --git a/content/telegraf/v1/output-plugins/newrelic/_index.md b/content/telegraf/v1/output-plugins/newrelic/_index.md index 9b35ab3be5..f0df09e5d1 100644 --- a/content/telegraf/v1/output-plugins/newrelic/_index.md +++ b/content/telegraf/v1/output-plugins/newrelic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/newrelic/README.md, New Relic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/newrelic/README.md, New Relic Plugin Source --- # New Relic Output Plugin diff --git a/content/telegraf/v1/output-plugins/nsq/_index.md b/content/telegraf/v1/output-plugins/nsq/_index.md index 239fd7fa46..5dbb9bf6e8 100644 --- a/content/telegraf/v1/output-plugins/nsq/_index.md +++ b/content/telegraf/v1/output-plugins/nsq/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/nsq/README.md, NSQ Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/nsq/README.md, NSQ Plugin Source --- # NSQ Output Plugin diff --git a/content/telegraf/v1/output-plugins/opensearch/_index.md b/content/telegraf/v1/output-plugins/opensearch/_index.md index eb44158561..273d6c951e 100644 --- a/content/telegraf/v1/output-plugins/opensearch/_index.md +++ b/content/telegraf/v1/output-plugins/opensearch/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opensearch/README.md, OpenSearch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opensearch/README.md, OpenSearch Plugin Source --- # OpenSearch Output Plugin diff --git a/content/telegraf/v1/output-plugins/opentelemetry/_index.md b/content/telegraf/v1/output-plugins/opentelemetry/_index.md index 2630571246..9828b7f89b 100644 --- a/content/telegraf/v1/output-plugins/opentelemetry/_index.md +++ b/content/telegraf/v1/output-plugins/opentelemetry/_index.md @@ -10,13 +10,13 @@ introduced: "v1.20.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opentelemetry/README.md, OpenTelemetry Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opentelemetry/README.md, OpenTelemetry Plugin Source --- # OpenTelemetry Output Plugin This plugin writes metrics to [OpenTelemetry](https://opentelemetry.io) servers and agents -via gRPC or HTTP. +via gRPC. **Introduced in:** Telegraf v1.20.0 **Tags:** logging, messaging @@ -38,12 +38,8 @@ plugin ordering. 
See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for # Send OpenTelemetry metrics over gRPC [[outputs.opentelemetry]] ## Override the default (localhost:4317) OpenTelemetry gRPC service - ## When the protocol is grpc, address:port - ## When the protocol is http, http(s)://address:port/path + ## address:port # service_address = "localhost:4317" - ## Override the default (protobuf) encodingType when Protocol is http - ## protobuf, json - # encoding_type = "protobuf" ## Override the default (5s) request timeout # timeout = "5s" diff --git a/content/telegraf/v1/output-plugins/opentsdb/_index.md b/content/telegraf/v1/output-plugins/opentsdb/_index.md index c4c0803ff0..c6ca28884c 100644 --- a/content/telegraf/v1/output-plugins/opentsdb/_index.md +++ b/content/telegraf/v1/output-plugins/opentsdb/_index.md @@ -10,7 +10,7 @@ introduced: "v0.1.9" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/opentsdb/README.md, OpenTSDB Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/opentsdb/README.md, OpenTSDB Plugin Source --- # OpenTSDB Output Plugin diff --git a/content/telegraf/v1/output-plugins/parquet/_index.md b/content/telegraf/v1/output-plugins/parquet/_index.md index 361eb01aa1..54a93686e1 100644 --- a/content/telegraf/v1/output-plugins/parquet/_index.md +++ b/content/telegraf/v1/output-plugins/parquet/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/parquet/README.md, Parquet Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/parquet/README.md, Parquet Plugin Source --- # Parquet Output Plugin diff --git a/content/telegraf/v1/output-plugins/postgresql/_index.md 
b/content/telegraf/v1/output-plugins/postgresql/_index.md index cf227560de..e3d0fb8dc8 100644 --- a/content/telegraf/v1/output-plugins/postgresql/_index.md +++ b/content/telegraf/v1/output-plugins/postgresql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/postgresql/README.md, PostgreSQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/postgresql/README.md, PostgreSQL Plugin Source --- # PostgreSQL Output Plugin diff --git a/content/telegraf/v1/output-plugins/prometheus_client/_index.md b/content/telegraf/v1/output-plugins/prometheus_client/_index.md index f1c6bcdeec..4538073861 100644 --- a/content/telegraf/v1/output-plugins/prometheus_client/_index.md +++ b/content/telegraf/v1/output-plugins/prometheus_client/_index.md @@ -10,7 +10,7 @@ introduced: "v0.2.1" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/prometheus_client/README.md, Prometheus Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/prometheus_client/README.md, Prometheus Plugin Source --- # Prometheus Output Plugin @@ -84,12 +84,6 @@ to use them. ## Unless set to false all string metrics will be sent as labels. # string_as_label = true - ## Control how metric names and label names are sanitized. - ## The default "legacy" keeps ASCII-only Prometheus name rules. - ## Set to "utf8" to allow UTF-8 metric and label names. - ## Valid options: "legacy", "utf8" - # name_sanitization = "legacy" - ## If set, enable TLS with the given certificate. 
# tls_cert = "/etc/ssl/telegraf.crt" # tls_key = "/etc/ssl/telegraf.key" diff --git a/content/telegraf/v1/output-plugins/quix/_index.md b/content/telegraf/v1/output-plugins/quix/_index.md index 15bfdde24d..06b3c9db0a 100644 --- a/content/telegraf/v1/output-plugins/quix/_index.md +++ b/content/telegraf/v1/output-plugins/quix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/quix/README.md, Quix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/quix/README.md, Quix Plugin Source --- # Quix Output Plugin diff --git a/content/telegraf/v1/output-plugins/redistimeseries/_index.md b/content/telegraf/v1/output-plugins/redistimeseries/_index.md index 66b7b1a309..b53fdcd48f 100644 --- a/content/telegraf/v1/output-plugins/redistimeseries/_index.md +++ b/content/telegraf/v1/output-plugins/redistimeseries/_index.md @@ -10,7 +10,7 @@ introduced: "v1.0.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/redistimeseries/README.md, Redis Time Series Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/redistimeseries/README.md, Redis Time Series Plugin Source --- # Redis Time Series Output Plugin @@ -56,13 +56,6 @@ to use them. 
## Timeout for operations such as ping or sending metrics # timeout = "10s" - ## Set a time-to-live (TTL) on each Redis key - ## If set, Redis will expire the key after the specified duration - ## The TTL is refreshed on every write, so the key only expires - ## if no new data arrives within the configured period - ## Disabled by default (no expiry) - # expire = "" - ## Enable attempt to convert string fields to numeric values ## If "false" or in case the string value cannot be converted the string ## field will be dropped. diff --git a/content/telegraf/v1/output-plugins/remotefile/_index.md b/content/telegraf/v1/output-plugins/remotefile/_index.md index 00c444a6e8..6474bc1dd3 100644 --- a/content/telegraf/v1/output-plugins/remotefile/_index.md +++ b/content/telegraf/v1/output-plugins/remotefile/_index.md @@ -10,7 +10,7 @@ introduced: "v1.32.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/remotefile/README.md, Remote File Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/remotefile/README.md, Remote File Plugin Source --- # Remote File Output Plugin diff --git a/content/telegraf/v1/output-plugins/riemann/_index.md b/content/telegraf/v1/output-plugins/riemann/_index.md index 2a3a3a7140..2d1d3e6a43 100644 --- a/content/telegraf/v1/output-plugins/riemann/_index.md +++ b/content/telegraf/v1/output-plugins/riemann/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/riemann/README.md, Riemann Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/riemann/README.md, Riemann Plugin Source --- # Riemann Output Plugin diff --git a/content/telegraf/v1/output-plugins/sensu/_index.md 
b/content/telegraf/v1/output-plugins/sensu/_index.md index 8b81682398..28c9d31bc3 100644 --- a/content/telegraf/v1/output-plugins/sensu/_index.md +++ b/content/telegraf/v1/output-plugins/sensu/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sensu/README.md, Sensu Go Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sensu/README.md, Sensu Go Plugin Source --- # Sensu Go Output Plugin diff --git a/content/telegraf/v1/output-plugins/signalfx/_index.md b/content/telegraf/v1/output-plugins/signalfx/_index.md index 4b581611c6..6bc09f840b 100644 --- a/content/telegraf/v1/output-plugins/signalfx/_index.md +++ b/content/telegraf/v1/output-plugins/signalfx/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/signalfx/README.md, SignalFx Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/signalfx/README.md, SignalFx Plugin Source --- # SignalFx Output Plugin diff --git a/content/telegraf/v1/output-plugins/socket_writer/_index.md b/content/telegraf/v1/output-plugins/socket_writer/_index.md index ba29532a9b..3171264755 100644 --- a/content/telegraf/v1/output-plugins/socket_writer/_index.md +++ b/content/telegraf/v1/output-plugins/socket_writer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.3.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/socket_writer/README.md, Socket Writer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/socket_writer/README.md, Socket Writer Plugin Source --- # Socket Writer Output Plugin diff --git 
a/content/telegraf/v1/output-plugins/sql/_index.md b/content/telegraf/v1/output-plugins/sql/_index.md index bd7851586b..5359dbf733 100644 --- a/content/telegraf/v1/output-plugins/sql/_index.md +++ b/content/telegraf/v1/output-plugins/sql/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sql/README.md, SQL Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sql/README.md, SQL Plugin Source --- # SQL Output Plugin diff --git a/content/telegraf/v1/output-plugins/stackdriver/_index.md b/content/telegraf/v1/output-plugins/stackdriver/_index.md index c3e4c7d5d3..9945f4a364 100644 --- a/content/telegraf/v1/output-plugins/stackdriver/_index.md +++ b/content/telegraf/v1/output-plugins/stackdriver/_index.md @@ -10,7 +10,7 @@ introduced: "v1.9.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/stackdriver/README.md, Google Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/stackdriver/README.md, Google Cloud Monitoring Plugin Source --- # Google Cloud Monitoring Output Plugin @@ -42,14 +42,6 @@ plugin ordering. See [CONFIGURATION.md](/telegraf/v1/configuration/#plugins) for [CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins -## Secret-store support - -This plugin supports secrets from secret-stores for the `token` option. -See the [secret-store documentation](/telegraf/v1/configuration/#secret-store-secrets) for more details on how -to use them. - -[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets - ## Configuration ```toml @sample.conf @@ -58,9 +50,6 @@ to use them. 
## GCP Project project = "erudite-bloom-151019" - ## GCP access token for authorizing calls to Cloud Monitoring APIs - # token = "@{gcp_auth:token}" - ## Quota Project ## Specifies the Google Cloud project that should be billed for metric ingestion. ## If omitted, the quota is charged to the service account’s default project. diff --git a/content/telegraf/v1/output-plugins/stomp/_index.md b/content/telegraf/v1/output-plugins/stomp/_index.md index 6a7bcdd35f..9f1b5bffa8 100644 --- a/content/telegraf/v1/output-plugins/stomp/_index.md +++ b/content/telegraf/v1/output-plugins/stomp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.24.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/stomp/README.md, ActiveMQ STOMP Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/stomp/README.md, ActiveMQ STOMP Plugin Source --- # ActiveMQ STOMP Output Plugin diff --git a/content/telegraf/v1/output-plugins/sumologic/_index.md b/content/telegraf/v1/output-plugins/sumologic/_index.md index 83b4434e4e..9535aa1d7b 100644 --- a/content/telegraf/v1/output-plugins/sumologic/_index.md +++ b/content/telegraf/v1/output-plugins/sumologic/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/sumologic/README.md, Sumo Logic Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/sumologic/README.md, Sumo Logic Plugin Source --- # Sumo Logic Output Plugin diff --git a/content/telegraf/v1/output-plugins/syslog/_index.md b/content/telegraf/v1/output-plugins/syslog/_index.md index 7d748fcb49..d67879247d 100644 --- a/content/telegraf/v1/output-plugins/syslog/_index.md +++ b/content/telegraf/v1/output-plugins/syslog/_index.md @@ -10,7 +10,7 @@ introduced: 
"v1.11.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/syslog/README.md, Syslog Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/syslog/README.md, Syslog Plugin Source --- # Syslog Output Plugin diff --git a/content/telegraf/v1/output-plugins/timestream/_index.md b/content/telegraf/v1/output-plugins/timestream/_index.md index 845caaa8c9..76a7391539 100644 --- a/content/telegraf/v1/output-plugins/timestream/_index.md +++ b/content/telegraf/v1/output-plugins/timestream/_index.md @@ -10,7 +10,7 @@ introduced: "v1.16.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/timestream/README.md, Amazon Timestream Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/timestream/README.md, Amazon Timestream Plugin Source --- # Amazon Timestream Output Plugin diff --git a/content/telegraf/v1/output-plugins/warp10/_index.md b/content/telegraf/v1/output-plugins/warp10/_index.md index 11eafbe5e4..9abc0635ca 100644 --- a/content/telegraf/v1/output-plugins/warp10/_index.md +++ b/content/telegraf/v1/output-plugins/warp10/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/warp10/README.md, Warp10 Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/warp10/README.md, Warp10 Plugin Source --- # Warp10 Output Plugin diff --git a/content/telegraf/v1/output-plugins/wavefront/_index.md b/content/telegraf/v1/output-plugins/wavefront/_index.md index bb1632b058..5354bbc075 100644 --- a/content/telegraf/v1/output-plugins/wavefront/_index.md +++ 
b/content/telegraf/v1/output-plugins/wavefront/_index.md @@ -10,7 +10,7 @@ introduced: "v1.5.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/wavefront/README.md, Wavefront Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/wavefront/README.md, Wavefront Plugin Source --- # Wavefront Output Plugin diff --git a/content/telegraf/v1/output-plugins/websocket/_index.md b/content/telegraf/v1/output-plugins/websocket/_index.md index f39c5a93aa..2cf7d56e7a 100644 --- a/content/telegraf/v1/output-plugins/websocket/_index.md +++ b/content/telegraf/v1/output-plugins/websocket/_index.md @@ -10,7 +10,7 @@ introduced: "v1.19.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/websocket/README.md, Websocket Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/websocket/README.md, Websocket Plugin Source --- # Websocket Output Plugin diff --git a/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md b/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md index 26bf629ed2..32e86d1d9e 100644 --- a/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md +++ b/content/telegraf/v1/output-plugins/yandex_cloud_monitoring/_index.md @@ -10,7 +10,7 @@ introduced: "v1.17.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/yandex_cloud_monitoring/README.md, Yandex Cloud Monitoring Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/yandex_cloud_monitoring/README.md, Yandex Cloud Monitoring Plugin Source --- # Yandex Cloud Monitoring Output Plugin diff --git 
a/content/telegraf/v1/output-plugins/zabbix/_index.md b/content/telegraf/v1/output-plugins/zabbix/_index.md index 5bc8888fef..b1cbac2421 100644 --- a/content/telegraf/v1/output-plugins/zabbix/_index.md +++ b/content/telegraf/v1/output-plugins/zabbix/_index.md @@ -10,7 +10,7 @@ introduced: "v1.30.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/outputs/zabbix/README.md, Zabbix Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/outputs/zabbix/README.md, Zabbix Plugin Source --- # Zabbix Output Plugin diff --git a/content/telegraf/v1/processor-plugins/aws_ec2/_index.md b/content/telegraf/v1/processor-plugins/aws_ec2/_index.md index ca824176e4..002487e2a7 100644 --- a/content/telegraf/v1/processor-plugins/aws_ec2/_index.md +++ b/content/telegraf/v1/processor-plugins/aws_ec2/_index.md @@ -10,7 +10,7 @@ introduced: "v1.18.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/aws_ec2/README.md, AWS EC2 Metadata Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/aws_ec2/README.md, AWS EC2 Metadata Plugin Source --- # AWS EC2 Metadata Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/batch/_index.md b/content/telegraf/v1/processor-plugins/batch/_index.md index 3f8d0392ff..73ff3f7880 100644 --- a/content/telegraf/v1/processor-plugins/batch/_index.md +++ b/content/telegraf/v1/processor-plugins/batch/_index.md @@ -10,7 +10,7 @@ introduced: "v1.33.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/batch/README.md, Batch Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/batch/README.md, Batch Plugin Source 
--- # Batch Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/clone/_index.md b/content/telegraf/v1/processor-plugins/clone/_index.md index a3f1bed7bc..32519a7e27 100644 --- a/content/telegraf/v1/processor-plugins/clone/_index.md +++ b/content/telegraf/v1/processor-plugins/clone/_index.md @@ -10,7 +10,7 @@ introduced: "v1.13.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/clone/README.md, Clone Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/clone/README.md, Clone Plugin Source --- # Clone Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/converter/_index.md b/content/telegraf/v1/processor-plugins/converter/_index.md index cd5dc402ac..70a2f831de 100644 --- a/content/telegraf/v1/processor-plugins/converter/_index.md +++ b/content/telegraf/v1/processor-plugins/converter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/converter/README.md, Converter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/converter/README.md, Converter Plugin Source --- # Converter Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md b/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md index 09807da975..b00022072c 100644 --- a/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md +++ b/content/telegraf/v1/processor-plugins/cumulative_sum/_index.md @@ -10,7 +10,7 @@ introduced: "v1.35.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/cumulative_sum/README.md, Cumulative Sum Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/cumulative_sum/README.md, Cumulative Sum Plugin Source --- # Cumulative Sum Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/date/_index.md b/content/telegraf/v1/processor-plugins/date/_index.md index 2ce955e9ec..6aae7d255c 100644 --- a/content/telegraf/v1/processor-plugins/date/_index.md +++ b/content/telegraf/v1/processor-plugins/date/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/date/README.md, Date Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/date/README.md, Date Plugin Source --- # Date Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/dedup/_index.md b/content/telegraf/v1/processor-plugins/dedup/_index.md index 9691aba84b..93de9b7112 100644 --- a/content/telegraf/v1/processor-plugins/dedup/_index.md +++ b/content/telegraf/v1/processor-plugins/dedup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/dedup/README.md, Dedup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/dedup/README.md, Dedup Plugin Source --- # Dedup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/defaults/_index.md b/content/telegraf/v1/processor-plugins/defaults/_index.md index d4ebb1c82d..1547879b2d 100644 --- a/content/telegraf/v1/processor-plugins/defaults/_index.md +++ b/content/telegraf/v1/processor-plugins/defaults/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/defaults/README.md, Defaults Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/defaults/README.md, Defaults Plugin Source --- # Defaults Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/enum/_index.md b/content/telegraf/v1/processor-plugins/enum/_index.md index d180f7105b..1bdababffe 100644 --- a/content/telegraf/v1/processor-plugins/enum/_index.md +++ b/content/telegraf/v1/processor-plugins/enum/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/enum/README.md, Enum Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/enum/README.md, Enum Plugin Source --- # Enum Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/execd/_index.md b/content/telegraf/v1/processor-plugins/execd/_index.md index 2270dbb3fb..20eadfdfc8 100644 --- a/content/telegraf/v1/processor-plugins/execd/_index.md +++ b/content/telegraf/v1/processor-plugins/execd/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/execd/README.md, Execd Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/execd/README.md, Execd Plugin Source --- # Execd Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/filepath/_index.md b/content/telegraf/v1/processor-plugins/filepath/_index.md index f31d1ff255..f210ae6616 100644 --- a/content/telegraf/v1/processor-plugins/filepath/_index.md +++ b/content/telegraf/v1/processor-plugins/filepath/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - 
/telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/filepath/README.md, Filepath Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/filepath/README.md, Filepath Plugin Source --- # Filepath Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/filter/_index.md b/content/telegraf/v1/processor-plugins/filter/_index.md index fb8dbfdbd8..f49fca7cef 100644 --- a/content/telegraf/v1/processor-plugins/filter/_index.md +++ b/content/telegraf/v1/processor-plugins/filter/_index.md @@ -10,7 +10,7 @@ introduced: "v1.29.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/filter/README.md, Filter Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/filter/README.md, Filter Plugin Source --- # Filter Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/ifname/_index.md b/content/telegraf/v1/processor-plugins/ifname/_index.md index feb994de5c..09ae567200 100644 --- a/content/telegraf/v1/processor-plugins/ifname/_index.md +++ b/content/telegraf/v1/processor-plugins/ifname/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/ifname/README.md, Network Interface Name Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/ifname/README.md, Network Interface Name Plugin Source --- # Network Interface Name Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/lookup/_index.md b/content/telegraf/v1/processor-plugins/lookup/_index.md index 271ab8484b..61615c0849 100644 --- a/content/telegraf/v1/processor-plugins/lookup/_index.md +++ b/content/telegraf/v1/processor-plugins/lookup/_index.md @@ -10,7 
+10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/lookup/README.md, Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/lookup/README.md, Lookup Plugin Source --- # Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/noise/_index.md b/content/telegraf/v1/processor-plugins/noise/_index.md index 6a451438fc..327940fda1 100644 --- a/content/telegraf/v1/processor-plugins/noise/_index.md +++ b/content/telegraf/v1/processor-plugins/noise/_index.md @@ -10,7 +10,7 @@ introduced: "v1.22.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/noise/README.md, Noise Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/noise/README.md, Noise Plugin Source --- # Noise Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/override/_index.md b/content/telegraf/v1/processor-plugins/override/_index.md index 42ee0f52d6..8dd1048656 100644 --- a/content/telegraf/v1/processor-plugins/override/_index.md +++ b/content/telegraf/v1/processor-plugins/override/_index.md @@ -10,7 +10,7 @@ introduced: "v1.6.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/override/README.md, Override Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/override/README.md, Override Plugin Source --- # Override Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/parser/_index.md b/content/telegraf/v1/processor-plugins/parser/_index.md index 5dca40d7d9..f4afb64ee6 100644 --- a/content/telegraf/v1/processor-plugins/parser/_index.md +++ 
b/content/telegraf/v1/processor-plugins/parser/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/parser/README.md, Parser Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/parser/README.md, Parser Plugin Source --- # Parser Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/pivot/_index.md b/content/telegraf/v1/processor-plugins/pivot/_index.md index 6e6e4f65f3..8e4855d9e2 100644 --- a/content/telegraf/v1/processor-plugins/pivot/_index.md +++ b/content/telegraf/v1/processor-plugins/pivot/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/pivot/README.md, Pivot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/pivot/README.md, Pivot Plugin Source --- # Pivot Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/port_name/_index.md b/content/telegraf/v1/processor-plugins/port_name/_index.md index 3e6244797d..f74cf96774 100644 --- a/content/telegraf/v1/processor-plugins/port_name/_index.md +++ b/content/telegraf/v1/processor-plugins/port_name/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/port_name/README.md, Port Name Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/port_name/README.md, Port Name Lookup Plugin Source --- # Port Name Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/printer/_index.md b/content/telegraf/v1/processor-plugins/printer/_index.md index 
5cda8275f9..f42278c06c 100644 --- a/content/telegraf/v1/processor-plugins/printer/_index.md +++ b/content/telegraf/v1/processor-plugins/printer/_index.md @@ -10,7 +10,7 @@ introduced: "v1.1.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/printer/README.md, Printer Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/printer/README.md, Printer Plugin Source --- # Printer Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/regex/_index.md b/content/telegraf/v1/processor-plugins/regex/_index.md index f2d13c045f..ad91a57035 100644 --- a/content/telegraf/v1/processor-plugins/regex/_index.md +++ b/content/telegraf/v1/processor-plugins/regex/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/regex/README.md, Regex Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/regex/README.md, Regex Plugin Source --- # Regex Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/rename/_index.md b/content/telegraf/v1/processor-plugins/rename/_index.md index b0becb81b9..b479855cbc 100644 --- a/content/telegraf/v1/processor-plugins/rename/_index.md +++ b/content/telegraf/v1/processor-plugins/rename/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/rename/README.md, Rename Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/rename/README.md, Rename Plugin Source --- # Rename Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/reverse_dns/_index.md 
b/content/telegraf/v1/processor-plugins/reverse_dns/_index.md index 6130a66d27..0c10608c4f 100644 --- a/content/telegraf/v1/processor-plugins/reverse_dns/_index.md +++ b/content/telegraf/v1/processor-plugins/reverse_dns/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/reverse_dns/README.md, Reverse DNS Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/reverse_dns/README.md, Reverse DNS Plugin Source --- # Reverse DNS Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/round/_index.md b/content/telegraf/v1/processor-plugins/round/_index.md index 24a97f4b88..1f8ef22874 100644 --- a/content/telegraf/v1/processor-plugins/round/_index.md +++ b/content/telegraf/v1/processor-plugins/round/_index.md @@ -10,7 +10,7 @@ introduced: "v1.36.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/round/README.md, Round Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/round/README.md, Round Plugin Source --- # Round Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/s2geo/_index.md b/content/telegraf/v1/processor-plugins/s2geo/_index.md index 7b4d50bd69..0acfdb7f80 100644 --- a/content/telegraf/v1/processor-plugins/s2geo/_index.md +++ b/content/telegraf/v1/processor-plugins/s2geo/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/s2geo/README.md, S2 Geo Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/s2geo/README.md, S2 Geo Plugin Source --- # S2 Geo Processor Plugin diff 
--git a/content/telegraf/v1/processor-plugins/scale/_index.md b/content/telegraf/v1/processor-plugins/scale/_index.md index 8b47950c42..18fb212f50 100644 --- a/content/telegraf/v1/processor-plugins/scale/_index.md +++ b/content/telegraf/v1/processor-plugins/scale/_index.md @@ -10,7 +10,7 @@ introduced: "v1.27.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/scale/README.md, Scale Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/scale/README.md, Scale Plugin Source --- # Scale Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md b/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md index b32a1e3acf..8f67100fbc 100644 --- a/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md +++ b/content/telegraf/v1/processor-plugins/snmp_lookup/_index.md @@ -10,7 +10,7 @@ introduced: "v1.30.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/snmp_lookup/README.md, SNMP Lookup Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/snmp_lookup/README.md, SNMP Lookup Plugin Source --- # SNMP Lookup Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/split/_index.md b/content/telegraf/v1/processor-plugins/split/_index.md index 2407f23a97..7cde5f9677 100644 --- a/content/telegraf/v1/processor-plugins/split/_index.md +++ b/content/telegraf/v1/processor-plugins/split/_index.md @@ -10,7 +10,7 @@ introduced: "v1.28.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/split/README.md, Split Plugin Source + - 
https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/split/README.md, Split Plugin Source --- # Split Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/starlark/_index.md b/content/telegraf/v1/processor-plugins/starlark/_index.md index 3647123ca0..6bfade8f8c 100644 --- a/content/telegraf/v1/processor-plugins/starlark/_index.md +++ b/content/telegraf/v1/processor-plugins/starlark/_index.md @@ -10,7 +10,7 @@ introduced: "v1.15.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/starlark/README.md, Starlark Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/starlark/README.md, Starlark Plugin Source --- # Starlark Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/strings/_index.md b/content/telegraf/v1/processor-plugins/strings/_index.md index b5b59e9c87..45ce80ad7e 100644 --- a/content/telegraf/v1/processor-plugins/strings/_index.md +++ b/content/telegraf/v1/processor-plugins/strings/_index.md @@ -10,7 +10,7 @@ introduced: "v1.8.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/strings/README.md, Strings Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/strings/README.md, Strings Plugin Source --- # Strings Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/tag_limit/_index.md b/content/telegraf/v1/processor-plugins/tag_limit/_index.md index cb1773c466..1b5c76f2b9 100644 --- a/content/telegraf/v1/processor-plugins/tag_limit/_index.md +++ b/content/telegraf/v1/processor-plugins/tag_limit/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - 
https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/tag_limit/README.md, Tag Limit Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/tag_limit/README.md, Tag Limit Plugin Source --- # Tag Limit Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/template/_index.md b/content/telegraf/v1/processor-plugins/template/_index.md index 2fe864e7c9..27bc28d996 100644 --- a/content/telegraf/v1/processor-plugins/template/_index.md +++ b/content/telegraf/v1/processor-plugins/template/_index.md @@ -10,7 +10,7 @@ introduced: "v1.14.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/template/README.md, Template Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/template/README.md, Template Plugin Source --- # Template Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/timestamp/_index.md b/content/telegraf/v1/processor-plugins/timestamp/_index.md index ef3a1b872f..e0fe607236 100644 --- a/content/telegraf/v1/processor-plugins/timestamp/_index.md +++ b/content/telegraf/v1/processor-plugins/timestamp/_index.md @@ -10,7 +10,7 @@ introduced: "v1.31.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/timestamp/README.md, Timestamp Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/timestamp/README.md, Timestamp Plugin Source --- # Timestamp Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/topk/_index.md b/content/telegraf/v1/processor-plugins/topk/_index.md index 14a1d32907..8b2a003bfc 100644 --- a/content/telegraf/v1/processor-plugins/topk/_index.md +++ b/content/telegraf/v1/processor-plugins/topk/_index.md @@ -10,7 +10,7 @@ introduced: "v1.7.0" os_support: 
"freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/topk/README.md, TopK Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/topk/README.md, TopK Plugin Source --- # TopK Processor Plugin diff --git a/content/telegraf/v1/processor-plugins/unpivot/_index.md b/content/telegraf/v1/processor-plugins/unpivot/_index.md index a56eab141e..a09c12d283 100644 --- a/content/telegraf/v1/processor-plugins/unpivot/_index.md +++ b/content/telegraf/v1/processor-plugins/unpivot/_index.md @@ -10,7 +10,7 @@ introduced: "v1.12.0" os_support: "freebsd, linux, macos, solaris, windows" related: - /telegraf/v1/configure_plugins/ - - https://github.com/influxdata/telegraf/tree/v1.38.0/plugins/processors/unpivot/README.md, Unpivot Plugin Source + - https://github.com/influxdata/telegraf/tree/v1.37.3/plugins/processors/unpivot/README.md, Unpivot Plugin Source --- # Unpivot Processor Plugin diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index f5ae49a89e..96094f5e1f 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,81 +11,6 @@ menu: weight: 60 --- -## v1.38.0 {date="2026-03-09"} - -### Important Changes - -- PR [#17961](https://github.com/influxdata/telegraf/pull/17961) makes the - **strict environment variable handling the default**! In case you need the old - behavior you can opt-out using the `--non-strict-env-handling` flag. 
- -### New Plugins - -- [#18183](https://github.com/influxdata/telegraf/pull/18183) `inputs.sip` Add plugin -- [#18223](https://github.com/influxdata/telegraf/pull/18223) `outputs.influxdb_v3` Add plugin - -### Features - -- [#18086](https://github.com/influxdata/telegraf/pull/18086) `agent` Optimise disk buffer strategy -- [#18232](https://github.com/influxdata/telegraf/pull/18232) `common.opcua` Add string configuration option for node ID -- [#18411](https://github.com/influxdata/telegraf/pull/18411) `common.opcua` Add support for datetime arrays -- [#18181](https://github.com/influxdata/telegraf/pull/18181) `inputs.docker` Implement startup error behavior options -- [#18425](https://github.com/influxdata/telegraf/pull/18425) `inputs.gnmi` Allow to emit delete metrics -- [#18466](https://github.com/influxdata/telegraf/pull/18466) `inputs.mqtt_consumer` Add option for maximum reconnect interval -- [#18063](https://github.com/influxdata/telegraf/pull/18063) `inputs.mysql` Add replication latency fields -- [#18117](https://github.com/influxdata/telegraf/pull/18117) `inputs.mysql` Add wsrep provider options fields -- [#18272](https://github.com/influxdata/telegraf/pull/18272) `inputs.mysql` Support encryption algorithm statistics if present -- [#18134](https://github.com/influxdata/telegraf/pull/18134) `inputs.nftables` Monitor set element counts -- [#18246](https://github.com/influxdata/telegraf/pull/18246) `inputs.nftables` Support named counters -- [#18259](https://github.com/influxdata/telegraf/pull/18259) `inputs.statsd` Add support for Datadog service checks -- [#18393](https://github.com/influxdata/telegraf/pull/18393) `outputs.health` Add option for setting default status -- [#18415](https://github.com/influxdata/telegraf/pull/18415) `outputs.heartbeat` Add logging information -- [#17577](https://github.com/influxdata/telegraf/pull/17577) `outputs.heartbeat` Add status evaluation -- [#18305](https://github.com/influxdata/telegraf/pull/18305) 
`outputs.influxdb_v2` Add trace logging for write request timing -- [#18422](https://github.com/influxdata/telegraf/pull/18422) `outputs.mongodb` Allow writing metrics in batches -- [#17997](https://github.com/influxdata/telegraf/pull/17997) `outputs.opentelemetry` Support http protocol -- [#18337](https://github.com/influxdata/telegraf/pull/18337) `outputs.redistimeseries` Add option to expire values -- [#18339](https://github.com/influxdata/telegraf/pull/18339) `outputs.stackdriver` Add credentials file support for stackdriver output plugin -- [#18341](https://github.com/influxdata/telegraf/pull/18341) `prometheus` Add UTF-8 metric and label name sanitization - -### Bugfixes - -- [#18429](https://github.com/influxdata/telegraf/pull/18429) `common.opcua` Use configured timestamp format for datetime arrays -- [#18381](https://github.com/influxdata/telegraf/pull/18381) `inputs.fibaro` Handle numeric value2 field from HC3 devices -- [#18424](https://github.com/influxdata/telegraf/pull/18424) `inputs.http` Close gzip request body on early failures -- [#18412](https://github.com/influxdata/telegraf/pull/18412) `inputs.internet_speed` Fix server_id_include filter logic -- [#18452](https://github.com/influxdata/telegraf/pull/18452) `inputs.mqtt_consumer` Rely on paho auto-reconnect to restore message flow after network disruption -- [#18392](https://github.com/influxdata/telegraf/pull/18392) `inputs.opcua_listener` Prevent panic on events with empty fields -- [#18387](https://github.com/influxdata/telegraf/pull/18387) `inputs.smart` Include NVMe SMART data in smart_device measurement -- [#18416](https://github.com/influxdata/telegraf/pull/18416) `outputs.influxdb` Prevent goroutine leak on gzip write failure -- [#18418](https://github.com/influxdata/telegraf/pull/18418) `outputs.opentelemetry` Prevent goroutine leak on gzip write failure - -### Dependency Updates - -- [#18436](https://github.com/influxdata/telegraf/pull/18436) `deps` Bump cloud.google.com/go/bigquery 
from 1.73.1 to 1.74.0 -- [#18444](https://github.com/influxdata/telegraf/pull/18444) `deps` Bump github.com/IBM/sarama from 1.46.3 to 1.47.0 -- [#18449](https://github.com/influxdata/telegraf/pull/18449) `deps` Bump github.com/SAP/go-hdb from 1.15.0 to 1.15.1 -- [#18398](https://github.com/influxdata/telegraf/pull/18398) `deps` Bump github.com/antchfx/xpath from 1.3.5 to 1.3.6 -- [#18442](https://github.com/influxdata/telegraf/pull/18442) `deps` Bump github.com/aws/smithy-go from 1.24.1 to 1.24.2 -- [#18400](https://github.com/influxdata/telegraf/pull/18400) `deps` Bump github.com/hashicorp/consul/api from 1.33.2 to 1.33.3 -- [#18438](https://github.com/influxdata/telegraf/pull/18438) `deps` Bump github.com/hashicorp/consul/api from 1.33.3 to 1.33.4 -- [#18446](https://github.com/influxdata/telegraf/pull/18446) `deps` Bump github.com/lxc/incus/v6 from 6.21.0 to 6.22.0 -- [#18441](https://github.com/influxdata/telegraf/pull/18441) `deps` Bump github.com/microsoft/go-mssqldb from 1.9.6 to 1.9.8 -- [#18404](https://github.com/influxdata/telegraf/pull/18404) `deps` Bump github.com/nats-io/nats.go from 1.48.0 to 1.49.0 -- [#18439](https://github.com/influxdata/telegraf/pull/18439) `deps` Bump github.com/prometheus/procfs from 0.19.2 to 0.20.1 -- [#18440](https://github.com/influxdata/telegraf/pull/18440) `deps` Bump github.com/shirou/gopsutil/v4 from 4.26.1 to 4.26.2 -- [#18402](https://github.com/influxdata/telegraf/pull/18402) `deps` Bump github.com/vmware/govmomi from 0.52.0 to 0.53.0 -- [#18399](https://github.com/influxdata/telegraf/pull/18399) `deps` Bump go.step.sm/crypto from 0.76.0 to 0.76.2 -- [#18450](https://github.com/influxdata/telegraf/pull/18450) `deps` Bump golang.org/x/net from 0.50.0 to 0.51.0 -- [#18437](https://github.com/influxdata/telegraf/pull/18437) `deps` Bump google.golang.org/api from 0.266.0 to 0.269.0 -- [#18448](https://github.com/influxdata/telegraf/pull/18448) `deps` Bump k8s.io/api from 0.35.1 to 0.35.2 -- 
[#18447](https://github.com/influxdata/telegraf/pull/18447) `deps` Bump k8s.io/apimachinery from 0.35.1 to 0.35.2 -- [#18443](https://github.com/influxdata/telegraf/pull/18443) `deps` Bump k8s.io/client-go from 0.35.1 to 0.35.2 -- [#18403](https://github.com/influxdata/telegraf/pull/18403) `deps` Bump modernc.org/sqlite from 1.45.0 to 1.46.1 -- [#18397](https://github.com/influxdata/telegraf/pull/18397) `deps` Bump the aws-sdk-go-v2 group with 11 updates -- [#18435](https://github.com/influxdata/telegraf/pull/18435) `deps` Bump the aws-sdk-go-v2 group with 2 updates -- [#18396](https://github.com/influxdata/telegraf/pull/18396) `deps` Bump tj-actions/changed-files from 47.0.2 to 47.0.4 - ## v1.37.3 {date="2026-02-23"} ### Bugfixes diff --git a/cypress/e2e/content/api-reference.cy.js b/cypress/e2e/content/api-reference.cy.js index ceeaffeffc..a0d50ff788 100644 --- a/cypress/e2e/content/api-reference.cy.js +++ b/cypress/e2e/content/api-reference.cy.js @@ -1,78 +1,76 @@ /// + +/** + * API Reference Documentation E2E Tests + * + * Tests: + * 1. API reference pages (link validation, content structure) + * 2. 3-column layout with TOC (for InfluxDB 3 Core/Enterprise) + * 3. Hugo-native tag page rendering + * 4. 
Related links from OpenAPI x-related → frontmatter → rendered HTML + * + * Run with: + * node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/api-reference.cy.js" content/influxdb3/core/reference/api/_index.md + */ + const fakeGoogleTagManager = { trackingOptIn: () => {}, - trackingOptOut: () => {} -} + trackingOptOut: () => {}, +}; describe('API reference content', () => { + // API section index pages (generated from article data) const subjects = [ - '/influxdb/cloud/api/', - '/influxdb/cloud/api/v1/', - '/influxdb/cloud/api/v1-compatibility/', - '/influxdb/cloud/api/v2/', - - '/influxdb/v2/api/', - '/influxdb/v2/api/v1/', - '/influxdb/v2/api/v1-compatibility/', - '/influxdb/v2/api/v2/', - + '/influxdb3/core/api/', + '/influxdb3/enterprise/api/', '/influxdb3/cloud-dedicated/api/', - '/influxdb3/cloud-dedicated/api/management/', - '/influxdb3/cloud-dedicated/api/v1/', - '/influxdb3/cloud-dedicated/api/v1-compatibility/', - '/influxdb3/cloud-dedicated/api/v2/', - '/influxdb3/cloud-serverless/api/', - '/influxdb3/cloud-serverless/api/v1/', - '/influxdb3/cloud-serverless/api/v1-compatibility/', - '/influxdb3/cloud-serverless/api/v2/', - '/influxdb3/clustered/api/', - // TODO '/influxdb3/clustered/api/management/', - '/influxdb3/clustered/api/v1/', - '/influxdb3/clustered/api/v1-compatibility/', - '/influxdb3/clustered/api/v2/', - - '/influxdb3/core/api/', - '/influxdb3/enterprise/api/', + '/influxdb/cloud/api/', + '/influxdb/v2/api/', ]; - subjects.forEach((subject) => { describe(subject, () => { beforeEach(() => { - // Intercept and modify the page HTML before it loads - cy.intercept('GET', '**', (req) => { - req.continue((res) => { - if (res.headers['content-type']?.includes('text/html')) { - // Modify the Kapa widget script attributes - // Avoid socket errors from fpjs in tests by disabling fingerprinting - res.body = res.body.replace( - /data-user-analytics-fingerprint-enabled="true"/, - 'data-user-analytics-fingerprint-enabled="false"' - ); - } - 
}); - }); + // Intercept and modify the page HTML before it loads + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + // Modify the Kapa widget script attributes + // Avoid socket errors from fpjs in tests by disabling fingerprinting + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); cy.visit(subject); - window.fcdsc = fakeGoogleTagManager; cy.stub(window.fcdsc, 'trackingOptIn').as('trackingOptIn'); cy.stub(window.fcdsc, 'trackingOptOut').as('trackingOptOut'); }); it(`has API info`, function () { - cy.get('script[data-user-analytics-fingerprint-enabled=false]').should('have.length', 1); + cy.get('script[data-user-analytics-fingerprint-enabled=false]').should( + 'have.length', + 1 + ); cy.get('h1').first().should('have.length', 1); - cy.get('[data-role$=description]').should('have.length', 1); + // Check for description element (either article--description class or data-role attribute) + cy.get('.article--description, [data-role$=description]').should( + 'have.length.at.least', + 1 + ); }); it('links back to the version home page', function () { - cy.get('a.back').contains('Docs') - .should('have.length', 1) - .click(); + cy.get('a.back').contains('Docs').should('have.length', 1).click(); // Path should be the first two segments and trailing slash in $subject - cy.location('pathname') - .should('eq', subject.replace(/^(\/[^/]+\/[^/]+\/).*/, '$1')); + cy.location('pathname').should( + 'eq', + subject.replace(/^(\/[^/]+\/[^/]+\/).*/, '$1') + ); cy.get('h1').should('have.length', 1); }); it('contains valid internal links', function () { @@ -88,8 +86,7 @@ describe('API reference content', () => { // cy.request doesn't show in your browser's Developer Tools // because the request comes from Node, not from the browser. 
cy.request($a.attr('href')).its('status').should('eq', 200); - }); - + }); }); }); it('contains valid external links', function () { @@ -109,3 +106,449 @@ describe('API reference content', () => { }); }); }); + +/** + * API Reference Layout Tests + * Tests layout for InfluxDB 3 Core/Enterprise API documentation + */ +describe('API reference layout', () => { + const layoutSubjects = [ + '/influxdb3/core/api/write-data/', + '/influxdb3/enterprise/api/write-data/', + ]; + + layoutSubjects.forEach((subject) => { + describe(`${subject} layout`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(subject); + }); + + describe('Layout Structure', () => { + it('displays sidebar', () => { + cy.get('.sidebar').should('be.visible'); + }); + + it('displays API content area', () => { + cy.get('.api-content, .content-wrapper, .article--content').should( + 'exist' + ); + }); + + it('displays TOC on page', () => { + cy.get('.api-toc').should('exist'); + }); + }); + + describe('Hugo-native renderer', () => { + it('renders API operations container', () => { + cy.get('.api-hugo-native, .api-operations-section').should('exist'); + }); + + it('renders operation elements', () => { + cy.get('.api-operation').should('have.length.at.least', 1); + }); + + it('operation has method badge and path', () => { + cy.get('.api-operation') + .first() + .within(() => { + cy.get('.api-method').should('exist'); + cy.get('.api-path').should('exist'); + }); + }); + }); + }); + }); +}); + +/** + * API Tag Page Tests + * Tests Hugo-native tag pages render operations correctly + */ +describe('API tag pages', () => { + const tagPages = [ + '/influxdb3/core/api/write-data/', + '/influxdb3/core/api/query-data/', + 
'/influxdb3/enterprise/api/write-data/', + ]; + + tagPages.forEach((page) => { + describe(`Tag page ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('displays page title', () => { + cy.get('h1').should('exist'); + }); + + it('renders operations section', () => { + cy.get('.api-operations, .api-operations-section').should('exist'); + }); + + it('operations have proper structure', () => { + cy.get('.api-operation') + .first() + .within(() => { + // Check for operation header with method and path + cy.get('.api-operation-header, .api-operation-endpoint').should( + 'exist' + ); + cy.get('.api-method').should('exist'); + cy.get('.api-path').should('exist'); + }); + }); + + it('TOC contains operation links', () => { + cy.get('.api-toc-nav').should('exist'); + cy.get('.api-toc-link').should('have.length.at.least', 1); + }); + + it('TOC links have method badges', () => { + cy.get('.api-toc-link .api-method').should('have.length.at.least', 1); + }); + }); + }); +}); + +/** + * API Section Page Structure Tests + * Tests that API section pages show only tags (immediate children) + */ +describe('API section page structure', () => { + const sectionPages = ['/influxdb3/core/api/', '/influxdb3/enterprise/api/']; + + sectionPages.forEach((page) => { + describe(`Section page ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('displays page title', () => { + cy.get('h1').should('contain', 
'InfluxDB HTTP API'); + }); + + it('shows tag pages as children', () => { + cy.get('.children-links h3 a').should('have.length.at.least', 5); + }); + + it('does not show individual operations in content area', () => { + // Operations cards should not appear in the main content + cy.get('.article--content .api-operation-card').should('not.exist'); + }); + + it('has All endpoints link in navigation', () => { + cy.get('.sidebar a').contains('All endpoints').should('exist'); + }); + }); + }); +}); + +/** + * All Endpoints Page Tests + * Tests the "All endpoints" page shows all operations + */ +describe('All endpoints page', () => { + const allEndpointsPages = [ + '/influxdb3/core/api/all-endpoints/', + '/influxdb3/enterprise/api/all-endpoints/', + ]; + + allEndpointsPages.forEach((page) => { + describe(`All endpoints ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('displays page title "All endpoints"', () => { + cy.get('h1').should('contain', 'All endpoints'); + }); + + it('shows v3 API section', () => { + cy.get('#v3-api').should('exist'); + }); + + it('displays operation cards', () => { + cy.get('.api-operation-card').should('have.length.at.least', 10); + }); + + it('operation cards have method badges', () => { + cy.get('.api-operation-card .api-method').should( + 'have.length.at.least', + 10 + ); + }); + + it('operation cards have path codes', () => { + cy.get('.api-operation-card .api-path').should( + 'have.length.at.least', + 10 + ); + }); + + it('operation cards link to tag pages with operation anchors', () => { + cy.get('.api-operation-card') + .first() + .should('have.attr', 'href') + .and('match', /\/api\/.*\/#operation\//); + }); + + it('is accessible from 
navigation', () => { + // Navigate back to section page + cy.get('.sidebar a').contains('InfluxDB HTTP API').click(); + // Then navigate to All endpoints + cy.get('.sidebar a').contains('All endpoints').click(); + cy.url().should('include', '/all-endpoints/'); + }); + }); + }); +}); + +/** + * API Download Button Tests + * Tests that each tag page has a download button linking to the correct spec + */ +describe('API spec download buttons', () => { + const downloadTests = [ + { + page: '/influxdb3/core/api/write-data/', + specPath: '/openapi/influxdb3-core.yml', + }, + { + page: '/influxdb3/enterprise/api/write-data/', + specPath: '/openapi/influxdb3-enterprise.yml', + }, + ]; + + downloadTests.forEach(({ page, specPath }) => { + describe(`Download button on ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('has a download button', () => { + cy.get('.api-spec-download').should('exist'); + }); + + it(`download button links to ${specPath}`, () => { + cy.get('.api-spec-download') + .should('have.attr', 'href', specPath) + .and('have.attr', 'download'); + }); + }); + }); +}); + +/** + * API Code Sample Tests + * Tests that inline curl examples render correctly on tag pages + */ +describe('API code samples', () => { + const tagPages = [ + '/influxdb3/core/api/write-data/', + '/influxdb3/enterprise/api/write-data/', + ]; + + tagPages.forEach((page) => { + describe(`Code samples on ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + 
); + } + }); + }); + cy.visit(page); + }); + + it('each operation has a code sample', () => { + cy.get('.api-operation').each(($op) => { + cy.wrap($op).find('.api-code-sample').should('have.length', 1); + }); + }); + + it('code samples have header and code block', () => { + cy.get('.api-code-sample') + .first() + .within(() => { + cy.get('.api-code-sample-header').should( + 'contain', + 'Example request' + ); + cy.get('.api-code-block code').should('exist'); + }); + }); + + it('code block contains a curl command', () => { + cy.get('.api-code-block code') + .first() + .invoke('text') + .should('match', /curl --request (GET|POST|PUT|PATCH|DELETE)/); + }); + + it('curl command includes Authorization header', () => { + cy.get('.api-code-block code') + .first() + .invoke('text') + .should('include', 'Authorization: Bearer INFLUX_TOKEN'); + }); + + it('POST operations include request body in curl', () => { + cy.get('.api-operation[data-method="post"]') + .first() + .find('.api-code-block code') + .invoke('text') + .should('include', '--data-raw'); + }); + + it('code samples have Ask AI links', () => { + cy.get('.api-code-sample .api-code-ask-ai') + .first() + .should('have.attr', 'data-query') + .and('not.be.empty'); + }); + }); + }); +}); + +/** + * API Client Library Related Link Tests + * Tests that InfluxDB 3 tag pages include client library related links + */ +describe('API client library related links', () => { + const influxdb3Pages = [ + '/influxdb3/core/api/write-data/', + '/influxdb3/enterprise/api/write-data/', + ]; + + influxdb3Pages.forEach((page) => { + describe(`Client library link on ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('includes InfluxDB 3 API 
client libraries in related links', () => { + cy.get('.related ul li a') + .filter(':contains("InfluxDB 3 API client libraries")') + .should('have.length', 1) + .and('have.attr', 'href') + .and('match', /\/influxdb3\/\w+\/reference\/client-libraries\/v3\//); + }); + }); + }); +}); + +/** + * API Related Links Tests + * Tests that x-related from OpenAPI specs renders as related links on tag pages + */ +describe('API related links', () => { + const pagesWithRelated = ['/influxdb3/core/api/write-data/']; + + pagesWithRelated.forEach((page) => { + describe(`Related links on ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + it('displays a related section', () => { + cy.get('.related').should('exist'); + cy.get('.related h4#related').should('contain', 'Related'); + }); + + it('renders related links from x-related as anchor elements', () => { + cy.get('.related ul li a').should('have.length.at.least', 2); + }); + + it('related links have title text and valid href', () => { + cy.get('.related ul li a').each(($a) => { + // Each link has non-empty text + cy.wrap($a).invoke('text').should('not.be.empty'); + // Each link has an href starting with / + cy.wrap($a).should('have.attr', 'href').and('match', /^\//); + }); + }); + + it('related links resolve to valid pages', () => { + cy.get('.related ul li a').each(($a) => { + const href = $a.attr('href'); + cy.request(href).its('status').should('eq', 200); + }); + }); + }); + }); +}); diff --git a/data/labels.yml b/data/labels.yml deleted file mode 100644 index a4965bc49b..0000000000 --- a/data/labels.yml +++ /dev/null @@ -1,86 +0,0 @@ -# Label definitions for the docs-v2 repository. 
-# -# Source of truth for non-product label names, colors, and descriptions. -# Product labels are derived from data/products.yml (label_group + content_path), -# except product:shared which is defined here (applies across products). -# Review label behavior (severity, result rules) is in .github/templates/review-comment.md. -# -# When a label value is a string, it's the description and the group color applies. -# When a label value is an object, it has its own color. - -product: - description: Cross-product labels not derived from a single products.yml entry. - labels: - product:shared: - color: "#FFA500" - description: Shared content that applies to multiple products - -source: - color: "#9370DB" - description: Track how an issue or PR was created. - labels: - source:auto-detected: Created by change detection within this repo - source:dar: Generated by the DAR pipeline - source:sync: Synced from an external repository - source:feedback: From user feedback - source:manual: Human-created issue - -waiting: - color: "#FF8C00" - description: Indicate external blockers. - labels: - waiting:engineering: Waiting for engineer confirmation - waiting:pr: Blocked on another PR merging first - waiting:product: Waiting for product or PM decision - -workflow: - description: Control automation behavior. - labels: - agent-ready: - color: "#00FF00" - description: Issue can be worked autonomously by an agent - skip-review: - color: "#1E90FF" - description: PR skips the automated doc review pipeline - -area: - color: "#a89129" - description: What part of the repo a change affects. - labels: - area:agents: "AI agents, skills, hooks, and MCP config" - area:ci: "Continuous integration pipeline (verify, test, validate, publish)" - area:links: "Link management (validation, checking, fixing)" - area:site-ui: "Documentation site UI: templates, styles, JS/TS" - -release: - description: Release-gated merge workflow. 
- labels: - release:pending: - color: "#FEF2C0" - description: Waiting for product release before merging - release:ready: - color: "#0E8A16" - description: Product released, docs ready for review/merge - -onboarding: - description: Contributor onboarding. - labels: - good-first-issue: - color: "#f9f348" - description: Easy update. Good for first timers! - -review: - description: >- - Automated review outcomes. Mutually exclusive. - See .github/templates/review-comment.md for severity definitions - and result-to-label mapping. - labels: - review:approved: - color: "#28A745" - description: Automated review passed - review:changes-requested: - color: "#DC3545" - description: Automated review found blocking issues - review:needs-human: - color: "#FFC107" - description: Automated review inconclusive diff --git a/data/notifications.yaml b/data/notifications.yaml index e0fde317ac..5c513b0ff5 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -76,7 +76,7 @@ - /influxdb/cloud title: InfluxDB Docker latest tag changing to InfluxDB 3 Core slug: | - On **May 27, 2026**, the `latest` tag for InfluxDB Docker images will + On **April 7, 2026**, the `latest` tag for InfluxDB Docker images will point to InfluxDB 3 Core. To avoid unexpected upgrades, use specific version tags in your Docker deployments. 
message: | diff --git a/data/products.yml b/data/products.yml index 9693cb96dc..406df03988 100644 --- a/data/products.yml +++ b/data/products.yml @@ -2,8 +2,8 @@ influxdb3_core: name: InfluxDB 3 Core altname: InfluxDB 3 Core namespace: influxdb3 - content_path: influxdb3/core - label_group: v3-monolith + api_path: /influxdb3/core/api/ + alt_link_key: core menu_category: self-managed versions: [core] list_order: 2 @@ -40,13 +40,13 @@ influxdb3_enterprise: name: InfluxDB 3 Enterprise altname: InfluxDB 3 Enterprise namespace: influxdb3 - content_path: influxdb3/enterprise - label_group: v3-monolith + api_path: /influxdb3/enterprise/api/ + alt_link_key: enterprise menu_category: self-managed versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.8.4 + latest_patch: 3.8.3 placeholder_host: localhost:8181 limits: database: 100 @@ -78,8 +78,6 @@ influxdb3_explorer: name: InfluxDB 3 Explorer altname: Explorer namespace: influxdb3_explorer - content_path: influxdb3/explorer - label_group: explorer menu_category: tools list_order: 1 latest: explorer @@ -96,8 +94,8 @@ influxdb3_cloud_serverless: name: InfluxDB Cloud Serverless altname: InfluxDB Cloud namespace: influxdb - content_path: influxdb3/cloud-serverless - label_group: v3-distributed + api_path: /influxdb3/cloud-serverless/api/ + alt_link_key: cloud-serverless menu_category: managed versions: [cloud-serverless] list_order: 2 @@ -131,8 +129,8 @@ influxdb3_cloud_dedicated: name: InfluxDB Cloud Dedicated altname: InfluxDB Cloud namespace: influxdb - content_path: influxdb3/cloud-dedicated - label_group: v3-distributed + api_path: /influxdb3/cloud-dedicated/api/ + alt_link_key: cloud-dedicated menu_category: managed versions: [cloud-dedicated] list_order: 3 @@ -164,8 +162,8 @@ influxdb3_clustered: name: InfluxDB Clustered altname: InfluxDB Clustered namespace: influxdb - content_path: influxdb3/clustered - label_group: v3-distributed + api_path: /influxdb3/clustered/api/ + alt_link_key: clustered 
menu_category: self-managed versions: [clustered] list_order: 3 @@ -200,12 +198,8 @@ influxdb: name__v1: InfluxDB OSS v1 altname: InfluxDB OSS namespace: influxdb - content_path: - v2: influxdb/v2 - v1: influxdb/v1 - label_group: - v2: v2 - v1: v1 + api_path: /influxdb/v2/api/ + alt_link_key: v2 succeeded_by: influxdb3_core menu_category: self-managed list_order: 1 @@ -251,8 +245,8 @@ influxdb_cloud: name__vcloud: InfluxDB Cloud (TSM) altname: InfluxDB Cloud namespace: influxdb - content_path: influxdb/cloud - label_group: v2-cloud + api_path: /influxdb/cloud/api/ + alt_link_key: cloud menu_category: managed versions: [cloud] list_order: 1 @@ -282,14 +276,12 @@ influxdb_cloud: telegraf: name: Telegraf namespace: telegraf - content_path: telegraf - label_group: telegraf menu_category: other list_order: 6 versions: [v1] - latest: v1.38 + latest: v1.37 latest_patches: - v1: 1.38.0 + v1: 1.37.3 ai_sample_questions: - How do I configure Telegraf for InfluxDB 3? - How do I write a custom Telegraf plugin? 
@@ -308,8 +300,6 @@ telegraf_controller: chronograf: name: Chronograf namespace: chronograf - content_path: chronograf - label_group: chronograf menu_category: other list_order: 7 versions: [v1] @@ -325,8 +315,6 @@ chronograf: kapacitor: name: Kapacitor namespace: kapacitor - content_path: kapacitor - label_group: kapacitor menu_category: other list_order: 7 versions: [v1] @@ -342,8 +330,6 @@ kapacitor: enterprise_influxdb: name: 'InfluxDB Enterprise v1' namespace: enterprise_influxdb - content_path: enterprise_influxdb - label_group: v1-enterprise menu_category: self-managed list_order: 5 versions: [v1] @@ -398,8 +384,6 @@ influxdb_cloud1: flux: name: Flux namespace: flux - content_path: flux - label_group: flux menu_category: languages list_order: 8 versions: [v0] diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index 7498cfc937..8fe1b7ab6d 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -1740,7 +1740,11 @@ input: description: | This plugin gathers packets and bytes counters for rules within Linux's [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) - firewall, as well as set element counts. + firewall. + + > [!IMPORTANT] + > Rules are identified by the associated comment so those **comments have + > to be unique**! Rules without comment are ignored. introduced: v1.37.0 os_support: [linux] tags: [network, system] @@ -2365,18 +2369,6 @@ input: introduced: v1.14.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [network] - - name: SIP - id: sip - description: | - This plugin gathers metrics about the health and availability of [SIP - (Session Initiation - Protocol)](https://datatracker.ietf.org/doc/html/rfc3261) servers such as - PBX systems, SIP proxies, registrars, and VoIP service providers. It - sends SIP requests (typically OPTIONS) and measures response times and - status codes. 
- introduced: v1.38.0 - os_support: [freebsd, linux, macos, solaris, windows] - tags: [network] - name: Slab id: slab description: | @@ -3311,15 +3303,6 @@ output: introduced: v1.8.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [datastore] - - name: InfluxDB v3.x - id: influxdb_v3 - description: | - This plugin writes metrics to a [InfluxDB - v3.x](https://docs.influxdata.com) Core or Enterprise instance via the - HTTP API. - introduced: v1.38.0 - os_support: [freebsd, linux, macos, solaris, windows] - tags: [datastore] - name: Inlong id: inlong description: | @@ -3511,7 +3494,7 @@ output: id: opentelemetry description: | This plugin writes metrics to [OpenTelemetry](https://opentelemetry.io) - servers and agents via gRPC or HTTP. + servers and agents via gRPC. introduced: v1.20.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [logging, messaging] diff --git a/docs/plans/2025-02-06-clustered-cloud-dedicated-api-structure-design.md b/docs/plans/2025-02-06-clustered-cloud-dedicated-api-structure-design.md new file mode 100644 index 0000000000..4ed939b6a8 --- /dev/null +++ b/docs/plans/2025-02-06-clustered-cloud-dedicated-api-structure-design.md @@ -0,0 +1,154 @@ +# Clustered & Cloud Dedicated API Documentation Structure Design + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Unified API documentation for Clustered and Cloud Dedicated with separate downloadable specs for Data API and Management API. + +**Architecture:** Single nav menu combining tags from both API specs, with dual download buttons on landing page and context-aware buttons on tag pages. 
+ +**Tech Stack:** Hugo templates, OpenAPI specs, existing API doc generator + +*** + +## Context + +Clustered and Cloud Dedicated have two distinct API planes: + +- **Data API** - Write and Query endpoints using database tokens +- **Management API** - Databases, Tables, Tokens endpoints using management tokens + +## Design Decisions + +### Navigation Structure + +Single `/api/` section with combined nav: + +``` +InfluxDB HTTP API +├── Quick start (conceptual) +├── Authentication (conceptual, covers both token types) +├── API compatibility (conceptual) +├── Common parameters (conceptual) +├── Headers (conceptual) +├── Response codes (conceptual) +├── Database tokens (Management API) +├── Databases (Management API) +├── Tables (Management API) +├── Ping (Data API) +├── Query data (Data API) +├── Write data (Data API) +└── All endpoints (combined) +``` + +### Download Buttons + +**Landing page:** Two buttons side-by-side + +- "Download Data API Spec" → `/openapi/influxdb-{product}-v2-data-api.yml` +- "Download Management API Spec" → `/openapi/influxdb-{product}-management-api.yml` + +**Tag pages:** Context-aware single button based on `staticFilePath` + +### Authentication Page + +Single unified page covering: + +- Token types table (Management vs Database) +- Authentication schemes table (Bearer, Token, Basic, Query string) +- Which endpoints use which token type +- Security schemes from OpenAPI spec + +### OpenAPI Spec Organization + +**`v2/ref.yml` (Data API):** + +- Contains all conceptual tags (Quick start, Authentication, etc.) +- Contains Data API operation tags (Ping, Query data, Write data) + +**`management/openapi.yml`:** + +- NO CHANGES to source file +- Contains only operation tags (Database tokens, Databases, Tables) + +Generator combines both specs into unified `articles.yml`. 
+ +### Cleanup Required + +Remove old pages: + +- `/content/influxdb3/clustered/api/v2/_index.html` +- `/content/influxdb3/cloud-dedicated/api/v2/_index.html` +- `/content/influxdb3/*/api/admin-authentication-management-operations/` +- `/content/influxdb3/*/api/management-authentication-admin-operations/` + +*** + +## Implementation Tasks + +### Task 1: Update Authentication tag in Data API specs + +Update `api-docs/influxdb3/clustered/v2/ref.yml` and `api-docs/influxdb3/cloud-dedicated/v2/ref.yml`: + +- Revise Authentication tag description to cover both token types +- Include table of token types and which endpoints use them +- Keep `showSecuritySchemes: true` for security scheme rendering + +### Task 2: Update Quick start tag in Data API specs + +Update Quick start in both v2/ref.yml files: + +- Cover both Data and Management API getting started flow +- Show management token creation, then database/token setup, then write/query + +### Task 3: Add dual download buttons to API landing page + +Modify `layouts/api/list.html` or create partial: + +- Detect Clustered/Cloud Dedicated products +- Show two download buttons on section index pages +- Style buttons side-by-side + +### Task 4: Update tag page download button logic + +Modify `layouts/api/single.html` and/or `layouts/api/list.html`: + +- Detect API type from `staticFilePath` (contains `management-api` or `v2-data-api`) +- Show appropriate download button for the API + +### Task 5: Remove old v2 HTML pages + +Delete: + +- `content/influxdb3/clustered/api/v2/` +- `content/influxdb3/cloud-dedicated/api/v2/` + +### Task 6: Remove old authentication directories + +Delete leftover directories: + +- `content/influxdb3/clustered/api/admin-authentication-management-operations/` +- `content/influxdb3/clustered/api/management-authentication-admin-operations/` +- `content/influxdb3/cloud-dedicated/api/admin-authentication-management-operations/` +- 
`content/influxdb3/cloud-dedicated/api/management-authentication-admin-operations/` + +### Task 7: Regenerate API docs and verify + +Run `yarn build:api-docs` and verify: + +- Nav shows combined tags from both APIs +- Authentication page has unified content +- Download buttons work correctly +- Old pages are gone + +*** + +## Success Criteria + +- [ ] Single Authentication page covers both token types clearly +- [ ] Landing page shows two download buttons for Clustered/Cloud Dedicated +- [ ] Tag pages show context-appropriate download button +- [ ] Nav combines tags from both API specs +- [ ] Old v2 HTML pages removed +- [ ] Old duplicate authentication directories removed +- [ ] `yarn build:api-docs` succeeds +- [ ] Hugo builds without errors diff --git a/docs/plans/2026-01-07-api-reference-rapidoc-migration.md b/docs/plans/2026-01-07-api-reference-rapidoc-migration.md new file mode 100644 index 0000000000..9d22a8da00 --- /dev/null +++ b/docs/plans/2026-01-07-api-reference-rapidoc-migration.md @@ -0,0 +1,184 @@ +# API Reference RapiDoc Migration Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Migrate all InfluxDB products from Redoc-based API reference to the new RapiDoc-based UI that's already working for influxdb3-core and influxdb3-enterprise. + +**Architecture:** The new API reference uses RapiDoc Mini to render OpenAPI specs. The generation script (`api-docs/scripts/generate-openapi-articles.ts`) processes OpenAPI specs and creates Hugo content pages with frontmatter that references spec files. Hugo layouts (`layouts/api/` and `layouts/api-operation/`) render these pages using the RapiDoc component. 
+ +**Tech Stack:** TypeScript (generation scripts), Hugo templates, RapiDoc web component, OpenAPI 3.0 specs + +*** + +## Overview + +### Products to Migrate + +| Product | Status | Spec Location | Target Content Path | +| ----------------------- | ------- | ----------------------------------------------------------- | ----------------------------------------- | +| influxdb3-core | ✅ Done | `api-docs/influxdb3/core/v3/ref.yml` | `content/influxdb3/core/api/` | +| influxdb3-enterprise | ✅ Done | `api-docs/influxdb3/enterprise/v3/ref.yml` | `content/influxdb3/enterprise/api/` | +| cloud-dedicated | Partial | `api-docs/influxdb3/cloud-dedicated/management/openapi.yml` | `content/influxdb3/cloud-dedicated/api/` | +| cloud-serverless | Partial | `api-docs/influxdb3/cloud-serverless/v2/ref.yml` | `content/influxdb3/cloud-serverless/api/` | +| clustered | Partial | `api-docs/influxdb3/clustered/management/openapi.yml` | `content/influxdb3/clustered/api/` | +| cloud-v2 | Partial | `api-docs/influxdb/cloud/v2/ref.yml` | `content/influxdb/cloud/api/` | +| oss-v2 | Partial | `api-docs/influxdb/v2/v2/ref.yml` | `content/influxdb/v2/api/` | +| oss-v1 | Partial | `api-docs/influxdb/v1/v1/ref.yml` | `content/influxdb/v1/api/` | +| enterprise\_influxdb-v1 | Partial | `api-docs/enterprise_influxdb/v1/v1/ref.yml` | `content/enterprise_influxdb/v1/api/` | + +### Key Files + +- **Generation script:** `api-docs/scripts/generate-openapi-articles.ts` +- **Core conversion:** `api-docs/scripts/openapi-paths-to-hugo-data/index.ts` +- **Hugo layouts:** `layouts/api/`, `layouts/api-operation/` +- **RapiDoc component:** `assets/js/components/rapidoc-mini.ts` +- **Styles:** `assets/styles/layouts/_api-layout.scss` + +*** + +**API Tag Page Consolidation Complete** + +The API documentation structure has been refactored from individual operation pages to consolidated tag pages: + +1. **Before**: Each API operation had its own page (e.g., `/api/v3/configure/distinct_cache/`) +2. 
**After**: All operations for a tag are rendered inline on the tag page (e.g., `/api/cache-data/#post-/api/v3/configure/distinct_cache`) + +**Key implementation details:** + +- Server-side TOC generated from frontmatter `operations` array using Hugo templates +- `safeURL` filter prevents Hugo from URL-encoding anchor slashes +- JavaScript `api-toc.ts` detects pre-rendered TOC and preserves it +- RapiDoc's `scrollToPath()` method handles TOC click navigation to shadow DOM elements +- `goto-path` attribute initializes RapiDoc to scroll to operation from URL hash on page load +- `update-route="true"` enables RapiDoc to update URL hash as user navigates + +See [API tag pages design](2026-01-21-api-tag-pages-design.md) for link anchor patterns and route information. + +## Fix all InfluxDB products + +These products already have generated content but may need spec adjustments and testing. + +### Task 1.1: Verify API Generation + +**Files:** + +- Check: `content/influxdb3/cloud-dedicated/api/_index.md` +- Check: `api-docs/influxdb3/cloud-dedicated/management/openapi.yml` +- Verify: `static/openapi/influxdb3-cloud-dedicated/` + +**Step 1: Check existing generated content** + +```bash +ls -la content/influxdb3/cloud-dedicated/api/ +cat content/influxdb3/cloud-dedicated/api/_index.md +``` + +Expected: Should see `_index.md` and subdirectories for each tag. + +**Step 2: Verify OpenAPI spec exists and is valid** + +```bash +head -50 api-docs/influxdb3/cloud-dedicated/management/openapi.yml +``` + +Expected: Valid OpenAPI 3.x spec with `openapi:`, `info:`, `paths:` sections. + +**Step 3: Run generation** + +```bash +yarn build:api-docs +``` + +Or for just this product: + +```bash +node api-docs/scripts/dist/generate-openapi-articles.js cloud-dedicated +``` + +**Step 4: Start Hugo and verify pages render** + +```bash +npx hugo server --port 1315 +``` + +Visit the product URL--for example: + +Expected: API reference pages render with RapiDoc component showing operations. 
+ +**Step 5: Check for console errors** + +Open browser DevTools, verify no JavaScript errors related to RapiDoc. + +**Step 6: Commit if working** + +```bash +git add content/influxdb3/cloud-dedicated/api/ +git add static/openapi/influxdb3-cloud-dedicated/ +git add data/article-data/influxdb3/cloud-dedicated/ +git commit -m "feat(api): generate cloud-dedicated API reference with RapiDoc" +``` + +### How to generate API reference articles + +**Step 1: Rebuild TypeScript** + +```bash +cd api-docs/scripts && yarn build +``` + +Or from root: + +```bash +tsc --project api-docs/scripts/tsconfig.json +``` + +**Step 2: Test compilation succeeded** + +```bash +node api-docs/scripts/dist/generate-openapi-articles.js --help +``` + +**Step 3: Commit the config change** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): enable cloud-v2 product config for RapiDoc migration" +``` + +*** + +## Verification Checklist + +Before considering migration complete: + +- [ ] All product API pages render without errors +- [ ] RapiDoc "Try It Out" works for each product +- [ ] Mobile responsive layout works correctly +- [ ] Navigation menus updated +- [ ] Old URLs redirect to new locations +- [ ] E2E tests pass +- [ ] No console errors in browser DevTools +- [ ] Links validation passes + +*** + +## Rollback Plan + +If issues are found: + +1. Revert the product config changes in `generate-openapi-articles.ts` +2. Remove generated content directories +3. 
Restore original navigation files from git history + +```bash +git checkout HEAD~N -- content/influxdb/cloud/reference/api/ +git checkout HEAD~N -- api-docs/scripts/generate-openapi-articles.ts +``` + +*** + +## Notes + +- The `useTagBasedGeneration` option creates pages organized by OpenAPI tags (used for influxdb3 products) +- The path-based generation creates pages organized by API paths (used for v2 products) +- The `skipParentMenu` option prevents duplicate menu entries when existing reference pages have menus diff --git a/docs/plans/2026-01-21-api-tag-pages-design.md b/docs/plans/2026-01-21-api-tag-pages-design.md new file mode 100644 index 0000000000..ddf6c6d6af --- /dev/null +++ b/docs/plans/2026-01-21-api-tag-pages-design.md @@ -0,0 +1,145 @@ +# API Tag Pages Design + +## Overview + +Consolidate API documentation onto tag pages, where each tag page displays all operations for that tag using RapiDoc. This replaces the previous path-based page structure. + +## Goals + +1. Keep tag-based navigation in the left sidebar +2. Remove operations as children of tags in the left sidebar +3. Each tag page displays all RapiDoc renderings for operations in that tag +4. "On this page" TOC links to Overview and each operation +5. No frame/internal scrolling - page scrolls naturally as one document +6. Consistent styling with existing implementation +7. 
Clear visual separation between operations + +## URL Structure + +- **Tag page:** `/influxdb3/core/api/cache-data/` +- **Operation anchor:** `/influxdb3/core/api/cache-data/#post-/api/v3/configure/distinct_cache` + +## RapiDoc Anchor Reference + +| Feature | Format/Value | +| ------------------------------ | ---------------------------------------------------------------------------------- | +| Anchor format | `#{method}-{path}` (e.g., `#post-/api/v3/configure/distinct_cache`) | +| `goto-path` attribute | Navigate to operation on load: `goto-path="post-/api/v3/configure/distinct_cache"` | +| `scrollToPath(path)` method | Programmatic navigation | +| `update-route` (default: true) | Updates URL hash as user scrolls | +| `route-prefix` (default: #) | Hash prefix for routes | +| Built-in anchors | `#overview`, `#servers`, `#auth`, `#operations-top` | + +## Page Layout + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Header / Top Nav │ +├──────────┬─────────────────────────────────────┬────────────┤ +│ │ │ │ +│ Left │ Main Content │ On This │ +│ Sidebar │ │ Page TOC │ +│ (nav) │ ┌─────────────────────────────┐ │ │ +│ │ │

Cache data

│ │ Overview │ +│ │ │

Tag description...

│ │ POST ... │ +│ │ └─────────────────────────────┘ │ DELETE .. │ +│ │ │ POST ... │ +│ │ ┌─────────────────────────────┐ │ DELETE .. │ +│ │ │ RapiDoc (full height, │ │ │ +│ │ │ no internal scroll) │ │ │ +│ │ │ │ │ │ +│ │ │ - POST distinct_cache │ │ │ +│ │ │ - DELETE distinct_cache │ │ │ +│ │ │ - POST last_cache │ │ │ +│ │ │ - DELETE last_cache │ │ │ +│ │ │ │ │ │ +│ │ └─────────────────────────────┘ │ │ +│ │ │ │ +├──────────┴─────────────────────────────────────┴────────────┤ +│ Footer │ +└─────────────────────────────────────────────────────────────┘ +``` + +## RapiDoc Configuration + +- `render-style="read"` - Linear document flow (no internal scrolling) +- `spec-url` - Tag-specific spec (e.g., `/openapi/influxdb3-core/tags/tags/influxdb3-core-cache-data.yaml`) +- `update-route="true"` - URL updates as user navigates (default) +- No fixed height on container - expands to fit content + +## "On This Page" TOC + +Generated server-side from frontmatter `operations` array: + +```yaml +operations: + - operationId: PostConfigureDistinctCache + method: POST + path: /api/v3/configure/distinct_cache + summary: Create distinct cache + - operationId: DeleteConfigureDistinctCache + method: DELETE + path: /api/v3/configure/distinct_cache + summary: Delete distinct cache +``` + +TOC output: + +``` +ON THIS PAGE +- Overview +- POST /api/v3/configure/distinct_cache +- DELETE /api/v3/configure/distinct_cache +- POST /api/v3/configure/last_cache +- DELETE /api/v3/configure/last_cache +``` + +Links use `#{method}-{path}` format matching RapiDoc anchors. + +## Hash Navigation + +1. On page load, JS reads `window.location.hash` +2. If hash present, set RapiDoc's `goto-path` attribute (without the `#`) +3. RapiDoc's default `update-route=true` updates URL as user scrolls +4. 
Native URL sharing works + +## Files to Modify + +### Layouts + +- `layouts/api/list.html` - Embed RapiDoc instead of operation cards grid +- `layouts/partials/api/rapidoc-tag.html` - New partial for tag-level RapiDoc + +### JavaScript + +- `assets/js/components/rapidoc-mini.ts` - Add hash-based `goto-path` initialization + +### Remove/Deprecate + +- `layouts/api-path/path.html` - Path page layout +- `layouts/partials/api/rapidoc-path.html` - Path partial +- Generated path pages in `content/influxdb3/*/api/v*/` + +### Keep + +- Tag-specific spec files (`static/openapi/influxdb3-core/tags/tags/`) +- Generation script for tag pages and article data +- All endpoints page + +### Generation Script + +- Remove `generatePathPages()` function +- Keep tag page generation +- Ensure frontmatter `operations` array is complete for TOC + +## Development Scope + +Focus on `influxdb3/core` first, then migrate other products. + +## Testing + +- Verify tag pages load with all operations rendered +- Test hash navigation (direct URL, TOC clicks, browser back/forward) +- Verify no internal scrolling - page flows naturally +- Check visual separation between operations +- Test "On this page" TOC links diff --git a/docs/plans/2026-02-04-api-link-migration-design.md b/docs/plans/2026-02-04-api-link-migration-design.md new file mode 100644 index 0000000000..4927e44b57 --- /dev/null +++ b/docs/plans/2026-02-04-api-link-migration-design.md @@ -0,0 +1,174 @@ +# API Link Migration: Redoc to RapiDoc Anchors + +## Overview + +Migrate all internal API operation links from Redoc's `#operation/{operationId}` format to RapiDoc's native `#{method}-{path}` format. 
+ +## Background + +The RapiDoc migration changes how anchor links work for API operations: + +| Source | Pattern | Example | +|--------|---------|---------| +| **Redoc (old)** | `#operation/{operationId}` | `#operation/PostTasks` | +| **RapiDoc (new)** | `#{method}-{path}` | `#post-/api/v2/tasks` | + +**Scope:** 237 links across 111 content files use the Redoc pattern. + +**Constraint:** Don't modify source OpenAPI specs—transformation happens at the link level only. + +## Goals + +1. **Prevent 404s** for external links to API pages (base URL stability) +2. **Clean migration** of all internal links to RapiDoc's native format +3. **Validation** via link-checker after migration + +## Non-Goals + +- Backward compatibility for fragment identifiers (URL fragments are client-side only; server redirects can't translate them) +- External links with old `#operation/` fragments will land on the correct page but won't auto-scroll + +## URL Structure + +**API page URLs remain stable:** +- `/influxdb/cloud/api/` — All endpoints page +- `/influxdb3/core/api/` — All endpoints page +- `/influxdb3/core/api/{tag-name}/` — Tag page + +**Anchor format changes:** +- Old: `/influxdb/cloud/api/#operation/PostTasks` +- New: `/influxdb/cloud/api/#post-/api/v2/tasks` + +## RapiDoc Anchor Format + +RapiDoc uses `#{method}-{path}` with these conventions: + +- Method is lowercase: `post`, `get`, `delete`, `put`, `patch` +- Path parameters `{param}` become `-param-`: `/tasks/{taskID}` → `/tasks/-taskID-` +- Slashes in fragments are valid per RFC 3986 + +**Examples:** +``` +#get-/api/v2/tasks +#post-/api/v2/write +#delete-/api/v2/tasks/-taskID- +#get-/api/v2/tasks/-taskID-/runs/-runID- +``` + +## Migration Script Design + +### Location + +`helper-scripts/migrate-api-links.js` (one-time migration tool, plain JS) + +### Algorithm + +**Step 1: Build lookup table from OpenAPI specs** + +Key by product to handle duplicate operationIds across specs: + +```json +{ + "influxdb3/cloud-dedicated": { + 
"PostWrite": "post-/api/v2/write", + "GetDatabaseTokens": "get-/api/v0/accounts/-accountId-/clusters/-clusterId-/tokens" + }, + "influxdb3/core": { + "PostWrite": "post-/api/v3/write" + }, + "influxdb/cloud": { + "PostTasks": "post-/api/v2/tasks", + "GetTasksID": "get-/api/v2/tasks/-taskID-" + } +} +``` + +**Step 2: Scan and transform content files** + +``` +For each .md file in content/: + Find all patterns: #operation/(\w+) + Extract product from link URL path + Look up operationId in product's mapping + Replace with RapiDoc anchor format + Flag unmapped operationIds for manual review +``` + +**Step 3: Report** + +- Files modified +- Links updated (count) +- Unmapped operationIds (manual review needed) +- Dry-run mode available + +### Edge Cases + +| Case | Example | Handling | +|------|---------|----------| +| Path parameters | `{taskID}` | Replace with `-taskID-` in anchor | +| Multiple params | `/tasks/{taskID}/runs/{runID}` | Replace all params | +| Missing operationId | Path exists but no operationId in spec | Flag for manual review | +| Deprecated operations | Link to removed endpoint | Flag as potentially broken | + +### Usage + +```bash +# Dry-run (report only, no changes) +node helper-scripts/migrate-api-links.js --dry-run + +# Execute migration +node helper-scripts/migrate-api-links.js + +# Review changes +git diff content/ +``` + +## Validation + +### Pre-migration + +Verify API page URLs are stable: +- Check `_index.md` files have `aliases:` if paths changed +- Confirm no 404s for existing API base paths + +### Post-migration + +```bash +# Build site +npx hugo --quiet + +# Run link-checker on full site +link-checker check public/ +``` + +## Rollback + +Git provides easy rollback: + +```bash +git checkout -- content/ +``` + +## Files to Create/Modify + +### New Files + +- `helper-scripts/migrate-api-links.js` — Migration script + +### Modified Files + +- ~111 content files containing API operation links + +## Testing Checklist + +- [ ] Dry-run reports 
expected changes +- [ ] All operationIds map successfully (or flagged for review) +- [ ] Links transform to correct RapiDoc format +- [ ] Hugo build succeeds after migration +- [ ] Link-checker passes on full site +- [ ] Spot-check: anchors navigate to correct operations in browser + +## Related Documents + +- [API Tag Pages Design](2026-01-21-api-tag-pages-design.md) +- [API Reference RapiDoc Migration Plan](2026-01-07-api-reference-rapidoc-migration.md) diff --git a/docs/plans/2026-02-04-api-link-migration-implementation.md b/docs/plans/2026-02-04-api-link-migration-implementation.md new file mode 100644 index 0000000000..e8f0d28880 --- /dev/null +++ b/docs/plans/2026-02-04-api-link-migration-implementation.md @@ -0,0 +1,605 @@ +# API Link Migration Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Migrate all 237 internal API links from Redoc `#operation/{operationId}` format to RapiDoc `#{method}-{path}` format. + +**Architecture:** One-time Node.js script that (1) parses OpenAPI specs to build operationId→anchor mapping, (2) scans content files for `#operation/` links, (3) replaces with RapiDoc anchors using the mapping. 
+ +**Tech Stack:** Node.js, js-yaml (already in dependencies), glob (already in dependencies) + +--- + +## Spec Files → Product URL Mapping + +| Spec File | Product URL Prefix | +|-----------|-------------------| +| `api-docs/influxdb/cloud/v2/ref.yml` | `/influxdb/cloud/api/` | +| `api-docs/influxdb/v2/v2/ref.yml` | `/influxdb/v2/api/` | +| `api-docs/influxdb/v1/v1/ref.yml` | `/influxdb/v1/api/` | +| `api-docs/enterprise_influxdb/v1/v1/ref.yml` | `/enterprise_influxdb/v1/api/` | +| `api-docs/influxdb3/core/v3/ref.yml` | `/influxdb3/core/api/` | +| `api-docs/influxdb3/enterprise/v3/ref.yml` | `/influxdb3/enterprise/api/` | +| `api-docs/influxdb3/cloud-dedicated/v2/ref.yml` | `/influxdb3/cloud-dedicated/api/` | +| `api-docs/influxdb3/cloud-dedicated/management/openapi.yml` | `/influxdb3/cloud-dedicated/api/management/` | +| `api-docs/influxdb3/cloud-serverless/v2/ref.yml` | `/influxdb3/cloud-serverless/api/` | +| `api-docs/influxdb3/clustered/v2/ref.yml` | `/influxdb3/clustered/api/` | +| `api-docs/influxdb3/clustered/management/openapi.yml` | `/influxdb3/clustered/api/management/` | + +--- + +## Task 1: Create Migration Script Skeleton + +**Files:** +- Create: `helper-scripts/migrate-api-links.js` + +**Step 1: Create the script with CLI setup** + +```javascript +#!/usr/bin/env node +/** + * migrate-api-links.js + * + * One-time migration script to convert Redoc API links to RapiDoc format. 
+ * + * Usage: + * node helper-scripts/migrate-api-links.js --dry-run # Preview changes + * node helper-scripts/migrate-api-links.js # Execute migration + */ + +const fs = require('fs'); +const path = require('path'); +const yaml = require('js-yaml'); +const { glob } = require('glob'); + +// CLI arguments +const args = process.argv.slice(2); +const DRY_RUN = args.includes('--dry-run'); +const VERBOSE = args.includes('--verbose'); + +// Paths +const ROOT_DIR = path.resolve(__dirname, '..'); +const CONTENT_DIR = path.join(ROOT_DIR, 'content'); +const API_DOCS_DIR = path.join(ROOT_DIR, 'api-docs'); + +// Spec file → product URL mapping +const SPEC_MAPPINGS = [ + { spec: 'influxdb/cloud/v2/ref.yml', urlPrefix: '/influxdb/cloud/api/' }, + { spec: 'influxdb/v2/v2/ref.yml', urlPrefix: '/influxdb/v2/api/' }, + { spec: 'influxdb/v1/v1/ref.yml', urlPrefix: '/influxdb/v1/api/' }, + { spec: 'enterprise_influxdb/v1/v1/ref.yml', urlPrefix: '/enterprise_influxdb/v1/api/' }, + { spec: 'influxdb3/core/v3/ref.yml', urlPrefix: '/influxdb3/core/api/' }, + { spec: 'influxdb3/enterprise/v3/ref.yml', urlPrefix: '/influxdb3/enterprise/api/' }, + { spec: 'influxdb3/cloud-dedicated/v2/ref.yml', urlPrefix: '/influxdb3/cloud-dedicated/api/' }, + { spec: 'influxdb3/cloud-dedicated/management/openapi.yml', urlPrefix: '/influxdb3/cloud-dedicated/api/management/' }, + { spec: 'influxdb3/cloud-serverless/v2/ref.yml', urlPrefix: '/influxdb3/cloud-serverless/api/' }, + { spec: 'influxdb3/clustered/v2/ref.yml', urlPrefix: '/influxdb3/clustered/api/' }, + { spec: 'influxdb3/clustered/management/openapi.yml', urlPrefix: '/influxdb3/clustered/api/management/' }, +]; + +console.log(`API Link Migration Script`); +console.log(`Mode: ${DRY_RUN ? 
'DRY RUN (no changes)' : 'EXECUTE'}\n`); +``` + +**Step 2: Make it executable and test** + +Run: +```bash +chmod +x helper-scripts/migrate-api-links.js +node helper-scripts/migrate-api-links.js --dry-run +``` + +Expected: Script runs and prints header without errors. + +**Step 3: Commit** + +```bash +git add helper-scripts/migrate-api-links.js +git commit -m "feat(api): add migration script skeleton" +``` + +--- + +## Task 2: Build OperationId Lookup Table + +**Files:** +- Modify: `helper-scripts/migrate-api-links.js` + +**Step 1: Add function to parse spec and extract operationIds** + +Add after the SPEC_MAPPINGS constant: + +```javascript +/** + * Convert path parameters from {param} to -param- (RapiDoc format) + */ +function convertPathParams(path) { + return path.replace(/\{([^}]+)\}/g, '-$1-'); +} + +/** + * Build RapiDoc anchor from method and path + * Format: {method}-{path} with {param} → -param- + */ +function buildAnchor(method, pathStr) { + const convertedPath = convertPathParams(pathStr); + return `${method.toLowerCase()}-${convertedPath}`; +} + +/** + * Parse OpenAPI spec and extract operationId → anchor mapping + */ +function parseSpec(specPath) { + const mapping = {}; + + try { + const content = fs.readFileSync(specPath, 'utf8'); + const spec = yaml.load(content); + + if (!spec.paths) { + console.warn(` Warning: No paths in ${specPath}`); + return mapping; + } + + for (const [pathStr, pathItem] of Object.entries(spec.paths)) { + const methods = ['get', 'post', 'put', 'patch', 'delete', 'options', 'head']; + + for (const method of methods) { + const operation = pathItem[method]; + if (operation && operation.operationId) { + const anchor = buildAnchor(method, pathStr); + mapping[operation.operationId] = anchor; + + if (VERBOSE) { + console.log(` ${operation.operationId} → #${anchor}`); + } + } + } + } + } catch (error) { + console.error(` Error parsing ${specPath}: ${error.message}`); + } + + return mapping; +} + +/** + * Build complete lookup table 
from all specs + * Returns: { urlPrefix: { operationId: anchor } } + */ +function buildLookupTable() { + const lookup = {}; + + console.log('Building operationId lookup table...\n'); + + for (const { spec, urlPrefix } of SPEC_MAPPINGS) { + const specPath = path.join(API_DOCS_DIR, spec); + + if (!fs.existsSync(specPath)) { + console.warn(` Skipping missing spec: ${spec}`); + continue; + } + + console.log(` Processing: ${spec}`); + const mapping = parseSpec(specPath); + lookup[urlPrefix] = mapping; + console.log(` Found ${Object.keys(mapping).length} operations`); + } + + console.log(''); + return lookup; +} + +// Test: Build and display lookup table +const lookupTable = buildLookupTable(); +console.log('Lookup table built successfully.\n'); +``` + +**Step 2: Test lookup table generation** + +Run: +```bash +node helper-scripts/migrate-api-links.js --dry-run --verbose 2>&1 | head -50 +``` + +Expected: See operationId mappings printed for each spec. + +**Step 3: Commit** + +```bash +git add helper-scripts/migrate-api-links.js +git commit -m "feat(api): add operationId lookup table generation" +``` + +--- + +## Task 3: Add Content File Scanner + +**Files:** +- Modify: `helper-scripts/migrate-api-links.js` + +**Step 1: Add function to find and parse links** + +Add after buildLookupTable function: + +```javascript +/** + * Find all #operation/ links in a file + * Returns array of { match, operationId, urlPath, fullUrl } + */ +function findOperationLinks(content) { + const links = []; + // Match patterns like: /influxdb/cloud/api/#operation/PostTasks + // or /influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken + const regex = /(\/[a-z0-9_/-]+\/api(?:\/management)?(?:\/[a-z0-9-]*)?\/)#operation\/(\w+)/g; + + let match; + while ((match = regex.exec(content)) !== null) { + links.push({ + match: match[0], + urlPath: match[1], + operationId: match[2], + }); + } + + return links; +} + +/** + * Find the best matching URL prefix for a given URL path + */ 
+function findUrlPrefix(urlPath, lookup) { + // Sort by length descending to match most specific first + const prefixes = Object.keys(lookup).sort((a, b) => b.length - a.length); + + for (const prefix of prefixes) { + if (urlPath.startsWith(prefix) || urlPath === prefix.slice(0, -1)) { + return prefix; + } + } + + return null; +} + +/** + * Scan content directory for files with #operation/ links + */ +async function scanContentFiles(lookup) { + console.log('Scanning content files for #operation/ links...\n'); + + const files = await glob('**/*.md', { cwd: CONTENT_DIR }); + const results = { + filesWithLinks: [], + totalLinks: 0, + unmapped: [], + }; + + for (const file of files) { + const filePath = path.join(CONTENT_DIR, file); + const content = fs.readFileSync(filePath, 'utf8'); + const links = findOperationLinks(content); + + if (links.length > 0) { + const fileResult = { + file, + links: [], + }; + + for (const link of links) { + const urlPrefix = findUrlPrefix(link.urlPath, lookup); + + if (!urlPrefix) { + results.unmapped.push({ file, ...link, reason: 'No matching URL prefix' }); + continue; + } + + const productLookup = lookup[urlPrefix]; + const anchor = productLookup[link.operationId]; + + if (!anchor) { + results.unmapped.push({ file, ...link, reason: 'OperationId not found in spec' }); + continue; + } + + fileResult.links.push({ + ...link, + urlPrefix, + newAnchor: anchor, + oldLink: `${link.urlPath}#operation/${link.operationId}`, + newLink: `${link.urlPath}#${anchor}`, + }); + } + + if (fileResult.links.length > 0) { + results.filesWithLinks.push(fileResult); + results.totalLinks += fileResult.links.length; + } + } + } + + return results; +} +``` + +**Step 2: Add main execution and reporting** + +Replace the test code at the bottom with: + +```javascript +async function main() { + // Build lookup table + const lookupTable = buildLookupTable(); + + // Scan content files + const results = await scanContentFiles(lookupTable); + + // Report findings + 
console.log('=== SCAN RESULTS ===\n'); + console.log(`Files with links: ${results.filesWithLinks.length}`); + console.log(`Total links to migrate: ${results.totalLinks}`); + console.log(`Unmapped links: ${results.unmapped.length}\n`); + + if (VERBOSE && results.filesWithLinks.length > 0) { + console.log('Links to migrate:'); + for (const { file, links } of results.filesWithLinks) { + console.log(`\n ${file}:`); + for (const link of links) { + console.log(` ${link.oldLink}`); + console.log(` → ${link.newLink}`); + } + } + } + + if (results.unmapped.length > 0) { + console.log('\n=== UNMAPPED LINKS (require manual review) ===\n'); + for (const item of results.unmapped) { + console.log(` ${item.file}:`); + console.log(` ${item.match}`); + console.log(` Reason: ${item.reason}\n`); + } + } + + if (DRY_RUN) { + console.log('\n[DRY RUN] No files modified. Run without --dry-run to apply changes.'); + } +} + +main().catch(console.error); +``` + +**Step 3: Test scanner** + +Run: +```bash +node helper-scripts/migrate-api-links.js --dry-run +``` + +Expected: See count of files and links found, plus any unmapped links. 
+ +**Step 4: Commit** + +```bash +git add helper-scripts/migrate-api-links.js +git commit -m "feat(api): add content file scanner for operation links" +``` + +--- + +## Task 4: Add Link Replacement Logic + +**Files:** +- Modify: `helper-scripts/migrate-api-links.js` + +**Step 1: Add replacement function** + +Add before the main function: + +```javascript +/** + * Replace operation links in a file + * Returns the modified content + */ +function replaceLinks(content, links) { + let modified = content; + + for (const link of links) { + // Replace all occurrences of this specific link + modified = modified.split(link.oldLink).join(link.newLink); + } + + return modified; +} + +/** + * Apply migrations to files + */ +async function applyMigrations(results) { + console.log('\n=== APPLYING MIGRATIONS ===\n'); + + let filesModified = 0; + let linksReplaced = 0; + + for (const { file, links } of results.filesWithLinks) { + const filePath = path.join(CONTENT_DIR, file); + const originalContent = fs.readFileSync(filePath, 'utf8'); + const modifiedContent = replaceLinks(originalContent, links); + + if (originalContent !== modifiedContent) { + fs.writeFileSync(filePath, modifiedContent, 'utf8'); + filesModified++; + linksReplaced += links.length; + console.log(` ✓ ${file} (${links.length} links)`); + } + } + + console.log(`\nMigration complete: ${filesModified} files modified, ${linksReplaced} links replaced.`); +} +``` + +**Step 2: Update main function to apply changes** + +Update the main function to call applyMigrations when not in dry-run mode: + +```javascript +async function main() { + // Build lookup table + const lookupTable = buildLookupTable(); + + // Scan content files + const results = await scanContentFiles(lookupTable); + + // Report findings + console.log('=== SCAN RESULTS ===\n'); + console.log(`Files with links: ${results.filesWithLinks.length}`); + console.log(`Total links to migrate: ${results.totalLinks}`); + console.log(`Unmapped links: 
${results.unmapped.length}\n`); + + if (VERBOSE && results.filesWithLinks.length > 0) { + console.log('Links to migrate:'); + for (const { file, links } of results.filesWithLinks) { + console.log(`\n ${file}:`); + for (const link of links) { + console.log(` ${link.oldLink}`); + console.log(` → ${link.newLink}`); + } + } + } + + if (results.unmapped.length > 0) { + console.log('\n=== UNMAPPED LINKS (require manual review) ===\n'); + for (const item of results.unmapped) { + console.log(` ${item.file}:`); + console.log(` ${item.match}`); + console.log(` Reason: ${item.reason}\n`); + } + } + + // Apply migrations if not dry-run + if (DRY_RUN) { + console.log('\n[DRY RUN] No files modified. Run without --dry-run to apply changes.'); + } else if (results.filesWithLinks.length > 0) { + await applyMigrations(results); + } else { + console.log('\nNo links to migrate.'); + } +} + +main().catch(console.error); +``` + +**Step 3: Test dry-run shows expected changes** + +Run: +```bash +node helper-scripts/migrate-api-links.js --dry-run --verbose 2>&1 | head -100 +``` + +Expected: See specific link transformations listed. + +**Step 4: Commit script completion** + +```bash +git add helper-scripts/migrate-api-links.js +git commit -m "feat(api): complete migration script with replacement logic" +``` + +--- + +## Task 5: Execute Migration + +**Step 1: Final dry-run review** + +Run: +```bash +node helper-scripts/migrate-api-links.js --dry-run +``` + +Review the output. Verify: +- Link count matches expectations (~237 links) +- No critical unmapped links +- Transformations look correct + +**Step 2: Execute migration** + +Run: +```bash +node helper-scripts/migrate-api-links.js +``` + +Expected: Files modified, links replaced. + +**Step 3: Review changes** + +Run: +```bash +git diff content/ | head -200 +``` + +Verify transformations look correct (spot check a few). 
+ +**Step 4: Commit migrated content** + +```bash +git add content/ +git commit -m "refactor(api): migrate operation links to RapiDoc anchor format + +Migrated ~237 links from #operation/{operationId} to #{method}-{path} format +for RapiDoc compatibility." +``` + +--- + +## Task 6: Validate with Link-Checker + +**Step 1: Build Hugo site** + +Run: +```bash +npx hugo --quiet +``` + +Expected: Build succeeds without errors. + +**Step 2: Run link-checker** + +Run: +```bash +link-checker check public/ +``` + +Or if link-checker isn't installed globally: +```bash +# Map changed content files to HTML and check +git diff --name-only HEAD~1 HEAD | grep '\.md$' | head -20 | \ + xargs -I {} link-checker map {} | \ + xargs link-checker check +``` + +Expected: No broken links related to API anchors. + +**Step 3: Manual spot-check in browser** + +1. Start Hugo server: `npx hugo server` +2. Visit a page with migrated links +3. Click API links and verify they navigate to correct operations + +**Step 4: Final commit if any fixes needed** + +If link-checker found issues, fix and commit: +```bash +git add content/ +git commit -m "fix(api): correct link migration issues found by link-checker" +``` + +--- + +## Summary + +| Task | Description | Output | +|------|-------------|--------| +| 1 | Script skeleton | `helper-scripts/migrate-api-links.js` | +| 2 | Lookup table generation | operationId → anchor mapping | +| 3 | Content file scanner | Find all `#operation/` links | +| 4 | Replacement logic | Transform links in place | +| 5 | Execute migration | ~237 links migrated | +| 6 | Validate | Link-checker passes | diff --git a/docs/plans/2026-02-04-v1-api-deduplication-design.md b/docs/plans/2026-02-04-v1-api-deduplication-design.md new file mode 100644 index 0000000000..4fb66b9b34 --- /dev/null +++ b/docs/plans/2026-02-04-v1-api-deduplication-design.md @@ -0,0 +1,93 @@ +# InfluxDB v1 API Consistency Design + +**Date:** 2026-02-04 +**Goal:** Make InfluxDB v1 API specs consistent with 
other products by using the same Redocly-based overlay approach. + +## Current State + +- `api-docs/influxdb/v1/v1/ref.yml` - Complete standalone spec (OSS) +- `api-docs/enterprise_influxdb/v1/v1/ref.yml` - Complete standalone spec (Enterprise) +- Not using Redocly decorators or content overlays +- Not integrated with `getswagger.sh` + +## Target State + +- Both v1 products use `.config.yml` and `content/` overlays like other products +- Integrated with `getswagger.sh` and Redocly decorator pipeline +- Remove unused tag-groups decorator (not used by RapiDoc) + +## Design Decisions + +1. **Keep both specs as complete, standalone files** - Accept duplication for simplicity +2. **Use overlays for info and servers only** - Paths stay in each `ref.yml` +3. **Remove tag-groups entirely** - Not used by RapiDoc UI + +## Implementation + +### 1. Directory Structure + +``` +api-docs/ + influxdb/ + v1/ + .config.yml # Redocly config + v1/ + content/ + info.yml # OSS info overlay + servers.yml # OSS servers overlay + ref.yml # Complete OSS spec (exists) + enterprise_influxdb/ + v1/ + .config.yml # Redocly config + v1/ + content/ + info.yml # Enterprise info overlay + servers.yml # Enterprise servers overlay + ref.yml # Complete Enterprise spec (exists) +``` + +### 2. Redocly Decorator Changes + +**Remove (unused with RapiDoc):** + +- `openapi/plugins/decorators/tags/set-tag-groups.cjs` +- `tag-groups.yml` loading from `docs-content.cjs` +- `set-tag-groups` references in `docs-plugin.cjs` +- All `content/tag-groups.yml` files across products + +**Keep:** + +- `set-info.cjs` - merges info.yml overlay +- `set-servers.cjs` - merges servers.yml overlay +- `replace-shortcodes.cjs` - handles doc URL placeholders + +### 3. 
getswagger.sh Changes + +Add functions: + +```bash +function updateOSSV1 { + postProcess influxdb/v1/v1/ref.yml 'influxdb/v1/.config.yml' 'v1@1' +} + +function updateEnterpriseV1 { + postProcess enterprise_influxdb/v1/v1/ref.yml 'enterprise_influxdb/v1/.config.yml' 'v1@1' +} +``` + +## Tasks + +1. [x] Create `influxdb/v1/.config.yml` +2. [x] Create `influxdb/v1/v1/content/info.yml` +3. [x] Create `influxdb/v1/v1/content/servers.yml` +4. [x] Create `enterprise_influxdb/v1/.config.yml` +5. [x] Create `enterprise_influxdb/v1/v1/content/info.yml` +6. [x] Create `enterprise_influxdb/v1/v1/content/servers.yml` +7. [x] Remove tag-groups decorator and all tag-groups.yml files +8. [x] Add `updateOSSV1()` and `updateEnterpriseV1()` to getswagger.sh +9. [x] Test: Run getswagger.sh for both v1 products +10. [x] Test: Verify API pages render correctly + +## Completed: 2026-02-04 + +All tasks completed successfully. The v1 products now use the same Redocly overlay pattern as other products. diff --git a/docs/plans/2026-02-13-hugo-native-api-migration.md b/docs/plans/2026-02-13-hugo-native-api-migration.md new file mode 100644 index 0000000000..99bb7616a4 --- /dev/null +++ b/docs/plans/2026-02-13-hugo-native-api-migration.md @@ -0,0 +1,344 @@ +# Hugo-Native API Reference Migration Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Complete migration to Hugo-native API reference rendering for all InfluxDB products, removing RapiDoc and simplifying the codebase. + +**Architecture:** The Hugo-native approach renders OpenAPI specs using Hugo templates instead of RapiDoc web components. This provides faster page loads, better SEO, consistent styling, and easier customization. Users access operations only through tag pages (no individual operation URLs). 
+ +**Tech Stack:** TypeScript (generation scripts), Hugo templates, SCSS, OpenAPI 3.0 specs + +*** + +## Overview + +### Design Principles + +- **Consistency:** Unified look and feel across all API reference pages +- **Performance:** Fast page loads, full SEO indexability (no shadow DOM) +- **Simplicity:** No web components, no client-side rendering +- **Tag-based navigation:** Operations grouped by tag, accessed via tag pages only + +### URL Structure + +- **API index:** `/influxdb3/core/api/` +- **Tag page:** `/influxdb3/core/api/cache-distinct-values/` +- **All endpoints:** `/influxdb3/core/api/all-endpoints/` + +**Note:** Individual operation pages (e.g., `/influxdb3/core/api/v1/write/`) are being removed. Operations are accessed only through tag pages. + +*** + +## Migration Tasks + +### Task 1: Promote Hugo-native templates to default ✅ COMPLETED + +**Priority:** High | **Status:** Completed 2026-02-13 + +Move Hugo-native templates from POC location to production location. + +**Files moved:** + +- `layouts/partials/api/hugo-native/tag-renderer.html` → `layouts/partials/api/tag-renderer.html` +- `layouts/partials/api/hugo-native/operation.html` → `layouts/partials/api/operation.html` +- `layouts/partials/api/hugo-native/parameters.html` → `layouts/partials/api/parameters.html` +- `layouts/partials/api/hugo-native/parameter-row.html` → `layouts/partials/api/parameter-row.html` +- `layouts/partials/api/hugo-native/request-body.html` → `layouts/partials/api/request-body.html` +- `layouts/partials/api/hugo-native/schema.html` → `layouts/partials/api/schema.html` +- `layouts/partials/api/hugo-native/responses.html` → `layouts/partials/api/responses.html` + +**Completed steps:** + +1. ✅ Moved 7 files from `hugo-native/` subdirectory to parent directory +2. ✅ Updated `layouts/api/list.html` to use new locations (removed `hugo-native/` prefix) +3. ✅ Removed `$useHugoNative` conditional logic from `layouts/api/list.html` +4. 
✅ Deleted `layouts/partials/api/hugo-native/` directory + +**Verification:** Hugo build passes, pages render correctly at `/influxdb3/core/api/` + +*** + +### Task 2: Remove RapiDoc templates and partials ✅ COMPLETED + +**Priority:** High | **Status:** Completed 2026-02-13 + +Delete RapiDoc-specific templates now that Hugo-native is the default. + +**Files deleted:** + +- `layouts/partials/api/rapidoc.html` +- `layouts/partials/api/rapidoc-tag.html` +- `layouts/partials/api/rapidoc-mini.html` + +**Verification:** `grep -r "rapidoc" layouts/` returns no results + +*** + +### Task 3: Remove RapiDoc JavaScript components ✅ COMPLETED + +**Priority:** High | **Status:** Completed 2026-02-13 + +Delete RapiDoc-specific TypeScript components. + +**Files deleted:** + +- `assets/js/components/api-rapidoc.ts` +- `assets/js/components/rapidoc-mini.ts` + +**Files updated:** + +- `assets/js/main.js` - Removed RapiDoc component imports and registrations + +**Verification:** `yarn build:ts` completes without errors + +*** + +### Task 4: Remove operation page generation ✅ COMPLETED + +**Priority:** High | **Status:** Completed 2026-02-13 + +Update generation scripts to remove dead code and RapiDoc references. + +**Files modified:** + +- `api-docs/scripts/generate-openapi-articles.ts` - Removed \~200 lines of dead `generatePathPages` function +- `api-docs/scripts/openapi-paths-to-hugo-data/index.ts` - Updated comments to remove RapiDoc references + +**Changes:** + +1. ✅ Removed dead `generatePathPages` function (operation page generation was already disabled) +2. ✅ Updated comments from "RapiDoc" to "Hugo-native templates" +3. ✅ Updated "RapiDoc fragment links" to "OpenAPI fragment links" + +**Note:** The `useHugoNative` flag was not found in the codebase - operation page generation was already disabled with a comment noting operations are rendered inline on tag pages. 
+ +*** + +### Task 5: Update Cypress tests for Hugo-native ✅ COMPLETED + +**Priority:** High | **Status:** Completed 2026-02-13 + +Simplified Cypress tests now that we use standard HTML instead of shadow DOM. + +**Files modified:** + +- `cypress/e2e/content/api-reference.cy.js` - Rewrote test file + +**Changes:** + +1. ✅ Removed entire "RapiDoc Mini component" describe block (\~160 lines of shadow DOM tests) +2. ✅ Added "API tag pages" tests with Hugo-native selectors (`.api-operation`, `.api-method`, `.api-path`) +3. ✅ Added "API section page structure" tests +4. ✅ Added "All endpoints page" tests +5. ✅ Updated "API reference layout" tests to use Hugo-native selectors + +**New test structure implemented:** + +- `API reference content` - Tests API index pages load with valid links +- `API reference layout` - Tests 3-column layout (sidebar, content, TOC) +- `API tag pages` - Tests operation rendering, method badges, TOC links +- `API section page structure` - Tests tag listing on section pages +- `All endpoints page` - Tests operation cards with links to tag pages + +*** + +### Task 6: Clean up styles ✅ COMPLETED + +**Priority:** Medium | **Status:** Completed 2026-02-13 + +Remove RapiDoc-specific styles, JavaScript, and references from the codebase. + +**Files modified:** + +- `assets/styles/layouts/_api-layout.scss` - Removed \~40 lines of `rapi-doc::part()` CSS selectors +- `assets/styles/layouts/_api-overrides.scss` - Updated comment header +- `assets/styles/layouts/_api-security-schemes.scss` - Removed \~290 lines of dead auth modal styles +- `assets/js/main.js` - Removed dead `api-auth-input` import and registration +- `assets/js/components/api-toc.ts` - Removed RapiDoc-specific code and updated comments + +**Files deleted:** + +- `static/css/rapidoc-custom.css` - Unused static CSS file + +**Changes:** + +1. ✅ Removed `rapi-doc` container styling and `::part()` selectors from `_api-layout.scss` +2. 
✅ Removed dead auth modal section from `_api-security-schemes.scss` (was for RapiDoc "Try it" integration) +3. ✅ Removed `api-auth-input` dead import from `main.js` (component file was already deleted) +4. ✅ Removed `setupRapiDocNavigation()` dead function and references from `api-toc.ts` +5. ✅ Updated comments throughout to remove RapiDoc mentions +6. ✅ Rebuilt `api-docs/scripts/dist/` to update compiled JavaScript + +**Architecture decision:** Kept operation styles separate from layout styles for cleaner separation of concerns: + +- `_api-layout.scss` handles page structure and navigation +- `_api-operations.scss` handles operation/schema component rendering (renamed from `_api-hugo-native.scss`) + +*** + +### Task 7: Fix Generation Script for Clean Regeneration ✅ COMPLETED + +**Priority:** Medium | **Status:** Completed 2026-02-17 + +Added clean regeneration to prevent stale files from accumulating when tags are renamed or removed. + +**Files modified:** + +- `api-docs/scripts/generate-openapi-articles.ts` - Added cleanup functions and CLI flags + +**Implementation:** + +1. ✅ Added `--no-clean` flag to skip cleanup (default is to clean) +2. ✅ Added `--dry-run` flag to preview what would be deleted +3. ✅ Added `getCleanupPaths()` function to identify directories/files to clean +4. ✅ Added `cleanProductOutputs()` function to delete directories and files +5. ✅ Added `showDryRunPreview()` function for dry-run output +6. ✅ Integrated cleanup into `processProduct()` (runs before generation) +7. 
✅ Updated script header documentation with new usage examples + +**Cleaned directories per product:** + +- `static/openapi/{staticDirName}/` - Tag specs +- `static/openapi/{staticDirName}-*.yml` and `.json` - Root specs +- `data/article_data/influxdb/{productKey}/` - Article data +- `content/{pagesDir}/api/` - Content pages + +**Design:** See `plans/2026-02-17-api-clean-regeneration-design.md` + +*** + +### Task 8: Apply Cache Data tag split to InfluxDB 3 Enterprise + +**Priority:** Medium + +Apply the same tag split done for Core. + +**Files to modify:** + +- `api-docs/influxdb3/enterprise/v3/ref.yml` + +**Changes:** + +1. Replace "Cache data" tag with "Cache distinct values" and "Cache last value" tags +2. Update operation tag references +3. Update x-tagGroups references +4. Regenerate: `sh api-docs/generate-api-docs.sh` + +*** + +### Task 9: Migrate remaining products to Hugo-native + +**Priority:** Medium + +After the infrastructure is in place, migrate remaining products. + +**Products:** + +- [ ] cloud-dedicated (management API) +- [ ] cloud-serverless +- [ ] clustered (management API) +- [ ] cloud-v2 +- [ ] oss-v2 +- [ ] oss-v1 + +**For each product:** + +1. Review tag structure in OpenAPI spec +2. Add `x-influxdata-related` links where appropriate +3. Clean and regenerate +4. 
Verify all tag pages render correctly + +*** + +## Key Files Reference + +**Hugo-Native Templates (after migration):** + +- `layouts/partials/api/tag-renderer.html` - Main tag page renderer +- `layouts/partials/api/operation.html` - Individual operation renderer +- `layouts/partials/api/parameters.html` - Parameters section +- `layouts/partials/api/parameter-row.html` - Single parameter row +- `layouts/partials/api/request-body.html` - Request body section +- `layouts/partials/api/schema.html` - JSON schema renderer +- `layouts/partials/api/responses.html` - Response section + +**Layouts:** + +- `layouts/api/list.html` - Tag page layout (Hugo-native only) +- `layouts/api/section.html` - API section page layout +- `layouts/api/all-endpoints.html` - All endpoints page layout + +**Styles:** + +- `assets/styles/layouts/_api-layout.scss` - Consolidated API styles + +**Generation:** + +- `api-docs/scripts/generate-openapi-articles.ts` - Main generation script +- `api-docs/scripts/openapi-paths-to-hugo-data/index.ts` - OpenAPI processing + +*** + +## Verification Checklist + +Before considering migration complete for each product: + +- [ ] All tag pages render without errors +- [ ] Operation details (parameters, request body, responses) display correctly +- [ ] Schema references resolve and render +- [ ] `x-influxdata-related` links appear at page bottom +- [ ] Navigation shows correct tag structure +- [ ] Mobile responsive layout works +- [ ] No console errors in browser DevTools +- [ ] "On this page" TOC links work correctly +- [ ] Cypress tests pass +- [ ] No RapiDoc references remain in codebase + +## Files to Delete (Summary) + +**Already deleted (Tasks 1-3):** + +- ✅ `layouts/partials/api/rapidoc.html` +- ✅ `layouts/partials/api/rapidoc-tag.html` +- ✅ `layouts/partials/api/rapidoc-mini.html` +- ✅ `layouts/partials/api/hugo-native/` (entire directory - 7 files moved to parent) +- ✅ `assets/js/components/api-rapidoc.ts` +- ✅ `assets/js/components/rapidoc-mini.ts` + 
+
+**Reviewed in Task 6 (completed):**
+
+- `assets/styles/layouts/_api-overrides.scss` (kept — comment header updated; not RapiDoc-only)
+
+***
+
+## Migration Findings
+
+### Completed Work Summary (Tasks 1-5)
+
+**Infrastructure changes:**
+
+- Hugo-native templates are now the default (no feature flag required)
+- All RapiDoc code removed from layouts and JavaScript
+- Generation scripts cleaned up (\~200 lines of dead code removed)
+- Cypress tests simplified (no more shadow DOM piercing)
+
+**Key discoveries:**
+
+1. The `useHugoNative` flag did not exist in the codebase - operation page generation was already disabled
+2. The `generatePathPages` function was dead code that could be safely removed
+3. RapiDoc Mini tests were \~160 lines that are no longer needed
+4. Hugo build and TypeScript compilation both pass after all changes
+
+**Verification status:**
+
+- ✅ Hugo build: `npx hugo --quiet` passes
+- ✅ TypeScript: `yarn build:ts` passes
+- ⏳ Cypress tests: Need to run `yarn test:e2e` to verify new tests pass
+- ⏳ Visual review: Need to check pages render correctly in browser
+
+### Remaining Work (Tasks 6-9)
+
+1. **Task 6 (styles)**: Review and consolidate SCSS files — ✅ completed 2026-02-13
+2. **Task 7 (clean regeneration)**: Add `--no-clean` and `--dry-run` flags to generation scripts (clean is the default) — ✅ completed 2026-02-17
+3. **Task 8 (Enterprise tags)**: Split Cache Data tag in Enterprise spec
+4. **Task 9 (product migration)**: Apply to remaining 6 products
diff --git a/docs/plans/2026-02-17-api-clean-regeneration-design.md b/docs/plans/2026-02-17-api-clean-regeneration-design.md
new file mode 100644
index 0000000000..06e62e6cc0
--- /dev/null
+++ b/docs/plans/2026-02-17-api-clean-regeneration-design.md
@@ -0,0 +1,160 @@
+# API Clean Regeneration Design
+
+**Goal:** Add clean regeneration to `generate-openapi-articles.ts` to prevent stale files from accumulating when tags are renamed or removed.
+ +**Problem:** When OpenAPI tags are renamed (e.g., "Cache data" → "Cache distinct values" + "Cache last value"), old generated files persist alongside new ones, causing navigation confusion and stale content. + +*** + +## CLI Interface + +**New flags:** + +| Flag | Description | +| ------------ | ---------------------------------------------------- | +| `--no-clean` | Skip directory cleanup (preserve existing files) | +| `--dry-run` | Show what would be deleted without actually deleting | + +**Behavior:** + +- Default is to clean before generating (no flag needed) +- `--dry-run` implies `--no-clean` (shows deletions but doesn't execute or generate) +- Existing flags (`--validate-links`, `--skip-fetch`) continue to work + +**Usage examples:** + +```bash +# Default: clean and regenerate all products +node generate-openapi-articles.js + +# Clean and regenerate specific product +node generate-openapi-articles.js influxdb3_core + +# Preview what would be deleted +node generate-openapi-articles.js --dry-run + +# Preserve existing files (legacy behavior) +node generate-openapi-articles.js --no-clean +``` + +*** + +## Directories Cleaned Per Product + +For each product (e.g., `influxdb3_core`), the following are cleaned: + +| Location | Pattern | Example | +| ------------- | -------------------------------------------------- | -------------------------------------------- | +| Tag specs | `static/openapi/{staticDirName}/` | `static/openapi/influxdb3-core/` | +| Root specs | `static/openapi/{staticDirName}-*.yml` and `.json` | `static/openapi/influxdb3-core-ref.yml` | +| Article data | `data/article_data/influxdb/{productKey}/` | `data/article_data/influxdb/influxdb3_core/` | +| Content pages | `content/{pagesDir}/api/` | `content/influxdb3/core/api/` | + +**Boundaries:** + +- Only cleans the `api/` subdirectory within content, not the entire product +- Only cleans files matching the product's `staticDirName` pattern +- Never touches other products' files +- Multi-spec 
products (cloud-dedicated, clustered) clean all spec variants + +*** + +## Dry-Run Output Format + +``` +$ node generate-openapi-articles.js influxdb3_core --dry-run + +DRY RUN: Would clean the following for influxdb3_core: + +Directories to remove: + - static/openapi/influxdb3-core/ + - data/article_data/influxdb/influxdb3_core/ + - content/influxdb3/core/api/ + +Files to remove: + - static/openapi/influxdb3-core-ref.yml + - static/openapi/influxdb3-core-ref.json + +Summary: 3 directories, 2 files would be removed + +Skipping generation (dry-run mode). +``` + +*** + +## Code Structure + +**File modified:** `api-docs/scripts/generate-openapi-articles.ts` + +**New CLI flag parsing:** + +```typescript +const noClean = process.argv.includes('--no-clean'); +const dryRun = process.argv.includes('--dry-run'); +``` + +**New functions:** + +```typescript +/** + * Get all paths that would be cleaned for a product + */ +function getCleanupPaths(productKey: string, config: ProductConfig): { + directories: string[]; + files: string[]; +} + +/** + * Clean output directories for a product before regeneration + */ +function cleanProductOutputs(productKey: string, config: ProductConfig): void + +/** + * Display dry-run preview of what would be cleaned + */ +function showDryRunPreview(productKey: string, config: ProductConfig): void +``` + +**Changes to `processProduct()`:** + +```typescript +function processProduct(productKey: string, config: ProductConfig): void { + // Clean before generating (unless --no-clean or --dry-run) + if (!noClean && !dryRun) { + cleanProductOutputs(productKey, config); + } + + // Existing generation logic... +} +``` + +**Changes to `main()`:** + +```typescript +function main(): void { + // Handle dry-run mode + if (dryRun) { + productsToProcess.forEach((productKey) => { + showDryRunPreview(productKey, productConfigs[productKey]); + }); + console.log('\nDry run complete. 
No files were modified.'); + return; // Exit without generating + } + + // Existing processing logic... +} +``` + +**No changes to:** `openapi-paths-to-hugo-data/index.ts` + +*** + +## Verification + +After implementation: + +1. Run `--dry-run` and verify output matches expected format +2. Run without flags and verify old files are removed +3. Run with `--no-clean` and verify files are preserved +4. Verify Hugo build passes after clean regeneration +5. Verify no stale tag pages appear in navigation diff --git a/docs/plans/2026-02-17-api-clean-regeneration-implementation.md b/docs/plans/2026-02-17-api-clean-regeneration-implementation.md new file mode 100644 index 0000000000..723b140d61 --- /dev/null +++ b/docs/plans/2026-02-17-api-clean-regeneration-implementation.md @@ -0,0 +1,519 @@ +# API Clean Regeneration Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Add `--no-clean` and `--dry-run` flags to `generate-openapi-articles.ts` so stale files are automatically removed before regeneration. + +**Architecture:** Delete-and-regenerate approach. Before processing each product, remove its output directories (static specs, article data, content pages), then generate fresh. Default behavior is to clean; `--no-clean` preserves existing files. 
+ +**Tech Stack:** TypeScript, Node.js fs module, existing generation script + +**Design:** See `plans/2026-02-17-api-clean-regeneration-design.md` + +*** + +## Task 1: Add CLI Flag Parsing + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts:88-89` + +**Step 1: Add flag constants after existing flags** + +Find the existing CLI flags section (around line 88): + +```typescript +// CLI flags +const validateLinks = process.argv.includes('--validate-links'); +const skipFetch = process.argv.includes('--skip-fetch'); +``` + +Add new flags below: + +```typescript +// CLI flags +const validateLinks = process.argv.includes('--validate-links'); +const skipFetch = process.argv.includes('--skip-fetch'); +const noClean = process.argv.includes('--no-clean'); +const dryRun = process.argv.includes('--dry-run'); +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): add --no-clean and --dry-run CLI flags" +``` + +*** + +## Task 2: Add getCleanupPaths Function + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts` + +**Step 1: Add getCleanupPaths function after getStaticDirName function (around line 170)** + +```typescript +/** + * Get all paths that would be cleaned for a product + * + * @param productKey - Product identifier (e.g., 'influxdb3_core') + * @param config - Product configuration + * @returns Object with directories and files arrays + */ +function getCleanupPaths( + productKey: string, + config: ProductConfig +): { directories: string[]; files: string[] } { + const staticDirName = getStaticDirName(productKey); + const staticPath = path.join(DOCS_ROOT, 'static/openapi'); + + const directories: string[] = []; + const files: string[] = []; + + // Tag specs directory: static/openapi/{staticDirName}/ + const tagSpecsDir = path.join(staticPath, staticDirName); + if 
(fs.existsSync(tagSpecsDir)) { + directories.push(tagSpecsDir); + } + + // Article data directory: data/article_data/influxdb/{productKey}/ + const articleDataDir = path.join( + DOCS_ROOT, + `data/article_data/influxdb/${productKey}` + ); + if (fs.existsSync(articleDataDir)) { + directories.push(articleDataDir); + } + + // Content pages directory: content/{pagesDir}/api/ + const contentApiDir = path.join(config.pagesDir, 'api'); + if (fs.existsSync(contentApiDir)) { + directories.push(contentApiDir); + } + + // Root spec files: static/openapi/{staticDirName}-*.yml and .json + if (fs.existsSync(staticPath)) { + const staticFiles = fs.readdirSync(staticPath); + const pattern = new RegExp(`^${staticDirName}-.*\\.(yml|json)$`); + staticFiles + .filter((f) => pattern.test(f)) + .forEach((f) => { + files.push(path.join(staticPath, f)); + }); + } + + return { directories, files }; +} +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): add getCleanupPaths function" +``` + +*** + +## Task 3: Add cleanProductOutputs Function + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts` + +**Step 1: Add cleanProductOutputs function after getCleanupPaths** + +```typescript +/** + * Clean output directories for a product before regeneration + * + * @param productKey - Product identifier + * @param config - Product configuration + */ +function cleanProductOutputs(productKey: string, config: ProductConfig): void { + const { directories, files } = getCleanupPaths(productKey, config); + + // Remove directories recursively + for (const dir of directories) { + console.log(`🧹 Removing directory: ${dir}`); + fs.rmSync(dir, { recursive: true, force: true }); + } + + // Remove individual files + for (const file of files) { + console.log(`🧹 Removing file: ${file}`); + fs.unlinkSync(file); + } + + const total = 
directories.length + files.length; + if (total > 0) { + console.log( + `✓ Cleaned ${directories.length} directories, ${files.length} files for ${productKey}` + ); + } +} +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): add cleanProductOutputs function" +``` + +*** + +## Task 4: Add showDryRunPreview Function + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts` + +**Step 1: Add showDryRunPreview function after cleanProductOutputs** + +```typescript +/** + * Display dry-run preview of what would be cleaned + * + * @param productKey - Product identifier + * @param config - Product configuration + */ +function showDryRunPreview(productKey: string, config: ProductConfig): void { + const { directories, files } = getCleanupPaths(productKey, config); + + console.log(`\nDRY RUN: Would clean the following for ${productKey}:\n`); + + if (directories.length > 0) { + console.log('Directories to remove:'); + directories.forEach((dir) => console.log(` - ${dir}`)); + } + + if (files.length > 0) { + console.log('\nFiles to remove:'); + files.forEach((file) => console.log(` - ${file}`)); + } + + if (directories.length === 0 && files.length === 0) { + console.log(' (no files to clean)'); + } + + console.log( + `\nSummary: ${directories.length} directories, ${files.length} files would be removed` + ); +} +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): add showDryRunPreview function" +``` + +*** + +## Task 5: Integrate Cleanup into processProduct + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts:1129-1135` + +**Step 1: Add cleanup call at the start of processProduct function** + +Find the beginning of 
`processProduct` function (around line 1129): + +```typescript +function processProduct(productKey: string, config: ProductConfig): void { + console.log('\n' + '='.repeat(80)); + console.log(`Processing ${config.description || productKey}`); + console.log('='.repeat(80)); +``` + +Add cleanup after the header output: + +```typescript +function processProduct(productKey: string, config: ProductConfig): void { + console.log('\n' + '='.repeat(80)); + console.log(`Processing ${config.description || productKey}`); + console.log('='.repeat(80)); + + // Clean output directories before regeneration (unless --no-clean) + if (!noClean && !dryRun) { + cleanProductOutputs(productKey, config); + } +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): integrate cleanup into processProduct" +``` + +*** + +## Task 6: Add Dry-Run Mode to main Function + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts:1307-1346` + +**Step 1: Add dry-run handling after product validation in main()** + +Find the section after product validation (around line 1340): + +```typescript + // Validate product keys + const invalidProducts = productsToProcess.filter( + (key) => !productConfigs[key] + ); + if (invalidProducts.length > 0) { + // ... error handling ... + } + + // Process each product + productsToProcess.forEach((productKey) => { +``` + +Add dry-run handling before the forEach: + +```typescript + // Validate product keys + const invalidProducts = productsToProcess.filter( + (key) => !productConfigs[key] + ); + if (invalidProducts.length > 0) { + // ... error handling ... 
+ } + + // Handle dry-run mode + if (dryRun) { + console.log('\n📋 DRY RUN MODE - No files will be modified\n'); + productsToProcess.forEach((productKey) => { + showDryRunPreview(productKey, productConfigs[productKey]); + }); + console.log('\nDry run complete. No files were modified.'); + return; + } + + // Process each product + productsToProcess.forEach((productKey) => { +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "feat(api): add dry-run mode to main function" +``` + +*** + +## Task 7: Update Script Header Documentation + +**Files:** + +- Modify: `api-docs/scripts/generate-openapi-articles.ts:1-21` + +**Step 1: Update the usage documentation in the file header** + +Find the header comment block and update: + +```typescript +#!/usr/bin/env node +/** + * Generate OpenAPI Articles Script + * + * Generates Hugo data files and content pages from OpenAPI specifications + * for all InfluxDB products. + * + * This script: + * 1. Cleans output directories (unless --no-clean) + * 2. Runs getswagger.sh to fetch/bundle OpenAPI specs + * 3. Copies specs to static directory for download + * 4. Generates path group fragments (YAML and JSON) + * 5. Creates article metadata (YAML and JSON) + * 6. 
Generates Hugo content pages from article data + * + * Usage: + * node generate-openapi-articles.js # Clean and generate all products + * node generate-openapi-articles.js cloud-v2 # Clean and generate single product + * node generate-openapi-articles.js --no-clean # Generate without cleaning + * node generate-openapi-articles.js --dry-run # Preview what would be cleaned + * node generate-openapi-articles.js --skip-fetch # Skip getswagger.sh fetch step + * node generate-openapi-articles.js --validate-links # Validate documentation links + * + * @module generate-openapi-articles + */ +``` + +**Step 2: Verify TypeScript compiles** + +Run: `yarn build:ts` +Expected: Compiles without errors + +**Step 3: Commit** + +```bash +git add api-docs/scripts/generate-openapi-articles.ts +git commit -m "docs(api): update script header with new flags" +``` + +*** + +## Task 8: Rebuild Compiled JavaScript + +**Files:** + +- Modify: `api-docs/scripts/dist/generate-openapi-articles.js` (generated) + +**Step 1: Rebuild TypeScript** + +Run: `yarn build:ts` +Expected: Compiles without errors, updates dist/ files + +**Step 2: Verify the compiled output includes new functions** + +Run: `grep -n "getCleanupPaths\|cleanProductOutputs\|showDryRunPreview\|noClean\|dryRun" api-docs/scripts/dist/generate-openapi-articles.js | head -10` +Expected: Shows line numbers where new code appears + +**Step 3: Commit compiled output** + +```bash +git add api-docs/scripts/dist/ +git commit -m "build(api): rebuild compiled generation scripts" +``` + +*** + +## Task 9: Manual Testing + +**Step 1: Test dry-run mode** + +Run: `node api-docs/scripts/dist/generate-openapi-articles.js influxdb3_core --dry-run` + +Expected output format: + +``` +📋 DRY RUN MODE - No files will be modified + +DRY RUN: Would clean the following for influxdb3_core: + +Directories to remove: + - static/openapi/influxdb3-core + - data/article_data/influxdb/influxdb3_core + - content/influxdb3/core/api + +Files to remove: + - 
static/openapi/influxdb3-core-ref.yml + - static/openapi/influxdb3-core-ref.json + +Summary: 3 directories, 2 files would be removed + +Dry run complete. No files were modified. +``` + +**Step 2: Verify files were NOT deleted after dry-run** + +Run: `ls content/influxdb3/core/api/` +Expected: Directory still exists with content + +**Step 3: Test actual clean regeneration** + +Run: `node api-docs/scripts/dist/generate-openapi-articles.js influxdb3_core --skip-fetch` +Expected: Shows cleanup messages, then regenerates successfully + +**Step 4: Verify Hugo build passes** + +Run: `npx hugo --quiet` +Expected: Builds without errors + +**Step 5: Test --no-clean flag preserves files** + +First, create a marker file: + +```bash +touch content/influxdb3/core/api/MARKER_FILE.md +``` + +Run: `node api-docs/scripts/dist/generate-openapi-articles.js influxdb3_core --skip-fetch --no-clean` + +Verify marker still exists: + +```bash +ls content/influxdb3/core/api/MARKER_FILE.md +``` + +Expected: File exists + +Clean up marker: + +```bash +rm content/influxdb3/core/api/MARKER_FILE.md +``` + +*** + +## Task 10: Update Migration Plan + +**Files:** + +- Modify: `plans/2026-02-13-hugo-native-api-migration.md` + +**Step 1: Mark Task 7 as completed** + +Update the task status from planned to completed. 
+ +**Step 2: Commit** + +```bash +git add plans/2026-02-13-hugo-native-api-migration.md +git commit -m "docs(plan): mark Task 7 (clean regeneration) as completed" +``` + +*** + +## Verification Checklist + +Before considering complete: + +- [ ] `yarn build:ts` compiles without errors +- [ ] `--dry-run` shows expected output format +- [ ] `--dry-run` does NOT delete any files +- [ ] Default mode (no flags) cleans before regenerating +- [ ] `--no-clean` preserves existing files +- [ ] `npx hugo --quiet` builds successfully after regeneration +- [ ] All new code is committed diff --git a/docs/plans/2026-03-07-api-code-samples-design.md b/docs/plans/2026-03-07-api-code-samples-design.md new file mode 100644 index 0000000000..1ef32cc2b5 --- /dev/null +++ b/docs/plans/2026-03-07-api-code-samples-design.md @@ -0,0 +1,125 @@ +# API Code Samples & Ask AI Integration Plan + +## Scope + +This plan covers: + +1. **Inline curl examples** for each API operation, generated at Hugo template time from the OpenAPI spec +2. **"Ask AI about this example"** link on each curl example, using the existing Kapa integration +3. **Client library related link** on all InfluxDB 3 API tag pages + +**Out of scope** (separate plans): + +- Site-wide Ask AI on all code blocks (render-codeblock hook) +- Client library tabbed code samples with language tabs +- Duplicate response schema rendering (already shown in Responses section) + +*** + +## Architecture + +**No build script changes for curl generation.** The curl example is constructed entirely in a Hugo partial (`api/code-sample.html`) using data already loaded by `tag-renderer.html` — the full parsed OpenAPI spec with server URLs, parameters, request body schemas, and examples. + +The existing `influxdb-url.js` automatically replaces the default placeholder host in `
` elements with the user's custom URL. No new JavaScript is needed for URL personalization.
+
+### Operation layout order (revised)
+
+1. Header (method + path + summary)
+2. Description
+3. Parameters
+4. Request Body
+5. **Example (curl + Ask AI)** — new
+6. Responses
+
+***
+
+## curl Example Generation
+
+### Partial: `layouts/partials/api/code-sample.html`
+
+Receives the operation definition (`$opDef`), spec (`$spec`), and operation metadata from `operation.html`. Constructs a curl command:
+
+1. **Server URL**: `spec.servers[0].url` — falls back to the product's `placeholder_host`. The existing `influxdb-url.js` replaces this in the DOM if the user has a custom URL.
+2. **Method**: Always explicit `--request METHOD`
+3. **Path**: Appended to server URL. `{param}` placeholders left as-is in the URL.
+4. **Query parameters**: Only required ones. Uses `example` value if available, otherwise an `UPPER_SNAKE_CASE` placeholder derived from the parameter name.
+5. **Headers**:
+   - Always: `--header "Authorization: Bearer INFLUX_TOKEN"`
+   - When request body exists: `--header "Content-Type: ..."` derived from the first key in `requestBody.content`
+6. **Request body**:
+   - `application/json`: Uses `schema.example` if present. If no example, body is omitted entirely — no synthesized fake data.
+   - `text/plain` (line protocol): Hardcoded sample: `--data-raw "measurement,tag=value field=1.0"`
+   - No example and no special content type: body omitted, shows only URL + headers.
+
+### Ask AI link
+
+Each code sample block includes an "Ask AI about this example" link using the existing `ask-ai-open` CSS class and `data-query` attribute. The existing `ask-ai-trigger.js` handles click events and opens the Kapa widget — no new JavaScript needed.
+
+```html
+
+  Ask AI about this example
+
+```
+
+***
+
+## Client Library Related Link
+
+The generation script adds a related link to `/influxdb3/{product}/reference/client-libraries/v3/` for all InfluxDB 3 product tag pages.
+
+**InfluxDB 3 products** (identified by `pagesDir` containing `influxdb3/`):
+
+- `influxdb3_core`
+- `influxdb3_enterprise`
+- `cloud-dedicated`
+- `cloud-serverless`
+- `clustered`
+
+**Excluded** (future plan with v2 client library links):
+
+- `cloud-v2`, `oss-v2`, `oss-v1`, `enterprise-v1`
+
+The `{product}` segment is derived from the `pagesDir` (e.g., `content/influxdb3/core` yields `core`).
+
+***
+
+## File Changes
+
+### New files
+
+| File                                           | Purpose                      |
+| ---------------------------------------------- | ---------------------------- |
+| `layouts/partials/api/code-sample.html`        | curl example + Ask AI link   |
+| `assets/styles/layouts/_api-code-samples.scss` | Styles for code sample block |
+
+### Modified files
+
+| File                                            | Change                                                             |
+| ----------------------------------------------- | ------------------------------------------------------------------ |
+| `layouts/partials/api/operation.html`           | Insert `code-sample.html` between request body and responses       |
+| `assets/styles/styles-default.scss`             | Import `_api-code-samples.scss`                                    |
+| `api-docs/scripts/generate-openapi-articles.ts` | Add client library reference related link for InfluxDB 3 tag pages |
+
+### Not modified
+
+| File                                                   | Reason                   |
+| ------------------------------------------------------ | ------------------------ |
+| `layouts/api/list.html`                                | No layout changes needed |
+| `assets/js/main.js`                                    | No new JS components     |
+| `assets/js/components/api-toc.ts`                      | TOC unchanged            |
+| `assets/styles/layouts/_api-layout.scss`               | Layout unchanged         |
+| `api-docs/scripts/openapi-paths-to-hugo-data/index.ts` | No data model changes    |
+
+***
+
+## Verification
+
+1. **Build**: `npx hugo --quiet` — no template errors
+2. **Visual**: Dev server — navigate to API tag page (e.g., `/influxdb3/core/api/write-data/`) — each operation has a curl example between Request Body and Responses
+3. **URL replacement**: Set a custom URL in the URL selector — verify it replaces the host in curl examples
+4. **Ask AI**: Click "Ask AI about this example" — Kapa opens with pre-populated query
+5. **Related link**: Client library reference link appears at bottom of all InfluxDB 3 API tag pages
+6. **Cypress**: Add test verifying `.api-code-sample` elements render on tag pages
+7. **Dark/light mode**: Code block renders correctly in both themes
+8. **Responsive**: Code sample block handles narrow viewports (horizontal scroll for long curl commands)
diff --git a/docs/plans/TESTING.md b/docs/plans/TESTING.md
new file mode 100644
index 0000000000..7b28886501
--- /dev/null
+++ b/docs/plans/TESTING.md
@@ -0,0 +1,77 @@
+# API Reference Testing Plan
+
+> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
+
+**Goal:** Validate Hugo-native API reference pages render correctly and all tests pass.
+
+**Architecture:** Hugo-native rendering uses standard HTML without shadow DOM, making tests simpler. No RapiDoc web components - operations are rendered server-side by Hugo templates.
+
+**Tech Stack:** Hugo templates, SCSS, Cypress
+
+***
+
+## Test Structure
+
+The API reference tests validate:
+
+1. **API index pages** - Main API landing pages load correctly
+2. **API tag pages** - Tag pages render operations with parameters/responses
+3. **Section structure** - Section pages list tag children correctly
+4. **All endpoints** - All endpoints page shows all operations
+5. **Layout** - 3-column layout with sidebar, content, and TOC
+
+## Running Tests
+
+### Quick validation
+
+```bash
+# Build site
+yarn hugo --quiet
+
+# Start server
+yarn hugo server &
+
+# Test pages load
+curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/api/
+# Expected: 200
+
+# Run Cypress tests (for example, for InfluxDB 3 Core)
+node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/api-reference.cy.js" content/influxdb3/core/api/_index.md
+
+# Stop server
+pkill -f "hugo server"
+```
+
+### Full test suite
+
+```bash
+node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/api-reference.cy.js"
+```
+
+## Test Selectors (Hugo-Native)
+
+Since Hugo-native rendering uses standard HTML, tests use simple CSS selectors:
+
+| Element          | Selector            |
+| ---------------- | ------------------- |
+| Page title       | `h1`                |
+| Operation        | `.api-operation`    |
+| Method badge     | `.api-method`       |
+| Path             | `.api-path`         |
+| Parameters table | `.api-parameters`   |
+| Request body     | `.api-request-body` |
+| Responses        | `.api-responses`    |
+| TOC              | `.api-toc`          |
+| Related links    | `.article--related` |
+
+## Expected Test Coverage
+
+- [ ] API index pages (Core, Enterprise, Cloud Dedicated, Clustered, Cloud Serverless)
+- [ ] Tag pages render operations
+- [ ] Parameters display correctly
+- [ ] Request body sections display
+- [ ] Response sections display
+- [ ] TOC links work
+- [ ] All endpoints page lists operations
+- [ ] Section pages list tags
+- [ ] Links are valid
diff --git a/helper-scripts/label-migration/README.md b/helper-scripts/label-migration/README.md
deleted file mode 100644
index 99e979d2fe..0000000000
--- a/helper-scripts/label-migration/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# Label Migration Scripts
-
-Migrate the docs-v2 repository from 80+ ad-hoc labels to the 24-label taxonomy
-defined in [DOC-REVIEW-PIPELINE-PLAN.md](../../.github/DOC-REVIEW-PIPELINE-PLAN.md).
-
-## Prerequisites
-
-- `gh` CLI authenticated with access to `influxdata/docs-v2`
-- Run from any directory (scripts use `REPO` env var, defaults to `influxdata/docs-v2`)
-
-## Execution Order
-
-### Step 1: Create new labels (safe, idempotent)
-
-```bash
-./create-labels.sh           # Creates 24 new labels
-./create-labels.sh --dry-run # Preview without creating
-```
-
-Uses `gh label create --force`, which creates new labels or updates existing
-ones. Safe to run multiple times.
-
-### Step 2: Migrate issues to new labels
-
-```bash
-./migrate-labels.sh           # Adds new labels to issues with old labels
-./migrate-labels.sh --dry-run # Preview without modifying issues
-```
-
-Adds new labels to issues/PRs that have old labels. Does NOT remove old labels.
-Flags `InfluxDB v3` issues for manual review (may be monolith or distributed).
-
-### Step 3: Verify migration
-
-Before deleting old labels, verify a sample of migrated issues:
-
-```bash
-# Check issues with new product labels
-gh issue list -R influxdata/docs-v2 -l "product:v3-monolith" --state all
-gh issue list -R influxdata/docs-v2 -l "product:v3-distributed" --state all
-
-# Check the flagged InfluxDB v3 issues
-gh issue list -R influxdata/docs-v2 -l "InfluxDB v3" --state all
-```
-
-### Step 4: Delete old labels (destructive, interactive)
-
-```bash
-./delete-labels.sh           # Deletes old labels with confirmation prompts
-./delete-labels.sh --dry-run # Preview without deleting
-```
-
-Prompts for confirmation before each batch of deletions. Batches:
-1. Old product labels (15 labels)
-2. Old release labels (2 labels)
-3. Old source tracking labels (1 label)
-4. Renamed labels (2 labels)
-5. Unused/generic labels (14 labels)
-
-### Step 5: Update workflow references
-
-After deleting `sync-plugin-docs`, update these files to use `source:sync`:
-- `.github/workflows/sync-plugins.yml` (lines 28, 173, 421)
-- `.github/ISSUE_TEMPLATE/sync-plugin-docs.yml` (line 4)
-
-## Targeting a different repo
-
-```bash
-REPO=myorg/myrepo ./create-labels.sh
-REPO=myorg/myrepo ./migrate-labels.sh --dry-run
-```
-
-## Rollback
-
-If something goes wrong after Step 2 (migration):
-- Old labels still exist (not deleted until Step 4)
-- New labels can be removed: `gh label delete "product:v3-monolith" -R influxdata/docs-v2 --yes`
-- Issues retain both old and new labels until old labels are deleted
-
-If something goes wrong after Step 4 (deletion):
-- Old labels are gone but issues retain the new labels
-- Re-create old labels manually if needed: `gh label create "InfluxDB v3" -R influxdata/docs-v2 --color EC8909`
-
-## Label Taxonomy
-
-See the full taxonomy in [DOC-REVIEW-PIPELINE-PLAN.md](../../.github/DOC-REVIEW-PIPELINE-PLAN.md#11--label-taxonomy).
-
-| Category | Count | Prefix | Example |
-|----------|-------|--------|---------|
-| Product | 11 | `product:` | `product:v3-monolith` |
-| Source tracking | 4 | `source:` | `source:sync` |
-| Waiting states | 2 | `waiting:` | `waiting:engineering` |
-| Workflow states | 2 | (none) | `agent-ready`, `skip-review` |
-| Review outcomes | 3 | `review:` | `review:approved` |
-| Renamed | 2 | various | `ai:tooling`, `ci:testing` |
diff --git a/helper-scripts/label-migration/create-labels.sh b/helper-scripts/label-migration/create-labels.sh
deleted file mode 100755
index 0aea302c99..0000000000
--- a/helper-scripts/label-migration/create-labels.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# Create all labels defined in the doc review pipeline plan.
-# Safe and idempotent — uses --force to update existing labels.
-#
-# Usage:
-#   ./create-labels.sh              # Create labels in influxdata/docs-v2
-#   ./create-labels.sh --dry-run    # Print commands without executing
-#   REPO=owner/repo ./create-labels.sh  # Target a different repo
-
-REPO="${REPO:-influxdata/docs-v2}"
-DRY_RUN=false
-
-if [[ "${1:-}" == "--dry-run" ]]; then
-  DRY_RUN=true
-  echo "=== DRY RUN — no labels will be created ==="
-  echo
-fi
-
-create_label() {
-  local name="$1"
-  local color="$2"
-  local description="$3"
-
-  if $DRY_RUN; then
-    printf "  %-30s  #%-6s  %s\n" "$name" "$color" "$description"
-  else
-    if gh label create "$name" \
-      --repo "$REPO" \
-      --color "$color" \
-      --description "$description" \
-      --force 2>/dev/null; then
-      printf "  ✓ %-30s\n" "$name"
-    else
-      printf "  ✗ %-30s  (failed)\n" "$name"
-    fi
-  fi
-}
-
-echo "Repository: $REPO"
-echo
-
-# --- Product labels (11) — yellow ---
-echo "Product labels:"
-create_label "product:v3-monolith"    "FFA500" "InfluxDB 3 Core and Enterprise (single-node / clusterable)"
-create_label "product:v3-distributed" "FFA500" "InfluxDB 3 Cloud Serverless, Cloud Dedicated, Clustered"
-create_label "product:v2"             "FFA500" "InfluxDB v2 (Cloud TSM, OSS)"
-create_label "product:v1"             "FFA500" "InfluxDB v1 OSS"
-create_label "product:v1-enterprise"  "FFA500" "InfluxDB Enterprise v1"
-create_label "product:telegraf"       "FFA500" "Telegraf documentation"
-create_label "product:chronograf"     "FFA500" "Chronograf documentation"
-create_label "product:kapacitor"      "FFA500" "Kapacitor documentation"
-create_label "product:flux"           "FFA500" "Flux language documentation"
-create_label "product:explorer"       "FFA500" "InfluxDB 3 Explorer"
-create_label "product:shared"         "FFA500" "Shared content across products"
-echo
-
-# --- Source tracking labels (4) — purple ---
-echo "Source tracking labels:"
-create_label "source:auto-detected"   "9370DB" "Created by change detection within this repo"
-create_label "source:dar"             "9370DB" "Generated by DAR pipeline (issue analysis)"
-create_label "source:sync"            "9370DB" "Synced from an external repository"
-create_label "source:manual"          "9370DB" "Human-created issue"
-echo
-
-# --- Waiting states (2) — orange ---
-echo "Waiting state labels:"
-create_label "waiting:engineering"    "FF8C00" "Waiting for engineer confirmation"
-create_label "waiting:product"        "FF8C00" "Waiting for product/PM decision"
-echo
-
-# --- Workflow states (2) — green/blue ---
-echo "Workflow state labels:"
-create_label "agent-ready"            "00FF00" "Agent can work on this autonomously"
-create_label "skip-review"            "1E90FF" "Skip automated doc review pipeline"
-echo
-
-# --- Review outcome labels (3) — green/red/yellow ---
-echo "Review outcome labels:"
-create_label "review:approved"          "28A745" "Automated review passed — no blocking issues"
-create_label "review:changes-requested" "DC3545" "Automated review found blocking issues"
-create_label "review:needs-human"       "FFC107" "Automated review inconclusive, needs human"
-echo
-
-# --- Renamed labels (2) ---
-echo "Renamed labels:"
-create_label "ai:tooling"            "3fb91f" "Related to AI assistant infrastructure"
-create_label "ci:testing"            "a1fd0f" "CI/testing infrastructure"
-echo
-
-# --- Ensure existing workflow labels exist ---
-echo "Existing labels (ensure present):"
-create_label "release:pending"        "FEF2C0" "Waiting for product release before merging"
-echo
-
-echo "Done. Total: 24 new + 1 existing = 25 labels."
diff --git a/helper-scripts/label-migration/delete-labels.sh b/helper-scripts/label-migration/delete-labels.sh
deleted file mode 100755
index 58bc2c9ee0..0000000000
--- a/helper-scripts/label-migration/delete-labels.sh
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# Delete old labels after migration is verified.
-# DESTRUCTIVE — requires interactive confirmation for each batch.
-#
-# Run this ONLY after:
-#   1. create-labels.sh has been run
-#   2. migrate-labels.sh has been run
-#   3. A sample of migrated issues has been manually verified
-#
-# Usage:
-#   ./delete-labels.sh              # Delete labels (with confirmation prompts)
-#   ./delete-labels.sh --dry-run    # Print what would be deleted
-#   REPO=owner/repo ./delete-labels.sh  # Target a different repo
-
-REPO="${REPO:-influxdata/docs-v2}"
-DRY_RUN=false
-
-if [[ "${1:-}" == "--dry-run" ]]; then
-  DRY_RUN=true
-  echo "=== DRY RUN — no labels will be deleted ==="
-  echo
-fi
-
-delete_label() {
-  local name="$1"
-
-  if $DRY_RUN; then
-    printf "  Would delete: %s\n" "$name"
-    return
-  fi
-
-  if gh label delete "$name" \
-    --repo "$REPO" \
-    --yes 2>/dev/null; then
-    printf "  ✓ Deleted: %s\n" "$name"
-  else
-    printf "  - Skipped: %s (not found or already deleted)\n" "$name"
-  fi
-}
-
-confirm_batch() {
-  local batch_name="$1"
-
-  if $DRY_RUN; then
-    return 0
-  fi
-
-  echo
-  read -r -p "Delete $batch_name labels? [y/N] " response
-  case "$response" in
-    [yY][eE][sS]|[yY]) return 0 ;;
-    *) echo "  Skipped."; return 1 ;;
-  esac
-}
-
-echo "Repository: $REPO"
-echo
-echo "⚠  This script deletes labels. Run migrate-labels.sh first."
-echo
-
-# --- Old product labels (migrated to product:* labels) ---
-echo "=== Old product labels ==="
-if confirm_batch "old product"; then
-  delete_label "InfluxDB 3 Core and Enterprise"
-  delete_label "InfluxDB v3"
-  delete_label "Processing engine"
-  delete_label "InfluxDB v2"
-  delete_label "InfluxDB v1"
-  delete_label "Enterprise 1.x"
-  delete_label "Chronograf 1.x"
-  delete_label "Kapacitor"
-  delete_label "Flux"
-  delete_label "InfluxDB 3 Explorer"
-  delete_label "InfluxDB Cloud Dedicated"
-  delete_label "InfluxDB Cloud Serverless"
-  delete_label "InfluxDB Clustered"
-  delete_label "InfluxDB Cloud"
-  delete_label "Telegraf"
-fi
-echo
-
-# --- Old release labels (migrated to release:pending) ---
-echo "=== Old release labels ==="
-if confirm_batch "old release"; then
-  delete_label "Pending Release"
-  delete_label "release/influxdb3"
-fi
-echo
-
-# --- Old source tracking labels ---
-echo "=== Old source tracking labels ==="
-if confirm_batch "old source tracking"; then
-  delete_label "sync-plugin-docs"
-fi
-echo
-
-# --- Renamed labels ---
-echo "=== Renamed labels (old names) ==="
-if confirm_batch "renamed label (old names)"; then
-  delete_label "AI assistant tooling"
-  delete_label "ci:testing-and-validation"
-fi
-echo
-
-# --- Unused/generic labels ---
-echo "=== Unused/generic labels ==="
-echo "These labels have inconsistent naming or overlap with the new taxonomy."
-if confirm_batch "unused/generic"; then
-  delete_label "bug"
-  delete_label "priority"
-  delete_label "documentation"
-  delete_label "Proposal"
-  delete_label "Research Phase"
-  delete_label "ready-for-collaboration"
-  delete_label "ui"
-  delete_label "javascript"
-  delete_label "dependencies"
-  delete_label "integration-demo-blog"
-  delete_label "API"
-  delete_label "Docker"
-  delete_label "Grafana"
-  delete_label "Ask AI"
-fi
-echo
-
-echo "=== Done ==="
-echo
-echo "Labels NOT deleted (kept intentionally or not in scope):"
-echo "  - release:pending, release:ready, release/telegraf, release/v1"
-echo "  - good-first-issue, user feedback, validation-failed"
-echo "  - duplicate, enhancement, help wanted, question, wontfix"
-echo "  - design, security, security/misc, Epic, feat, fix, chore"
-echo "  - And others not in the migration scope"
-echo
-echo "Review remaining labels with: gh label list -R $REPO"
diff --git a/helper-scripts/label-migration/migrate-labels.sh b/helper-scripts/label-migration/migrate-labels.sh
deleted file mode 100755
index a1f309ab32..0000000000
--- a/helper-scripts/label-migration/migrate-labels.sh
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# Migrate issues and PRs from old labels to new labels.
-# For each mapping, finds all issues with the old label and adds the new label.
-# Does NOT remove old labels — that happens in delete-labels.sh after verification.
-#
-# Usage:
-#   ./migrate-labels.sh              # Migrate labels in influxdata/docs-v2
-#   ./migrate-labels.sh --dry-run    # Print what would happen without executing
-#   REPO=owner/repo ./migrate-labels.sh  # Target a different repo
-
-REPO="${REPO:-influxdata/docs-v2}"
-DRY_RUN=false
-
-if [[ "${1:-}" == "--dry-run" ]]; then
-  DRY_RUN=true
-  echo "=== DRY RUN — no issues will be modified ==="
-  echo
-fi
-
-migrate_label() {
-  local old_label="$1"
-  local new_label="$2"
-  local note="${3:-}"
-
-  echo "--- $old_label → $new_label"
-  if [[ -n "$note" ]]; then
-    echo "    Note: $note"
-  fi
-
-  # Get all open and closed issues/PRs with the old label
-  local numbers
-  numbers=$(gh issue list \
-    --repo "$REPO" \
-    --label "$old_label" \
-    --state all \
-    --json number \
-    --jq '.[].number' 2>/dev/null || true)
-
-  if [[ -z "$numbers" ]]; then
-    echo "    No issues found with label '$old_label'"
-    echo
-    return
-  fi
-
-  local count
-  count=$(echo "$numbers" | wc -l | tr -d ' ')
-  echo "    Found $count issue(s)"
-
-  for num in $numbers; do
-    if $DRY_RUN; then
-      echo "    Would add '$new_label' to #$num"
-    else
-      if gh issue edit "$num" \
-        --repo "$REPO" \
-        --add-label "$new_label" 2>/dev/null; then
-        echo "    ✓ #$num"
-      else
-        echo "    ✗ #$num (failed)"
-      fi
-    fi
-  done
-  echo
-}
-
-# Flag issues that need manual review instead of automatic migration
-flag_for_review() {
-  local old_label="$1"
-  local reason="$2"
-
-  echo "--- ⚠ $old_label — NEEDS MANUAL REVIEW"
-  echo "    Reason: $reason"
-
-  local numbers
-  numbers=$(gh issue list \
-    --repo "$REPO" \
-    --label "$old_label" \
-    --state all \
-    --json number,title \
-    --jq '.[] | "#\(.number) \(.title)"' 2>/dev/null || true)
-
-  if [[ -z "$numbers" ]]; then
-    echo "    No issues found"
-  else
-    echo "$numbers" | while IFS= read -r line; do
-      echo "    $line"
-    done
-  fi
-  echo
-}
-
-echo "Repository: $REPO"
-
-migrate_label "alerts"                                   "product:v2"
-migrate_label "InfluxDB v2"                              "product:v2"
-migrate_label "InfluxDB 3 Core and Enterprise"           "product:v3-monolith"
-
-echo "=== Done ==="
-echo
-echo "Next steps:"
-echo "  1. Review any issues flagged above"
-echo "  2. Verify a sample of migrated issues in the GitHub UI"
-echo "  3. Once satisfied, run delete-labels.sh to remove old labels"
diff --git a/helper-scripts/migrate-api-links.cjs b/helper-scripts/migrate-api-links.cjs
new file mode 100755
index 0000000000..98ed624a1b
--- /dev/null
+++ b/helper-scripts/migrate-api-links.cjs
@@ -0,0 +1,331 @@
+#!/usr/bin/env node
+/**
+ * migrate-api-links.js
+ *
+ * One-time migration script to convert Redoc API links to RapiDoc format.
+ *
+ * Usage:
+ *   node helper-scripts/migrate-api-links.js --dry-run  # Preview changes
+ *   node helper-scripts/migrate-api-links.js            # Execute migration
+ */
+
+const fs = require('fs');
+const path = require('path');
+const yaml = require('js-yaml');
+const { glob } = require('glob');
+
+// CLI arguments
+const args = process.argv.slice(2);
+const DRY_RUN = args.includes('--dry-run');
+const VERBOSE = args.includes('--verbose');
+
+// Paths
+const ROOT_DIR = path.resolve(__dirname, '..');
+const CONTENT_DIR = path.join(ROOT_DIR, 'content');
+const API_DOCS_DIR = path.join(ROOT_DIR, 'api-docs');
+
+// Spec file → product URL mapping
+const SPEC_MAPPINGS = [
+  { spec: 'influxdb/cloud/v2/ref.yml', urlPrefix: '/influxdb/cloud/api/' },
+  { spec: 'influxdb/v2/v2/ref.yml', urlPrefix: '/influxdb/v2/api/' },
+  { spec: 'influxdb/v1/v1/ref.yml', urlPrefix: '/influxdb/v1/api/' },
+  { spec: 'enterprise_influxdb/v1/v1/ref.yml', urlPrefix: '/enterprise_influxdb/v1/api/' },
+  { spec: 'influxdb3/core/v3/ref.yml', urlPrefix: '/influxdb3/core/api/' },
+  { spec: 'influxdb3/enterprise/v3/ref.yml', urlPrefix: '/influxdb3/enterprise/api/' },
+  { spec: 'influxdb3/cloud-dedicated/v2/ref.yml', urlPrefix: '/influxdb3/cloud-dedicated/api/' },
+  { spec: 'influxdb3/cloud-dedicated/management/openapi.yml', urlPrefix: '/influxdb3/cloud-dedicated/api/management/' },
+  { spec: 'influxdb3/cloud-serverless/v2/ref.yml', urlPrefix: '/influxdb3/cloud-serverless/api/' },
+  { spec: 'influxdb3/clustered/v2/ref.yml', urlPrefix: '/influxdb3/clustered/api/' },
+  { spec: 'influxdb3/clustered/management/openapi.yml', urlPrefix: '/influxdb3/clustered/api/management/' },
+];
+
+// Version placeholder mappings for shared content
+// Maps /version/ placeholder URLs to representative specs for operationId lookup
+const VERSION_PLACEHOLDER_MAPPINGS = [
+  // InfluxDB 3 v3 API (core/enterprise share same operationIds)
+  { pattern: /^\/influxdb3\/version\/api\/v3\//, lookupPrefix: '/influxdb3/core/api/' },
+  // InfluxDB 3 reference path variant
+  { pattern: /^\/influxdb3\/[^/]+\/reference\/api\/v3\//, lookupPrefix: '/influxdb3/core/api/' },
+  // InfluxDB v2 API - use v2 (OSS) as it has more operations than cloud (replication, etc.)
+  { pattern: /^\/influxdb\/version\/api\/v2\//, lookupPrefix: '/influxdb/v2/api/' },
+  { pattern: /^\/influxdb\/version\/api\/v1\//, lookupPrefix: '/influxdb/v2/api/' },  // v1 compat is in v2 spec
+  { pattern: /^\/influxdb\/version\/api\//, lookupPrefix: '/influxdb/v2/api/' },
+  // InfluxDB 3 version placeholder (generic)
+  { pattern: /^\/influxdb3\/version\/api\//, lookupPrefix: '/influxdb3/cloud-serverless/api/' },
+];
+
+/**
+ * Convert path parameters from {param} to -param- (RapiDoc format)
+ */
+function convertPathParams(path) {
+  return path.replace(/\{([^}]+)\}/g, '-$1-');
+}
+
+/**
+ * Build RapiDoc anchor from method and path
+ * Format: {method}-{path} with {param} → -param-
+ */
+function buildAnchor(method, pathStr) {
+  const convertedPath = convertPathParams(pathStr);
+  return `${method.toLowerCase()}-${convertedPath}`;
+}
+
+/**
+ * Parse OpenAPI spec and extract operationId → anchor mapping
+ */
+function parseSpec(specPath) {
+  const mapping = {};
+
+  try {
+    const content = fs.readFileSync(specPath, 'utf8');
+    const spec = yaml.load(content);
+
+    if (!spec.paths) {
+      console.warn(`  Warning: No paths in ${specPath}`);
+      return mapping;
+    }
+
+    for (const [pathStr, pathItem] of Object.entries(spec.paths)) {
+      const methods = ['get', 'post', 'put', 'patch', 'delete', 'options', 'head'];
+
+      for (const method of methods) {
+        const operation = pathItem[method];
+        if (operation && operation.operationId) {
+          const anchor = buildAnchor(method, pathStr);
+          mapping[operation.operationId] = anchor;
+
+          if (VERBOSE) {
+            console.log(`    ${operation.operationId} → #${anchor}`);
+          }
+        }
+      }
+    }
+  } catch (error) {
+    console.error(`  Error parsing ${specPath}: ${error.message}`);
+  }
+
+  return mapping;
+}
+
+/**
+ * Build complete lookup table from all specs
+ * Returns: { urlPrefix: { operationId: anchor } }
+ */
+function buildLookupTable() {
+  const lookup = {};
+
+  console.log('Building operationId lookup table...\n');
+
+  for (const { spec, urlPrefix } of SPEC_MAPPINGS) {
+    const specPath = path.join(API_DOCS_DIR, spec);
+
+    if (!fs.existsSync(specPath)) {
+      console.warn(`  Skipping missing spec: ${spec}`);
+      continue;
+    }
+
+    console.log(`  Processing: ${spec}`);
+    const mapping = parseSpec(specPath);
+    lookup[urlPrefix] = mapping;
+    console.log(`    Found ${Object.keys(mapping).length} operations`);
+  }
+
+  console.log('');
+  return lookup;
+}
+
+/**
+ * Find all #operation/ links in a file
+ * Returns array of { match, operationId, urlPath, fullUrl }
+ */
+function findOperationLinks(content) {
+  const links = [];
+  // Match patterns like: /influxdb/cloud/api/#operation/PostTasks
+  // or /influxdb3/cloud-dedicated/api/management/#operation/CreateDatabaseToken
+  const regex = /(\/[a-z0-9_/-]+\/api(?:\/management)?(?:\/[a-z0-9-]*)?\/)#operation\/(\w+)/g;
+
+  let match;
+  while ((match = regex.exec(content)) !== null) {
+    links.push({
+      match: match[0],
+      urlPath: match[1],
+      operationId: match[2],
+    });
+  }
+
+  return links;
+}
+
+/**
+ * Find the best matching URL prefix for a given URL path
+ * Also handles /version/ placeholders in shared content
+ */
+function findUrlPrefix(urlPath, lookup) {
+  // Sort by length descending to match most specific first
+  const prefixes = Object.keys(lookup).sort((a, b) => b.length - a.length);
+
+  for (const prefix of prefixes) {
+    if (urlPath.startsWith(prefix) || urlPath === prefix.slice(0, -1)) {
+      return prefix;
+    }
+  }
+
+  // Check version placeholder mappings for shared content
+  for (const { pattern, lookupPrefix } of VERSION_PLACEHOLDER_MAPPINGS) {
+    if (pattern.test(urlPath)) {
+      if (VERBOSE) {
+        console.log(`    Mapped ${urlPath} → ${lookupPrefix} (version placeholder)`);
+      }
+      return lookupPrefix;
+    }
+  }
+
+  return null;
+}
+
+/**
+ * Scan content directory for files with #operation/ links
+ */
+async function scanContentFiles(lookup) {
+  console.log('Scanning content files for #operation/ links...\n');
+
+  const files = await glob('**/*.md', { cwd: CONTENT_DIR });
+  const results = {
+    filesWithLinks: [],
+    totalLinks: 0,
+    unmapped: [],
+  };
+
+  for (const file of files) {
+    const filePath = path.join(CONTENT_DIR, file);
+    const content = fs.readFileSync(filePath, 'utf8');
+    const links = findOperationLinks(content);
+
+    if (links.length > 0) {
+      const fileResult = {
+        file,
+        links: [],
+      };
+
+      for (const link of links) {
+        const urlPrefix = findUrlPrefix(link.urlPath, lookup);
+
+        if (!urlPrefix) {
+          results.unmapped.push({ file, ...link, reason: 'No matching URL prefix' });
+          continue;
+        }
+
+        const productLookup = lookup[urlPrefix];
+        const anchor = productLookup[link.operationId];
+
+        if (!anchor) {
+          results.unmapped.push({ file, ...link, reason: 'OperationId not found in spec' });
+          continue;
+        }
+
+        fileResult.links.push({
+          ...link,
+          urlPrefix,
+          newAnchor: anchor,
+          oldLink: `${link.urlPath}#operation/${link.operationId}`,
+          newLink: `${link.urlPath}#${anchor}`,
+        });
+      }
+
+      if (fileResult.links.length > 0) {
+        results.filesWithLinks.push(fileResult);
+        results.totalLinks += fileResult.links.length;
+      }
+    }
+  }
+
+  return results;
+}
+
+/**
+ * Replace operation links in a file
+ * Returns the modified content
+ */
+function replaceLinks(content, links) {
+  let modified = content;
+
+  for (const link of links) {
+    // Replace all occurrences of this specific link
+    modified = modified.split(link.oldLink).join(link.newLink);
+  }
+
+  return modified;
+}
+
+/**
+ * Apply migrations to files
+ */
+async function applyMigrations(results) {
+  console.log('\n=== APPLYING MIGRATIONS ===\n');
+
+  let filesModified = 0;
+  let linksReplaced = 0;
+
+  for (const { file, links } of results.filesWithLinks) {
+    const filePath = path.join(CONTENT_DIR, file);
+    const originalContent = fs.readFileSync(filePath, 'utf8');
+    const modifiedContent = replaceLinks(originalContent, links);
+
+    if (originalContent !== modifiedContent) {
+      fs.writeFileSync(filePath, modifiedContent, 'utf8');
+      filesModified++;
+      linksReplaced += links.length;
+      console.log(`  ✓ ${file} (${links.length} links)`);
+    }
+  }
+
+  console.log(`\nMigration complete: ${filesModified} files modified, ${linksReplaced} links replaced.`);
+}
+
+async function main() {
+  console.log(`API Link Migration Script`);
+  console.log(`Mode: ${DRY_RUN ? 'DRY RUN (no changes)' : 'EXECUTE'}\n`);
+
+  // Build lookup table
+  const lookupTable = buildLookupTable();
+
+  // Scan content files
+  const results = await scanContentFiles(lookupTable);
+
+  // Report findings
+  console.log('=== SCAN RESULTS ===\n');
+  console.log(`Files with links: ${results.filesWithLinks.length}`);
+  console.log(`Total links to migrate: ${results.totalLinks}`);
+  console.log(`Unmapped links: ${results.unmapped.length}\n`);
+
+  if (VERBOSE && results.filesWithLinks.length > 0) {
+    console.log('Links to migrate:');
+    for (const { file, links } of results.filesWithLinks) {
+      console.log(`\n  ${file}:`);
+      for (const link of links) {
+        console.log(`    ${link.oldLink}`);
+        console.log(`    → ${link.newLink}`);
+      }
+    }
+  }
+
+  if (results.unmapped.length > 0) {
+    console.log('\n=== UNMAPPED LINKS (require manual review) ===\n');
+    for (const item of results.unmapped) {
+      console.log(`  ${item.file}:`);
+      console.log(`    ${item.match}`);
+      console.log(`    Reason: ${item.reason}\n`);
+    }
+  }
+
+  // Apply migrations if not dry-run
+  if (DRY_RUN) {
+    console.log('\n[DRY RUN] No files modified. Run without --dry-run to apply changes.');
+  } else if (results.filesWithLinks.length > 0) {
+    await applyMigrations(results);
+  } else {
+    console.log('\nNo links to migrate.');
+  }
+}
+
+main().catch(console.error);
diff --git a/layouts/_default/LLMS-TXT-README.md b/layouts/LLMS-TXT-README.md
similarity index 63%
rename from layouts/_default/LLMS-TXT-README.md
rename to layouts/LLMS-TXT-README.md
index 0535d8f197..3d39004243 100644
--- a/layouts/_default/LLMS-TXT-README.md
+++ b/layouts/LLMS-TXT-README.md
@@ -8,26 +8,39 @@ The llms.txt format helps LLMs discover and understand documentation structure.
 
 ## Template Files
 
-### `index.llms.txt`
-- **Location**: `/layouts/index.llms.txt`
+### `index.llmstxt.txt`
+
+- **Location**: `/layouts/index.llmstxt.txt`
 - **Output**: `/llms.txt` (site-level)
 - **Type**: Hugo template
 - **Purpose**: Primary entry point for LLM discovery
-- **Content**: Dynamically generated from `data/products.yml` with:
-  - Product descriptions from data files
-  - Organized by product category
-  - Conditional rendering for optional products
-
-### `section.llms.txt`
-- **Location**: `/layouts/_default/section.llms.txt`
-- **Output**: Product/section-level llms.txt files (e.g., `/influxdb3/core/llms.txt`)
-- **Type**: Hugo template
-- **Purpose**: Provide curated navigation for specific products/sections
+- **Content**: Hardcoded curated list of major product sections with:
+  - Direct links to product documentation
+  - Product descriptions
+  - Organized by product category (InfluxDB 3, InfluxDB 2, InfluxDB 1, Tools)
+
+### `landing-influxdb.llms.txt`
+
+- **Location**: `/layouts/section/landing-influxdb.llms.txt`
+- **Output**: Section-level llms.txt files (e.g., `/influxdb3/core/llms.txt`)
+- **Type**: Hugo template (for `landing-influxdb` layout type)
+- **Purpose**: Provide curated navigation for specific products/sections with landing-influxdb layout
 - **Content**: Dynamically generated from:
   - Product metadata from `data/products.yml`
   - Section content and child pages
   - Page descriptions
 
+### `landing-influxdb.llmstxt.txt`
+
+- **Location**: `/layouts/_default/landing-influxdb.llmstxt.txt`
+- **Output**: Landing page llms.txt files
+- **Type**: Hugo template (for `landing-influxdb` layout type in \_default)
+- **Purpose**: Generate llms.txt for landing pages
+- **Content**: Dynamically generated from:
+  - Product metadata from `data/products.yml`
+  - Page title and description
+  - Child pages list
+
 ## Hugo Configuration
 
 In `config/_default/hugo.yml`:
@@ -58,39 +71,47 @@ After building with `hugo`:
 
 ```
 public/
-├── llms.txt                              # Site-level discovery file
+├── llms.txt                              # Site-level discovery file (from index.llmstxt.txt)
 ├── influxdb3/
 │   ├── core/
-│   │   ├── llms.txt                      # InfluxDB 3 Core product index
-│   │   ├── get-started/
-│   │   │   └── llms.txt                  # Section-level index
-│   │   └── query-data/
-│   │       └── llms.txt                  # Section-level index
+│   │   └── llms.txt                      # InfluxDB 3 Core product index (landing-influxdb layout)
 │   ├── cloud-dedicated/
-│   │   └── llms.txt                      # Cloud Dedicated product index
-│   └── cloud-serverless/
-│       └── llms.txt                      # Cloud Serverless product index
+│   │   └── llms.txt                      # Cloud Dedicated product index (landing-influxdb layout)
+│   ├── cloud-serverless/
+│   │   └── llms.txt                      # Cloud Serverless product index (landing-influxdb layout)
+│   └── clustered/
+│       └── llms.txt                      # Clustered product index (landing-influxdb layout)
+├── influxdb/
+│   ├── v2/
+│   │   └── llms.txt                      # InfluxDB v2 product index (landing-influxdb layout)
+│   └── cloud/
+│       └── llms.txt                      # InfluxDB Cloud TSM index (landing-influxdb layout)
 ├── telegraf/
 │   └── v1/
-│       └── llms.txt                      # Telegraf product index
+│       └── llms.txt                      # Telegraf product index (landing-influxdb layout)
 └── flux/
     └── v0/
-        └── llms.txt                      # Flux product index
+        └── llms.txt                      # Flux product index (landing-influxdb layout)
 ```
 
+Note: llms.txt files are only generated for pages with the `landing-influxdb` layout type and for the site root.
+
 ## llmstxt.org Specification Compliance
 
 ### Required Elements
+
 - ✅ **H1 header**: Product or section name
 - ✅ **Curated links**: Not exhaustive - intentionally selective
 
 ### Optional Elements
+
 - ✅ **Blockquote summary**: Brief product/section description
 - ✅ **Content paragraphs**: Additional context (NO headings allowed)
 - ✅ **H2-delimited sections**: Organize links by category
 - ✅ **Link format**: `[Title](url): Description`
 
 ### Key Rules
+
 1. **H1 is required** - Only the product/section name
 2. **Content sections cannot have headings** - Use paragraphs only
 3. **Curate, don't list everything** - Be selective with links
@@ -101,24 +122,25 @@ public/
 
 ### For Site-Level (/llms.txt)
 
-Edit `/layouts/index.llms.txt` directly. This file is hardcoded for precise curation of top-level products.
+Edit `/layouts/index.llmstxt.txt` directly. This file is hardcoded for precise curation of top-level products.
 
-### For Product/Section-Level
+### For Product/Section-Level (landing-influxdb layout)
 
-The `/layouts/_default/section.llms.txt` template automatically generates llms.txt files for all sections.
+The `/layouts/section/landing-influxdb.llms.txt` template automatically generates llms.txt files for pages with the `landing-influxdb` layout type.
 
 **To customize a specific product's llms.txt:**
 
 1. Create a product-specific template following Hugo's lookup order:
    ```
-   layouts/influxdb3/core/section.llms.txt  # Specific to Core
-   layouts/influxdb3/section.llms.txt       # All InfluxDB 3 products
-   layouts/_default/section.llms.txt        # Default for all
+   layouts/influxdb3/core/landing-influxdb.llms.txt      # Specific to Core
+   layouts/influxdb3/landing-influxdb.llms.txt           # All InfluxDB 3 products
+   layouts/section/landing-influxdb.llms.txt             # Default for all sections
    ```
 
 2. **Example: Custom template for InfluxDB 3 Core**
 
-   Create `/layouts/influxdb3/core/section.llms.txt`:
+   Create `/layouts/influxdb3/core/landing-influxdb.llms.txt`:
+
    ```
    # InfluxDB 3 Core
 
@@ -137,6 +159,10 @@ The `/layouts/_default/section.llms.txt` template automatically generates llms.t
    - [Query with SQL](/influxdb3/core/query-data/sql/): SQL query guide
    ```
 
+### For Landing Pages in \_default
+
+The `/layouts/_default/landing-influxdb.llmstxt.txt` template generates llms.txt for landing pages that use the default layout lookup.
+
 ### Using Product Metadata from data/products.yml
 
 The template accesses product metadata:
@@ -206,7 +232,7 @@ llms.txt files are automatically generated during:
 
 ### Updating Site-Level llms.txt
 
-Edit `/layouts/index.llms.txt` to add/remove product links.
+Edit `/layouts/index.llmstxt.txt` to add/remove product links.
 
 ### Troubleshooting
 
@@ -223,4 +249,10 @@ Edit `/layouts/index.llms.txt` to add/remove product links.
 
 - [llmstxt.org specification](https://llmstxt.org/)
 - [Hugo output formats](https://gohugo.io/templates/output-formats/)
-- [InfluxData products.yml](../../data/products.yml)
+- [InfluxData products.yml](../data/products.yml)
+
+## Current Template Files
+
+- `/layouts/index.llmstxt.txt` - Root site llms.txt generator
+- `/layouts/section/landing-influxdb.llms.txt` - Section-level llms.txt for landing-influxdb layout
+- `/layouts/_default/landing-influxdb.llmstxt.txt` - Default landing page llms.txt generator
diff --git a/layouts/_default/api.html b/layouts/_default/api.html
index 68a09ae661..42987cb054 100644
--- a/layouts/_default/api.html
+++ b/layouts/_default/api.html
@@ -1 +1,111 @@
-{{ .Content }}
+{{/* API Documentation Default Layout Fallback layout for API documentation
+pages. Delegates to appropriate templates based on page type: - Section pages:
+Use section.html logic (children listing) - Pages with staticFilePath: Use
+Hugo-native renderer Note: This template exists as a catch-all but specific
+templates (api/section.html, api/list.html, api/single.html) should be
+preferred. */}}
+
+{{/* Extract product and version from URL path for download buttons */}}
+{{/* Example: /influxdb3/clustered/api/ → ["", "influxdb3", "clustered", "api", ""] */}}
+{{ $pathParts := split .RelPermalink "/" }}
+{{ $version := "" }}
+{{ if ge (len $pathParts) 3 }}
+  {{ $version = index $pathParts 2 }}
+{{ end }}
+
+{{/* Section pages without staticFilePath render content directly */}} {{ if and .IsSection (not .Params.staticFilePath)
+}} {{ partial "header.html" . }} {{ partial "topnav.html" . }}
+
+
+ {{ partial "sidebar.html" . }} + +
+
+
+
+

{{ .Title }}

+ {{ with .Description }} +

{{ . }}

+ {{ end }} +
+ + {{/* Dual download buttons for Clustered and Cloud Dedicated */}} + {{ if or (eq $version "clustered") (eq $version "cloud-dedicated") }} + + {{ end }} + + {{/* SECTION INDEX - Show page content then children listing */}} {{ + with .Content }} +
{{ . }}
+ {{ end }} {{/* Always show tag pages from article data */}} {{ partial + "api/section-children.html" . }} {{ partial "article/related.html" . }} +
+
+ + +
+
+ +{{ partial "footer.html" . }} {{ else }} {{/* Pages with staticFilePath +(operation pages) use Hugo-native renderer */}} {{ partial "header.html" . }} {{ +partial "topnav.html" . }} + +
+ {{ partial "sidebar.html" . }} +
+
+
+

{{ .Title }}

+ {{ with .Description }} +

{{ . }}

+ {{ end }} +
+ + {{/* Render API documentation using the configured renderer */}} {{ + partial "api/renderer.html" . }} +
+ +
+
+ +{{ partial "footer.html" . }} {{ end }} + + diff --git a/layouts/api/all-endpoints.html b/layouts/api/all-endpoints.html new file mode 100644 index 0000000000..b32fe8613f --- /dev/null +++ b/layouts/api/all-endpoints.html @@ -0,0 +1,48 @@ +{{/* + All Endpoints Layout + + Shows all API operations on a single page, grouped by version and sorted by path. + Used for /api/all-endpoints/ pages. + + Uses data from: + - data/article_data/influxdb/{product}/articles.yml +*/}} + +{{ partial "header.html" . }} +{{ partial "topnav.html" . }} + +
+ {{ partial "sidebar.html" . }} + +
+
+
+
+

{{ .Title }}

+ {{ with .Description }} +

{{ . }}

+ {{ end }} +
+ + {{ with .Content }} +
+ {{ . }} +
+ {{ end }} + + {{/* Get all operations from article data */}} + {{ partial "api/all-endpoints-list.html" . }} + + {{ partial "article/related.html" . }} + +
+
+ + +
+
+ +{{ partial "footer.html" . }} diff --git a/layouts/api/list.html b/layouts/api/list.html new file mode 100644 index 0000000000..4523550780 --- /dev/null +++ b/layouts/api/list.html @@ -0,0 +1,200 @@ +{{/* API Documentation Layout + Two modes: + 1. Section index (no 'tag' param) - lists tag pages from article data + 2. Tag page (has 'tag' param) - shows operations via Hugo-native templates + Conceptual pages (isConceptual: true) show content without operations. +*/}} + + +{{ partial "header.html" . }} {{ partial "topnav.html" . }} + +
+ {{ partial "sidebar.html" . }} + +
+
+
+
+

{{ .Title }}

+ {{/* Only show description in header for section index pages */}} + {{ if not (isset .Params "tag") }} + {{ with .Description }} +

{{ . }}

+ {{ end }} + {{ end }} +
+ + {{ $hasTag := isset .Params "tag" }} + {{ if not $hasTag }} + {{/* SECTION INDEX - Show intro content then tag-based children */}} + + {{ with .Content }} +
{{ . }}
+ {{ end }} + {{ partial "api/section-children.html" . }} + + {{ else }} + {{/* TAG PAGE - Show operations or conceptual content */}} + {{ $isConceptual := .Params.isConceptual | default false }} + {{ if $isConceptual }} +
+ {{ with .Content }} {{ . }} {{ else }} + {{ with .Params.tagDescription }}{{ . | markdownify }}{{ end }} + {{ end }} +
+ + {{ else }} + {{/* Operational Page - Show all operations */}} + + {{/* Download OpenAPI spec button — uses specDownloadPath from frontmatter */}} + {{ with .Params.specDownloadPath }} + + {{ end }} + + {{/* Hugo page content if any (for custom intro content) */}} + {{ with .Content }} +
{{ . }}
+ {{ end }} + + {{/* Render operations using Hugo-native templates */}} + {{ with .Params.staticFilePath }} +
+ {{ partial "api/tag-renderer.html" $ }} +
+ {{ end }} {{ end }} {{ end }} {{ partial "article/related.html" . }} +
+
+ + {{/* ON THIS PAGE TOC - Generated from operations array */}} {{ $operations + := .Params.operations }} {{ $hasTag := isset .Params "tag" }} {{ + $isConceptual := .Params.isConceptual | default false }} {{ if and $hasTag + (not $isConceptual) }} + + {{ else }} + + {{ end }} +
+
+ +{{ partial "footer.html" . }} + + diff --git a/layouts/api/section.html b/layouts/api/section.html new file mode 100644 index 0000000000..84c5e298fa --- /dev/null +++ b/layouts/api/section.html @@ -0,0 +1,86 @@ +{{/* API Documentation Section Layout Used for API section index pages (e.g., +/influxdb3/core/api/). Shows page content with children listing. +For tag pages (with 'tag' param), Hugo uses list.html instead. +*/}} + +{{/* Extract product and version from URL path for download buttons */}} +{{/* Example: /influxdb3/clustered/api/ → ["", "influxdb3", "clustered", "api", ""] */}} +{{ $pathParts := split .RelPermalink "/" }} +{{ $version := "" }} +{{ if ge (len $pathParts) 3 }} + {{ $version = index $pathParts 2 }} +{{ end }} + +{{ partial "header.html" . }} {{ partial "topnav.html" . }} + +
+ {{ partial "sidebar.html" . }} + +
+
+
+
+

{{ .Title }}

+ {{ with .Description }} +

{{ . }}

+ {{ end }} +
+ + {{/* Dual download buttons for Clustered and Cloud Dedicated */}} + {{ if or (eq $version "clustered") (eq $version "cloud-dedicated") }} + + {{ end }} + + {{/* SECTION INDEX - Show intro content then tag-based children */}} {{ + with .Content }} +
{{ . }}
+ {{ end }} {{/* Always show tag pages from article data */}} {{ partial + "api/section-children.html" . }} {{ partial "article/related.html" . }} +
+
+ + +
+
+ +{{ partial "footer.html" . }} + + diff --git a/layouts/api/single.html b/layouts/api/single.html new file mode 100644 index 0000000000..28a11e052e --- /dev/null +++ b/layouts/api/single.html @@ -0,0 +1,152 @@ +{{/* + API Documentation Single Page Layout + + Used for: + - Conceptual pages (isConceptual: true) like Authentication, Quick start + - Individual operation pages (legacy - being phased out) + + For conceptual pages: + - Shows Hugo content or tagDescription markdown + + Required frontmatter: + - title: Page title + - isConceptual: true (for conceptual pages) +*/}} + +{{ partial "header.html" . }} +{{ partial "topnav.html" . }} + +
+ {{/* Left: Existing Hugo sidebar (includes API nav via sidebar.html) */}} + {{ partial "sidebar.html" . }} + + {{/* Center + Right: Content and TOC */}} +
+
+
+
+
+
+ {{/* For operation pages, show method badge with title */}} + {{ with .Params.method }} +
+ {{ upper . }} +

{{ $.Title }}

+
+ {{ with $.Params.path }} + {{ . }} + {{ end }} + {{ else }} +

{{ .Title }}

+ {{ end }} + + {{/* Summary/Description - skip for conceptual pages (shown in content section) */}} + {{ if not (.Params.isConceptual | default false) }} + {{ with .Params.summary }} +

{{ . | markdownify }}

+ {{ else }} + {{ with .Description }} +

{{ . | markdownify }}

+ {{ end }} + {{ end }} + {{ end }} +
+ + {{/* Download OpenAPI spec button - context-aware for Clustered/Cloud Dedicated */}} + {{ with .Params.staticFilePath }} + {{/* Extract product name from path like /openapi/influxdb-oss-v2/tags/... */}} + {{ $productName := replaceRE `^/openapi/([^/]+)/.*$` "$1" . }} + + {{/* Check if this is a dual-API product (Clustered or Cloud Dedicated) */}} + {{ $isDualApi := or (strings.Contains $productName "clustered") (strings.Contains $productName "cloud-dedicated") }} + + {{ if $isDualApi }} + {{/* Determine API type from path */}} + {{ $isManagementApi := strings.Contains . "management-api" }} + {{ if $isManagementApi }} + {{ $specPath := printf "/openapi/%s-management-api.yml" $productName }} + + {{ else }} + {{ $specPath := printf "/openapi/%s-v2-data-api.yml" $productName }} + + {{ end }} + {{ else }} + {{/* Single-spec products - existing behavior */}} + {{ $completeSpecPath := printf "/openapi/%s.yml" $productName }} + + {{ end }} + {{ end }} +
+
+ + {{ $isConceptual := .Params.isConceptual | default false }} + + {{ if $isConceptual }} + {{/* Conceptual Page - Show content directly */}} +
+ {{ with .Content }} + {{ . }} + {{ else }} + {{ with .Params.tagDescription }} + {{ . | markdownify }} + {{ end }} + {{ end }} +
+ + {{/* Security Schemes from OpenAPI spec (only show if showSecuritySchemes: true) */}} + {{ if .Params.showSecuritySchemes }} + {{ partial "api/security-schemes.html" . }} + {{ end }} + {{ else }} + {{/* Operation Page - Hugo-native rendering */}} + {{/* Note: Individual operation pages are being phased out. */}} + {{/* Operations are now accessed via tag pages only. */}} + + {{/* Hugo page content shown as overview */}} + {{ with .Content }} +
+ {{ . }} +
+ {{ end }} + + {{ end }} + + {{/* Related documentation links */}} + {{ partial "article/related.html" . }} + +
+
+ + {{/* Right: Page TOC - "ON THIS PAGE" */}} + +
+
+ +{{ partial "footer.html" . }} diff --git a/layouts/partials/api/all-endpoints-list.html b/layouts/partials/api/all-endpoints-list.html new file mode 100644 index 0000000000..5bab0333a7 --- /dev/null +++ b/layouts/partials/api/all-endpoints-list.html @@ -0,0 +1,221 @@ +{{/* + All Endpoints List + + Renders all API operations grouped by version (v3, v2, v1) and sorted by path. + Links point to tag pages with hash anchors (e.g., /api/cache-data/#operation/PostConfigureDistinctCache) + Excludes conceptual/trait tag operations. + + Uses frontmatter params: + - articleDataKey: product data key (e.g., 'influxdb3-core') + - articleSection: section slug (e.g., 'api' or 'management-api') +*/}} +{{ $currentPage := . }} + +{{/* Read data key and section from frontmatter */}} +{{ $dataKey := .Params.articleDataKey | default "" }} +{{ $section := .Params.articleSection | default "" }} + +{{/* Get article data using frontmatter-driven lookup */}} +{{ $articles := slice }} +{{ if and $dataKey $section }} + {{ with site.Data.article_data }} + {{ with index . "influxdb" }} + {{ with index . $dataKey }} + {{ with index . $section }} + {{ with index . "articles" }} + {{ with .articles }} + {{ $articles = . }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{/* Build a map of tag name -> article path for URL lookups */}} +{{ $tagToPath := dict }} +{{ range $articles }} + {{ if and (reflect.IsMap .) (isset . "fields") }} + {{ $fields := index . "fields" }} + {{ if reflect.IsMap $fields }} + {{ if isset $fields "tag" }} + {{ $tag := index $fields "tag" }} + {{ $path := index . "path" }} + {{ $tagToPath = merge $tagToPath (dict $tag $path) }} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{/* Collect all operations from non-conceptual articles */}} +{{ $allOperations := slice }} +{{ range $articles }} + {{ if and (reflect.IsMap .) (isset . "fields") }} + {{ $fields := index . 
"fields" }} + {{ if reflect.IsMap $fields }} + {{ $isConceptual := false }} + {{ if isset $fields "isConceptual" }} + {{ $isConceptual = index $fields "isConceptual" }} + {{ end }} + {{ if not $isConceptual }} + {{ if isset $fields "operations" }} + {{ $tag := index $fields "tag" }} + {{ $articlePath := index . "path" }} + {{ range index $fields "operations" }} + {{ $allOperations = $allOperations | append (dict "op" . "tag" $tag "articlePath" $articlePath) }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{ if gt (len $allOperations) 0 }} + {{/* Group operations by API version prefix */}} + {{ $v3Ops := slice }} + {{ $v2Ops := slice }} + {{ $v1Ops := slice }} + {{ $otherOps := slice }} + + {{ range $allOperations }} + {{ $path := .op.path }} + {{ if hasPrefix $path "/api/v3" }} + {{ $v3Ops = $v3Ops | append . }} + {{ else if hasPrefix $path "/api/v2" }} + {{ $v2Ops = $v2Ops | append . }} + {{ else if or (hasPrefix $path "/api/v1") (hasPrefix $path "/health") (hasPrefix $path "/ping") (hasPrefix $path "/metrics") (hasPrefix $path "/query") (hasPrefix $path "/write") }} + {{ $v1Ops = $v1Ops | append . }} + {{ else }} + {{ $otherOps = $otherOps | append . }} + {{ end }} + {{ end }} + + {{/* Sort each group by path then method */}} + {{ $sortV3 := slice }} + {{ range $v3Ops }} + {{ $sortKey := printf "%s %s" .op.path (upper .op.method) }} + {{ $sortV3 = $sortV3 | append (dict "sortKey" $sortKey "data" .) }} + {{ end }} + {{ $sortV3 = sort $sortV3 "sortKey" }} + + {{ $sortV2 := slice }} + {{ range $v2Ops }} + {{ $sortKey := printf "%s %s" .op.path (upper .op.method) }} + {{ $sortV2 = $sortV2 | append (dict "sortKey" $sortKey "data" .) }} + {{ end }} + {{ $sortV2 = sort $sortV2 "sortKey" }} + + {{ $sortV1 := slice }} + {{ range $v1Ops }} + {{ $sortKey := printf "%s %s" .op.path (upper .op.method) }} + {{ $sortV1 = $sortV1 | append (dict "sortKey" $sortKey "data" .) 
}} + {{ end }} + {{ $sortV1 = sort $sortV1 "sortKey" }} + + {{/* Render v3 API endpoints */}} + {{ if gt (len $sortV3) 0 }} +
+

v3 API

+
+ {{ range $sortV3 }} + {{ $op := .data.op }} + {{ $articlePath := .data.articlePath }} + {{/* Build tag page URL with hash anchor: operation/{operationId} */}} + {{/* Build tag page URL relative to the section (parent of all-endpoints) */}} + {{ $sectionUrl := $currentPage.Parent.RelPermalink }} + {{ $tagSlug := path.Base $articlePath }} + {{ $tagPageUrl := printf "%s%s/" $sectionUrl $tagSlug }} + {{ $hashAnchor := printf "#operation/%s" $op.operationId }} + + {{ upper $op.method }} + {{ $op.path }} + {{ $op.summary }} + + {{ end }} +
+
+ {{ end }} + + {{/* Render v2-compatible endpoints */}} + {{ if gt (len $sortV2) 0 }} +
+

v2-compatible API

+
+ {{ range $sortV2 }} + {{ $op := .data.op }} + {{ $articlePath := .data.articlePath }} + {{/* Build tag page URL with hash anchor: operation/{operationId} */}} + {{/* Build tag page URL relative to the section (parent of all-endpoints) */}} + {{ $sectionUrl := $currentPage.Parent.RelPermalink }} + {{ $tagSlug := path.Base $articlePath }} + {{ $tagPageUrl := printf "%s%s/" $sectionUrl $tagSlug }} + {{ $hashAnchor := printf "#operation/%s" $op.operationId }} + + {{ upper $op.method }} + {{ $op.path }} + {{ $op.summary }} + {{ with $op.compatVersion }}{{ . }}{{ end }} + + {{ end }} +
+
+ {{ end }} + + {{/* Render v1-compatible endpoints */}} + {{ if gt (len $sortV1) 0 }} +
+

v1-compatible API

+
+ {{ range $sortV1 }} + {{ $op := .data.op }} + {{ $articlePath := .data.articlePath }} + {{/* Build tag page URL with hash anchor: operation/{operationId} */}} + {{/* Build tag page URL relative to the section (parent of all-endpoints) */}} + {{ $sectionUrl := $currentPage.Parent.RelPermalink }} + {{ $tagSlug := path.Base $articlePath }} + {{ $tagPageUrl := printf "%s%s/" $sectionUrl $tagSlug }} + {{ $hashAnchor := printf "#operation/%s" $op.operationId }} + + {{ upper $op.method }} + {{ $op.path }} + {{ $op.summary }} + {{ with $op.compatVersion }}{{ . }}{{ end }} + + {{ end }} +
+
+ {{ end }} + + {{/* Render Management API endpoints (paths not matching v1/v2/v3 patterns) */}} + {{ if gt (len $otherOps) 0 }} + {{ $sortOther := slice }} + {{ range $otherOps }} + {{ $sortKey := printf "%s %s" .op.path (upper .op.method) }} + {{ $sortOther = $sortOther | append (dict "sortKey" $sortKey "data" .) }} + {{ end }} + {{ $sortOther = sort $sortOther "sortKey" }} +
+

Management API

+
+ {{ range $sortOther }} + {{ $op := .data.op }} + {{ $articlePath := .data.articlePath }} + {{/* Build tag page URL with hash anchor: operation/{operationId} */}} + {{/* Build tag page URL relative to the section (parent of all-endpoints) */}} + {{ $sectionUrl := $currentPage.Parent.RelPermalink }} + {{ $tagSlug := path.Base $articlePath }} + {{ $tagPageUrl := printf "%s%s/" $sectionUrl $tagSlug }} + {{ $hashAnchor := printf "#operation/%s" $op.operationId }} + + {{ upper $op.method }} + {{ $op.path }} + {{ $op.summary }} + + {{ end }} +
+
+ {{ end }} +{{ else }} +

No endpoints available.

+{{ end }} diff --git a/layouts/partials/api/code-sample.html b/layouts/partials/api/code-sample.html new file mode 100644 index 0000000000..c8787de74e --- /dev/null +++ b/layouts/partials/api/code-sample.html @@ -0,0 +1,235 @@ +{{/* + API Code Sample + + Renders an inline curl example for an API operation, constructed from the + OpenAPI spec at Hugo build time. The existing influxdb-url.js replaces + the default host in
 elements if the user has a custom URL set.
+
+  Params:
+    - opDef: The operation definition from the parsed spec
+    - operation: Operation metadata from frontmatter (method, path, summary, operationId)
+    - spec: The full OpenAPI spec object for resolving $ref
+    - context: The page context
+*/}}
+
+{{ $opDef := .opDef }}
+{{ $operation := .operation }}
+{{ $spec := .spec }}
+{{ $method := upper $operation.method }}
+{{ $path := $operation.path }}
+
+{{/* --- Resolve server URL --- */}}
+{{ $serverUrl := "" }}
+{{ with index ($spec.servers | default slice) 0 }}
+  {{ $serverUrl = .url | default "" }}
+  {{/* Resolve {variable} placeholders using variable defaults */}}
+  {{ range $varName, $varDef := .variables | default dict }}
+    {{ $placeholder := printf "{%s}" $varName }}
+    {{ $serverUrl = replace $serverUrl $placeholder ($varDef.default | default "") }}
+  {{ end }}
+{{ end }}
+{{ if not $serverUrl }}
+  {{ $serverUrl = "http://localhost:8086" }}
+{{ end }}
+
+{{/* --- Resolve parameters (handle $ref) --- */}}
+{{ $params := $opDef.parameters | default slice }}
+{{ $resolvedParams := slice }}
+{{ range $params }}
+  {{ $param := . }}
+  {{ if isset . "$ref" }}
+    {{ $refPath := index . "$ref" }}
+    {{ $refParts := split $refPath "/" }}
+    {{ if ge (len $refParts) 4 }}
+      {{ $paramName := index $refParts 3 }}
+      {{ with index $spec.components.parameters $paramName }}
+        {{ $param = . }}
+      {{ end }}
+    {{ end }}
+  {{ end }}
+  {{ $resolvedParams = $resolvedParams | append $param }}
+{{ end }}
+
+{{/* --- Build query string from required query parameters --- */}}
+{{ $queryParts := slice }}
+{{ range $resolvedParams }}
+  {{ if and (eq .in "query") .required }}
+    {{ $value := "" }}
+    {{ with .schema }}
+      {{ if .example }}
+        {{ $value = .example | string }}
+      {{ else if .default }}
+        {{ $value = .default | string }}
+      {{ end }}
+    {{ end }}
+    {{ if not $value }}
+      {{ $value = .name | upper | replaceRE "[^A-Z0-9]" "_" }}
+    {{ end }}
+    {{ $queryParts = $queryParts | append (printf "%s=%s" .name $value) }}
+  {{ end }}
+{{ end }}
+
+{{ $fullUrl := printf "%s%s" $serverUrl $path }}
+{{ if gt (len $queryParts) 0 }}
+  {{ $fullUrl = printf "%s?%s" $fullUrl (delimit $queryParts "&") }}
+{{ end }}
+
+{{/* --- Resolve request body (handle $ref) --- */}}
+{{ $requestBody := $opDef.requestBody | default dict }}
+{{ if isset $requestBody "$ref" }}
+  {{ $refPath := index $requestBody "$ref" }}
+  {{ $refParts := split $refPath "/" }}
+  {{ if ge (len $refParts) 4 }}
+    {{ $rbName := index $refParts 3 }}
+    {{ with index $spec.components.requestBodies $rbName }}
+      {{ $requestBody = . }}
+    {{ end }}
+  {{ end }}
+{{ end }}
+
+{{/* --- Determine content type and body --- */}}
+{{ $contentType := "" }}
+{{ $bodyFlag := "" }}
+{{ $rbContent := $requestBody.content | default dict }}
+{{ if gt (len $rbContent) 0 }}
+  {{/* Get first content type key */}}
+  {{ range $ct, $_ := $rbContent }}
+    {{ if not $contentType }}
+      {{ $contentType = $ct }}
+    {{ end }}
+  {{ end }}
+
+  {{ $mediaType := index $rbContent $contentType | default dict }}
+
+  {{ if hasPrefix $contentType "text/plain" }}
+    {{/* Line protocol — use first example value or a default sample */}}
+    {{ $lpSample := "measurement,tag=value field=1.0" }}
+    {{ with $mediaType.examples }}
+      {{ range $_, $ex := . }}
+        {{ if not $bodyFlag }}
+          {{ $lpSample = $ex.value | string }}
+          {{/* Take only the first line for single-line display */}}
+          {{ $lines := split $lpSample "\n" }}
+          {{ $lpSample = index $lines 0 }}
+        {{ end }}
+      {{ end }}
+    {{ end }}
+    {{ $bodyFlag = printf "--data-raw '%s'" $lpSample }}
+  {{ else if hasPrefix $contentType "application/json" }}
+    {{/* JSON — use schema.example, or build from properties */}}
+    {{ with $mediaType.schema }}
+      {{/* Resolve schema $ref */}}
+      {{ $schema := . }}
+      {{ if isset . "$ref" }}
+        {{ $refPath := index . "$ref" }}
+        {{ $refParts := split $refPath "/" }}
+        {{ if ge (len $refParts) 4 }}
+          {{ $schemaName := index $refParts 3 }}
+          {{ with index $spec.components.schemas $schemaName }}
+            {{ $schema = . }}
+          {{ end }}
+        {{ end }}
+      {{ end }}
+      {{ if $schema.example }}
+        {{ $bodyFlag = printf "--data-raw '%s'" (jsonify $schema.example) }}
+      {{ else if $schema.properties }}
+        {{/* Build example JSON from schema properties */}}
+        {{ $bodyObj := dict }}
+        {{ $requiredList := $schema.required | default slice }}
+        {{ range $propName, $propDef := $schema.properties }}
+          {{/* Resolve property $ref */}}
+          {{ $prop := $propDef }}
+          {{ if isset $propDef "$ref" }}
+            {{ $pRefPath := index $propDef "$ref" }}
+            {{ $pRefParts := split $pRefPath "/" }}
+            {{ if ge (len $pRefParts) 4 }}
+              {{ $pSchemaName := index $pRefParts 3 }}
+              {{ with index $spec.components.schemas $pSchemaName }}
+                {{ $prop = . }}
+              {{ end }}
+            {{ end }}
+          {{ end }}
+          {{/* Use example → default → enum[0] → type placeholder */}}
+          {{ $val := "" }}
+          {{ if ne $prop.example nil }}
+            {{ $val = $prop.example }}
+          {{ else if ne $prop.default nil }}
+            {{ $val = $prop.default }}
+          {{ else if $prop.enum }}
+            {{ $val = index $prop.enum 0 }}
+          {{ else if eq $prop.type "string" }}
+            {{ $val = printf "%s" ($propName | upper) }}
+          {{ else if eq $prop.type "integer" }}
+            {{ $val = 0 }}
+          {{ else if eq $prop.type "number" }}
+            {{ $val = 0 }}
+          {{ else if eq $prop.type "boolean" }}
+            {{ $val = false }}
+          {{ else if eq $prop.type "array" }}
+            {{ if $prop.items }}
+              {{ if eq $prop.items.type "string" }}
+                {{ $val = slice "example" }}
+              {{ else }}
+                {{ $val = slice }}
+              {{ end }}
+            {{ else }}
+              {{ $val = slice }}
+            {{ end }}
+          {{ else if eq $prop.type "object" }}
+            {{ $val = dict }}
+          {{ else }}
+            {{ $val = printf "%s" ($propName | upper) }}
+          {{ end }}
+          {{ $bodyObj = merge $bodyObj (dict $propName $val) }}
+        {{ end }}
+        {{ $bodyFlag = printf "--data-raw '%s'" (jsonify (dict "indent" "  ") $bodyObj) }}
+      {{ end }}
+    {{ end }}
+  {{ end }}
+{{ end }}
+
+{{/* --- Assemble curl command --- */}}
+{{ $lines := slice }}
+{{ $lines = $lines | append (printf "curl --request %s \\" $method) }}
+{{ $lines = $lines | append (printf "  \"%s\" \\" $fullUrl) }}
+{{ $lines = $lines | append "  --header \"Authorization: Bearer INFLUX_TOKEN\" \\" }}
+{{ if $contentType }}
+  {{ $lines = $lines | append (printf "  --header \"Content-Type: %s\" \\" $contentType) }}
+{{ end }}
+{{ if $bodyFlag }}
+  {{/* Last line — no trailing backslash */}}
+  {{ $lines = $lines | append (printf "  %s" $bodyFlag) }}
+{{ else }}
+  {{/* Remove trailing backslash from last header line */}}
+  {{ $lastIdx := sub (len $lines) 1 }}
+  {{ $lastLine := index $lines $lastIdx }}
+  {{ $lastLine = strings.TrimSuffix " \\" $lastLine }}
+  {{ $newLines := slice }}
+  {{ range $i, $line := $lines }}
+    {{ if eq $i $lastIdx }}
+      {{ $newLines = $newLines | append $lastLine }}
+    {{ else }}
+      {{ $newLines = $newLines | append $line }}
+    {{ end }}
+  {{ end }}
+  {{ $lines = $newLines }}
+{{ end }}
+
+{{ $curlCommand := delimit $lines "\n" }}
+
+{{/* --- Build Ask AI query --- */}}
+{{ $aiQuery := printf "Explain this %s %s API request and its response: %s" $method $path ($operation.summary | default "") }}
+
+{{/* --- Render --- */}}
+
+
+ Example request + + Ask AI about this + +
+
+
{{ $curlCommand }}
+
+
diff --git a/layouts/partials/api/normalize-path.html b/layouts/partials/api/normalize-path.html new file mode 100644 index 0000000000..7f3735a64e --- /dev/null +++ b/layouts/partials/api/normalize-path.html @@ -0,0 +1,22 @@ +{{/* + Normalize API path for URL generation + + Transforms an API path to a URL-friendly slug: + 1. Strips leading "/api" prefix (parent directory provides /api/) + 2. Adds v1/ prefix for paths without a version (e.g., /write → v1/write) + 3. Strips leading "/" to avoid double slashes + 4. Removes curly braces from path parameters (e.g., {db} → db) + + Parameters: + - path: The API path (e.g., "/write", "/api/v3/engine/{request_path}") + + Returns: URL-safe path slug (e.g., "v1/write", "v3/engine/request_path") +*/}} +{{ $path := . | strings.TrimPrefix "/api" }} +{{ if not (findRE `^/v\d+/` $path) }} + {{ $path = printf "/v1%s" $path }} +{{ end }} +{{ $path = $path | strings.TrimPrefix "/" }} +{{/* Remove curly braces from path parameters */}} +{{ $path = replaceRE `[{}]` "" $path }} +{{ return $path }} diff --git a/layouts/partials/api/operation.html b/layouts/partials/api/operation.html new file mode 100644 index 0000000000..12fb7881a4 --- /dev/null +++ b/layouts/partials/api/operation.html @@ -0,0 +1,69 @@ +{{/* + Hugo-Native Operation Renderer + + Renders a single API operation with parameters, request body, and responses. + Styled to match docusaurus-openapi aesthetic. 
+ + Params: + - operation: Map with method, path, summary, operationId + - spec: The full OpenAPI spec object + - context: The page context for URL generation +*/}} + +{{ $operation := .operation }} +{{ $spec := .spec }} +{{ $method := lower $operation.method }} +{{ $path := $operation.path }} +{{ $operationId := $operation.operationId }} + +{{/* Find the operation definition in the spec */}} +{{ $pathDef := index $spec.paths $path }} +{{ $opDef := dict }} +{{ if $pathDef }} + {{ $opDef = index $pathDef $method | default dict }} +{{ end }} + +{{/* Generate anchor ID matching Redocly operation/{operationId} format */}} +{{ $anchorId := printf "operation/%s" $operationId }} + +
+ {{/* Operation Header */}} +
+
+ {{ upper $method }} + {{ $path }} +
+

{{ $operation.summary }}

+
+ + {{/* Operation Description */}} + {{ with $opDef.description }} +
+ {{ . | markdownify }} +
+ {{ end }} + + {{/* Parameters Section */}} + {{ $params := $opDef.parameters | default slice }} + {{ if gt (len $params) 0 }} + {{ partial "api/parameters.html" (dict "parameters" $params "spec" $spec) }} + {{ end }} + + {{/* Request Body Section */}} + {{ with $opDef.requestBody }} + {{ partial "api/request-body.html" (dict "requestBody" . "spec" $spec) }} + {{ end }} + + {{/* Code Sample Section */}} + {{ partial "api/code-sample.html" (dict + "opDef" $opDef + "operation" $operation + "spec" $spec + "context" .context + ) }} + + {{/* Responses Section */}} + {{ with $opDef.responses }} + {{ partial "api/responses.html" (dict "responses" . "spec" $spec) }} + {{ end }} +
diff --git a/layouts/partials/api/parameter-row.html b/layouts/partials/api/parameter-row.html new file mode 100644 index 0000000000..5f34125a97 --- /dev/null +++ b/layouts/partials/api/parameter-row.html @@ -0,0 +1,65 @@ +{{/* + Hugo-Native Parameter Row Renderer + + Renders a single parameter with name, type, required badge, and description. + + Params: + - param: Parameter object with name, schema, required, description + - spec: The full OpenAPI spec object for resolving schema $ref +*/}} + +{{ $param := .param }} +{{ $spec := .spec }} + +{{ $name := $param.name }} +{{ $required := $param.required | default false }} +{{ $description := $param.description | default "" }} + +{{/* Resolve schema type */}} +{{ $schema := $param.schema | default dict }} +{{ $type := $schema.type | default "string" }} +{{ $format := $schema.format | default "" }} +{{ $enum := $schema.enum | default slice }} +{{ $default := $schema.default }} + +{{/* Build type display string */}} +{{ $typeDisplay := $type }} +{{ if $format }} + {{ $typeDisplay = printf "%s <%s>" $type $format }} +{{ end }} + +
+
+
+ {{ $name }} + {{ if $required }} + required + {{ end }} + {{ $typeDisplay }} +
+ + {{ if $description }} +
+ {{ $description | markdownify }} +
+ {{ end }} + + {{/* Show enum values if present */}} + {{ if gt (len $enum) 0 }} +
+ Allowed values: + {{ range $i, $val := $enum }} + {{ if $i }}, {{ end }}{{ $val }} + {{ end }} +
+ {{ end }} + + {{/* Show default value if present */}} + {{ if $default }} +
+ Default: + {{ $default }} +
+ {{ end }} +
+
diff --git a/layouts/partials/api/parameters.html b/layouts/partials/api/parameters.html new file mode 100644 index 0000000000..4285feafd2 --- /dev/null +++ b/layouts/partials/api/parameters.html @@ -0,0 +1,85 @@ +{{/* + Hugo-Native Parameters Renderer + + Renders a table of API operation parameters (query, path, header). + Resolves $ref references to component parameters. + + Params: + - parameters: Array of parameter objects + - spec: The full OpenAPI spec object for resolving $ref +*/}} + +{{ $parameters := .parameters }} +{{ $spec := .spec }} + +{{/* Resolve $ref parameters and group by location */}} +{{ $queryParams := slice }} +{{ $pathParams := slice }} +{{ $headerParams := slice }} + +{{ range $parameters }} + {{ $param := . }} + + {{/* Resolve $ref if present */}} + {{ if isset . "$ref" }} + {{ $refPath := index . "$ref" }} + {{/* Parse ref like "#/components/parameters/db" */}} + {{ $refParts := split $refPath "/" }} + {{ if ge (len $refParts) 4 }} + {{ $paramName := index $refParts 3 }} + {{ with index $spec.components.parameters $paramName }} + {{ $param = . }} + {{ end }} + {{ end }} + {{ end }} + + {{/* Group by 'in' location */}} + {{ $location := $param.in | default "query" }} + {{ if eq $location "query" }} + {{ $queryParams = $queryParams | append $param }} + {{ else if eq $location "path" }} + {{ $pathParams = $pathParams | append $param }} + {{ else if eq $location "header" }} + {{ $headerParams = $headerParams | append $param }} + {{ end }} +{{ end }} + +
+

Parameters

+ + {{/* Path Parameters */}} + {{ if gt (len $pathParams) 0 }} +
+
Path parameters
+
+ {{ range $pathParams }} + {{ partial "api/parameter-row.html" (dict "param" . "spec" $spec) }} + {{ end }} +
+
+ {{ end }} + + {{/* Query Parameters */}} + {{ if gt (len $queryParams) 0 }} +
+
Query parameters
+
+ {{ range $queryParams }} + {{ partial "api/parameter-row.html" (dict "param" . "spec" $spec) }} + {{ end }} +
+
+ {{ end }} + + {{/* Header Parameters */}} + {{ if gt (len $headerParams) 0 }} +
+
Header parameters
+
+ {{ range $headerParams }} + {{ partial "api/parameter-row.html" (dict "param" . "spec" $spec) }} + {{ end }} +
+
+ {{ end }} +
diff --git a/layouts/partials/api/renderer.html b/layouts/partials/api/renderer.html new file mode 100644 index 0000000000..d2b32f3088 --- /dev/null +++ b/layouts/partials/api/renderer.html @@ -0,0 +1,12 @@ +{{/* + API Renderer + + Renders API documentation using Hugo-native templates. + This partial is maintained for backward compatibility. + + Required page params: + - staticFilePath: Path to the OpenAPI specification file + - operations: Array of operation metadata +*/}} + +{{ partial "api/tag-renderer.html" . }} diff --git a/layouts/partials/api/request-body.html b/layouts/partials/api/request-body.html new file mode 100644 index 0000000000..9663c8d564 --- /dev/null +++ b/layouts/partials/api/request-body.html @@ -0,0 +1,60 @@ +{{/* + Hugo-Native Request Body Renderer + + Renders the request body section including schema properties. + + Params: + - requestBody: OpenAPI requestBody object + - spec: The full OpenAPI spec object for resolving $ref +*/}} + +{{ $requestBody := .requestBody }} +{{ $spec := .spec }} + +{{ $required := $requestBody.required | default false }} +{{ $description := $requestBody.description | default "" }} + +{{/* Get content schema - typically application/json */}} +{{ $content := $requestBody.content | default dict }} +{{ $jsonContent := index $content "application/json" | default dict }} +{{ $schema := $jsonContent.schema | default dict }} + +{{/* Resolve $ref if present */}} +{{ $resolvedSchema := $schema }} +{{ if isset $schema "$ref" }} + {{ $refPath := index $schema "$ref" }} + {{/* Parse ref like "#/components/schemas/DistinctCacheCreateRequest" */}} + {{ $refParts := split $refPath "/" }} + {{ if ge (len $refParts) 4 }} + {{ $schemaName := index $refParts 3 }} + {{ with index $spec.components.schemas $schemaName }} + {{ $resolvedSchema = . }} + {{ end }} + {{ end }} +{{ end }} + +
+

+ Request body + {{ if $required }} + required + {{ end }} +

+ + {{ if $description }} +
+ {{ $description | markdownify }} +
+ {{ end }} + + {{/* Content type indicator */}} +
+ Content-Type: + application/json +
+ + {{/* Render schema properties */}} + {{ with $resolvedSchema }} + {{ partial "api/schema.html" (dict "schema" . "spec" $spec "level" 0) }} + {{ end }} +
diff --git a/layouts/partials/api/responses.html b/layouts/partials/api/responses.html new file mode 100644 index 0000000000..3973a431d6 --- /dev/null +++ b/layouts/partials/api/responses.html @@ -0,0 +1,79 @@ +{{/* + Hugo-Native Responses Renderer + + Renders the responses section for an API operation. + Shows status codes, descriptions, and response schemas. + + Params: + - responses: Map of status codes to response objects + - spec: The full OpenAPI spec object for resolving $ref +*/}} + +{{ $responses := .responses }} +{{ $spec := .spec }} + +
+

Responses

+ +
+ {{ range $statusCode, $response := $responses }} + {{/* Resolve $ref if present */}} + {{ $resolvedResponse := $response }} + {{ if isset $response "$ref" }} + {{ $refPath := index $response "$ref" }} + {{ $refParts := split $refPath "/" }} + {{ if ge (len $refParts) 4 }} + {{ $responseName := index $refParts 3 }} + {{ with index $spec.components.responses $responseName }} + {{ $resolvedResponse = . }} + {{ end }} + {{ end }} + {{ end }} + + {{ $description := $resolvedResponse.description | default "" }} + {{ $content := $resolvedResponse.content | default dict }} + + {{/* Determine status category for styling */}} + {{ $statusCategory := "info" }} + {{ if hasPrefix $statusCode "2" }} + {{ $statusCategory = "success" }} + {{ else if hasPrefix $statusCode "3" }} + {{ $statusCategory = "redirect" }} + {{ else if hasPrefix $statusCode "4" }} + {{ $statusCategory = "client-error" }} + {{ else if hasPrefix $statusCode "5" }} + {{ $statusCategory = "server-error" }} + {{ end }} + +
+
+ {{ $statusCode }} + {{ $description }} +
+ + {{/* Response body schema if present */}} + {{ with $content }} + {{ $jsonContent := index . "application/json" | default dict }} + {{ with $jsonContent.schema }} + {{/* Resolve schema $ref if present */}} + {{ $resolvedSchema := . }} + {{ if isset . "$ref" }} + {{ $refPath := index . "$ref" }} + {{ $refParts := split $refPath "/" }} + {{ if ge (len $refParts) 4 }} + {{ $schemaName := index $refParts 3 }} + {{ with index $spec.components.schemas $schemaName }} + {{ $resolvedSchema = . }} + {{ end }} + {{ end }} + {{ end }} + +
+ {{ partial "api/schema.html" (dict "schema" $resolvedSchema "spec" $spec "level" 0) }} +
+ {{ end }} + {{ end }} +
+ {{ end }} +
+
diff --git a/layouts/partials/api/schema.html b/layouts/partials/api/schema.html new file mode 100644 index 0000000000..29d1eb704a --- /dev/null +++ b/layouts/partials/api/schema.html @@ -0,0 +1,117 @@ +{{/* + Hugo-Native Schema Renderer + + Renders a JSON schema as a property table with nested object support. + Similar to docusaurus-openapi's schema tables. + + Params: + - schema: OpenAPI schema object + - spec: The full OpenAPI spec object for resolving $ref + - level: Nesting level (0 = root) +*/}} + +{{ $schema := .schema }} +{{ $spec := .spec }} +{{ $level := .level | default 0 }} + +{{ $type := $schema.type | default "object" }} +{{ $properties := $schema.properties | default dict }} +{{ $required := $schema.required | default slice }} +{{ $example := $schema.example }} + +{{/* Convert required slice to map for easy lookup */}} +{{ $requiredMap := dict }} +{{ range $required }} + {{ $requiredMap = merge $requiredMap (dict . true) }} +{{ end }} + +
+ {{ if gt (len $properties) 0 }} +
+ {{ range $propName, $propSchema := $properties }} + {{ $isRequired := index $requiredMap $propName | default false }} + {{ $propType := $propSchema.type | default "string" }} + {{ $propDescription := $propSchema.description | default "" }} + {{ $propFormat := $propSchema.format | default "" }} + {{ $propEnum := $propSchema.enum | default slice }} + {{ $propDefault := $propSchema.default }} + {{ $propExample := $propSchema.example }} + + {{/* Build type display */}} + {{ $typeDisplay := $propType }} + {{ if eq $propType "array" }} + {{ $itemsType := "object" }} + {{ with $propSchema.items }} + {{ $itemsType = .type | default "object" }} + {{ end }} + {{ $typeDisplay = printf "%s[]" $itemsType }} + {{ else if $propFormat }} + {{ $typeDisplay = printf "%s <%s>" $propType $propFormat }} + {{ end }} + +
+
+ {{ $propName }} + {{ if $isRequired }} + required + {{ end }} + {{ $typeDisplay }} +
+ + {{ if $propDescription }} +
+ {{ $propDescription | markdownify }} +
+ {{ end }} + + {{/* Enum values */}} + {{ if gt (len $propEnum) 0 }} +
+ Allowed: + {{ range $i, $val := $propEnum }} + {{ if $i }}, {{ end }}{{ $val }} + {{ end }} +
+ {{ end }} + + {{/* Default value */}} + {{ if $propDefault }} +
+ Default: + {{ $propDefault }} +
+ {{ end }} + + {{/* Example value */}} + {{ if $propExample }} +
+ Example: + {{ jsonify $propExample }} +
+ {{ end }} + + {{/* Nested object/array rendering (limit depth to prevent infinite loops) */}} + {{ if and (eq $propType "object") (lt $level 2) }} + {{ with $propSchema.properties }} + {{ partial "api/schema.html" (dict "schema" $propSchema "spec" $spec "level" (add $level 1)) }} + {{ end }} + {{ else if and (eq $propType "array") (lt $level 2) }} + {{ with $propSchema.items }} + {{ if isset . "properties" }} + {{ partial "api/schema.html" (dict "schema" . "spec" $spec "level" (add $level 1)) }} + {{ end }} + {{ end }} + {{ end }} +
+ {{ end }} +
+ {{ end }} + + {{/* Show example at schema level */}} + {{ if and $example (eq $level 0) }} +
+ Example request body +
{{ jsonify (dict "indent" "  ") $example }}
+
+ {{ end }} +
diff --git a/layouts/partials/api/section-children.html b/layouts/partials/api/section-children.html new file mode 100644 index 0000000000..c217ce5b5f --- /dev/null +++ b/layouts/partials/api/section-children.html @@ -0,0 +1,112 @@ +{{/* + API Section Children + + Renders tag pages from article data as a children list. + Sort order: conceptual tags (traitTags) first, then other tags alphabetically. + + Uses frontmatter params from generated pages: + - articleDataKey: product data key (e.g., 'influxdb3-core') + - articleSection: section slug (e.g., 'api' or 'management-api') + + Data path: data/article_data/influxdb/{articleDataKey}/{articleSection}/articles.yml +*/}} +{{ $currentPage := . }} + +{{/* Read data key and section from frontmatter */}} +{{ $dataKey := .Params.articleDataKey | default "" }} +{{ $section := .Params.articleSection | default "" }} + +{{/* Get article data using frontmatter-driven lookup */}} +{{ $articles := slice }} +{{ if and $dataKey $section }} + {{ with site.Data.article_data }} + {{ with index . "influxdb" }} + {{ with index . $dataKey }} + {{ with index . $section }} + {{ with index . "articles" }} + {{ with .articles }} + {{ $articles = . }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{/* Separate conceptual (traitTag) and non-conceptual articles */}} +{{ $conceptualArticles := slice }} +{{ $operationArticles := slice }} + +{{ range $articles }} + {{ if and (reflect.IsMap .) (isset . "fields") }} + {{ $fields := index . "fields" }} + {{ if reflect.IsMap $fields }} + {{ $isConceptual := false }} + {{ if isset $fields "isConceptual" }} + {{ $isConceptual = index $fields "isConceptual" }} + {{ end }} + {{ if $isConceptual }} + {{ $conceptualArticles = $conceptualArticles | append . }} + {{ else }} + {{ $operationArticles = $operationArticles | append . 
}} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{/* Sort each group by weight (default 100), then alphabetically by tag name */}} +{{ $conceptualArticles = sort $conceptualArticles "fields.weight" }} +{{ $operationArticles = sort $operationArticles "fields.weight" }} + +{{/* Combine: conceptual first, then operations */}} +{{ $sortedArticles := $conceptualArticles | append $operationArticles }} + +{{/* Also include static API pages (HTML files) not in article data */}} +{{/* These are compatibility API pages like v1-compatibility, v2, management */}} +{{ $staticApiPages := slice }} +{{ range $currentPage.Pages }} + {{/* Skip pages that are in article data (have tag param) or are all-endpoints */}} + {{ if and (not (isset .Params "tag")) (not .Params.isAllEndpoints) }} + {{ $staticApiPages = $staticApiPages | append . }} + {{ end }} +{{ end }} +{{ $staticApiPages = sort $staticApiPages "Weight" }} + + diff --git a/layouts/partials/api/security-schemes.html b/layouts/partials/api/security-schemes.html new file mode 100644 index 0000000000..87210f3a33 --- /dev/null +++ b/layouts/partials/api/security-schemes.html @@ -0,0 +1,68 @@ +{{/* + Security Schemes Display + + Renders OpenAPI security schemes as human-readable documentation. + Extracts securitySchemes from the referenced OpenAPI spec file. 
+ + Design principles: + - Each scheme gets an h2 heading (appears in "On this page" TOC) + - Focus on human-readable descriptions, not OpenAPI schema details + - Technical details (type, scheme, in) are omitted - they're for machines + - Descriptions should include usage examples + + Required page params: + - staticFilePath: Path to the OpenAPI specification file +*/}} + +{{ $specPath := .Params.staticFilePath }} +{{ if $specPath }} + {{/* Load the OpenAPI spec file from static directory */}} + {{ $fullPath := printf "static%s" $specPath }} + {{ $specContent := readFile $fullPath }} + {{ if $specContent }} + {{ $spec := transform.Unmarshal $specContent }} + {{ with $spec.components.securitySchemes }} +
+ {{ range $name, $scheme := . }} +
+ {{/* Human-friendly title from scheme name */}} + {{ $title := $name }} + {{/* Convert common scheme names to readable titles */}} + {{/* Short names (v1 specs) */}} + {{ if eq $name "BasicAuth" }}{{ $title = "Basic Authentication" }}{{ end }} + {{ if eq $name "TokenAuth" }}{{ $title = "Token Authentication" }}{{ end }} + {{ if eq $name "QueryAuth" }}{{ $title = "Query String Authentication" }}{{ end }} + {{ if eq $name "BearerAuth" }}{{ $title = "Bearer Token Authentication" }}{{ end }} + {{ if eq $name "ApiKeyAuth" }}{{ $title = "API Key Authentication" }}{{ end }} + {{/* Long names (v2+ specs) */}} + {{ if eq $name "BasicAuthentication" }}{{ $title = "Basic Authentication" }}{{ end }} + {{ if eq $name "TokenAuthentication" }}{{ $title = "Token Authentication" }}{{ end }} + {{ if eq $name "QuerystringAuthentication" }}{{ $title = "Query String Authentication" }}{{ end }} + +

{{ $title }}

+ + {{/* Description is the primary content - should include usage examples */}} + {{ with $scheme.description }} +
+ {{ . | markdownify }} +
+ {{ else }} + {{/* Fallback descriptions when OpenAPI spec doesn't provide one */}} +
+ {{ if eq $scheme.type "http" }} + {{ if eq $scheme.scheme "basic" }} +

Use HTTP Basic Authentication by including your credentials in the request.

+ {{ else if eq $scheme.scheme "bearer" }} +

Include a bearer token in the Authorization header.

+ {{ end }} + {{ else if eq $scheme.type "apiKey" }} +

Pass your API key {{ if eq $scheme.in "header" }}in the {{ $scheme.name }} header{{ else if eq $scheme.in "query" }}as the {{ $scheme.name }} query parameter{{ end }}.

+ {{ end }} +
+ {{ end }} +
+ {{ end }} +
+ {{ end }} + {{ end }} +{{ end }} diff --git a/layouts/partials/api/tag-renderer.html b/layouts/partials/api/tag-renderer.html new file mode 100644 index 0000000000..828b46bf1e --- /dev/null +++ b/layouts/partials/api/tag-renderer.html @@ -0,0 +1,68 @@ +{{/* + Tag Page Renderer + + Renders all operations for a tag page using Hugo templates. + Parses the OpenAPI spec file and renders each operation natively. + + Required page params: + - staticFilePath: Path to the OpenAPI specification file (YAML) + - operations: Array of operation metadata from frontmatter + + Usage: + {{ partial "api/tag-renderer.html" . }} +*/}} + +{{ $page := . }} +{{ $specPath := .Params.staticFilePath }} +{{ $operations := .Params.operations | default slice }} + +{{/* Load and parse the OpenAPI spec from static/ directory */}} +{{ $spec := dict }} +{{ if $specPath }} + {{/* Build path to static file (staticFilePath has leading slash, e.g. /openapi/...) */}} + {{ $staticFile := printf "static%s" $specPath }} + + {{/* Use os.ReadFile (Hugo 0.121+) to read from static directory */}} + {{ with os.ReadFile $staticFile }} + {{ $spec = . | transform.Unmarshal }} + {{ else }} + {{/* Fallback: try unmounted resources (for assets mount configuration) */}} + {{ $cleanPath := strings.TrimPrefix "/" $specPath }} + {{ with resources.Get $cleanPath }} + {{ $spec = .Content | transform.Unmarshal }} + {{ end }} + {{ end }} +{{ end }} + +{{/* Tag description from spec */}} +{{ $tagDescription := "" }} +{{ $tagName := .Params.tag | default "" }} +{{ range $spec.tags }} + {{ if eq .name $tagName }} + {{ $tagDescription = .description | default "" }} + {{ end }} +{{ end }} + +
+ {{/* Tag Overview/Description */}} + {{ if $tagDescription }} +
+
+ {{ $tagDescription | markdownify }} +
+
+ {{ end }} + + {{/* Operations List */}} +
+ {{ range $operations }} + {{ partial "api/operation.html" (dict + "operation" . + "spec" $spec + "context" $page + ) }} + {{ end }} +
+ + {{/* Related links rendered via frontmatter + article/related.html */}} +
diff --git a/layouts/partials/article/related.html b/layouts/partials/article/related.html index 184b7f6dd3..41becf3493 100644 --- a/layouts/partials/article/related.html +++ b/layouts/partials/article/related.html @@ -1,32 +1,25 @@ -{{ $scratch := newScratch }} {{ if .Params.related }}