diff --git a/.ci/vale/styles/InfluxDataDocs/Acronyms.yml b/.ci/vale/styles/InfluxDataDocs/Acronyms.yml index aced699965..4b007968ea 100644 --- a/.ci/vale/styles/InfluxDataDocs/Acronyms.yml +++ b/.ci/vale/styles/InfluxDataDocs/Acronyms.yml @@ -49,106 +49,6 @@ exceptions: - SCSS - SDK - SQL - # SQL/InfluxQL keywords (3-5 uppercase chars trigger this rule) - # These are standard uppercase query clauses, not acronyms. - - ABS - - ADD - - ALL - - AND - - ANY - - ARE - - ASC - - AVG - - BIT - - CASE - - CAST - - COS - - DAY - - DEC - - DESC - - DROP - - ELSE - - END - - EXEC - - EXP - - FOR - - FROM - - FULL - - GRANT - - GROUP - - INNER - - INPUT - - INT - - INTO - - JOIN - - KEY - - LAST - - LEFT - - LEVEL - - LIKE - - LOG - - LOWER - - MATCH - - MAX - - MIN - - MONTH - - NAME - - NAMES - - NOT - - NOW - - "NULL" - - "ON" - - ONLY - - OPEN - - ORDER - - OUTER - - PAD - - POW - - PRIOR - - READ - - REAL - - RIGHT - - ROWS - - SET - - SIN - - SIZE - - SOME - - SPACE - - SUM - - TABLE - - TAN - - THEN - - TIME - - TOP - - TRIM - - "TRUE" - - UNION - - UPPER - - USAGE - - USING - - VALUE - - VIEW - - WHEN - - WHERE - - WITH - - WORK - - WRITE - - YEAR - - ZONE - # InfluxQL-specific - - COUNT - - FILL - - FIRST - - FLOAT - - MEAN - - BEGIN - - ALTER - - CHECK - - CLOSE - - CROSS - - FETCH - - LIMIT - - SHOW - SSH - SSL - SVG diff --git a/.claude/commands/finish.md b/.claude/commands/finish.md index 9c8f3f3bad..0ca864f86b 100644 --- a/.claude/commands/finish.md +++ b/.claude/commands/finish.md @@ -6,7 +6,7 @@ Complete development work by cleaning up ephemeral documents and preparing for m This skill handles the end of a development workflow: 1. Reads the full contents of PLAN.md -2. Posts the complete plan details as a PR comment +2. Adds the complete plan details to the PR description (or as a PR comment if the description is already long) 3. Removes ephemeral planning documents 4. Creates a cleanup commit 5. 
Optionally merges the PR @@ -23,7 +23,7 @@ This skill handles the end of a development workflow: - `--merge`: After cleanup, merge the PR using `gh pr merge --squash` - `--dry-run`: Preview actions without executing them -- `--no-comment`: Skip posting plan details as PR comment +- `--no-pr-update`: Skip updating PR description ## Steps @@ -59,12 +59,42 @@ Read the entire PLAN.md file and preserve all sections: PLAN_CONTENTS="$(cat PLAN.md)" ``` -### 3. Post Plan Details as PR Comment +### 3. Add Plan Details to PR -Post the **complete** PLAN.md contents as a PR comment. Do not summarize or -abbreviate the plan -- include all details so they are preserved after the file -is deleted. Always use a comment (not the PR description) to keep the -description clean and editable. +Add the **complete** PLAN.md contents to the PR. Do not summarize or abbreviate +the plan -- include all details so they are preserved after the file is deleted. + +**Strategy:** +- If the PR description is short (under ~2000 characters), append the full plan + to the PR description. +- If the PR description is already long, post the full plan as a PR comment + instead, to keep the description readable. 
+ +**Appending to PR description:** + +```bash +# Get current PR body +CURRENT_BODY="$(gh pr view --json body -q '.body')" + +# Update PR with full plan details appended +gh pr edit --body "$(cat < +Full plan details (from PLAN.md) + +${PLAN_CONTENTS} + + +EOF +)" +``` + +**Posting as PR comment (if description is already long):** ```bash gh pr comment --body "$(cat <` block +- If the PR description is already long, posts the full plan as a PR comment instead - Squash merge is recommended to keep main branch clean - The deleted PLAN.md remains in branch history (recoverable if needed) - Works with GitHub Actions cleanup as a fallback safety net -- Use `--no-comment` to skip posting plan details to the PR +- Use `--no-pr-update` if you want to write the PR description manually diff --git a/.claude/launch.json b/.claude/launch.json deleted file mode 100644 index a9b83d1202..0000000000 --- a/.claude/launch.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "version": "0.0.1", - "configurations": [ - { - "name": "docs-dev", - "runtimeExecutable": "npx", - "runtimeArgs": ["hugo", "server"], - "port": 1313 - }, - { - "name": "docs-test", - "runtimeExecutable": "npx", - "runtimeArgs": ["hugo", "server", "--environment", "testing", "--port", "1315", "--noHTTPCache"], - "port": 1315 - } - ] -} diff --git a/.claude/settings.json b/.claude/settings.json index 4efc2a0d29..2f5aae0075 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -107,13 +107,25 @@ ] } ], + "WorktreeCreate": [ + { + "hooks": [ + { + "type": "command", + "command": "CYPRESS_INSTALL_BINARY=0 yarn install --frozen-lockfile 2>&1 | tail -3", + "timeout": 120, + "statusMessage": "Installing dependencies in new worktree" + } + ] + } + ], "SessionStart": [ { "hooks": [ { "type": "command", - "command": "[ ! -d node_modules ] && CYPRESS_INSTALL_BINARY=0 PUPPETEER_SKIP_DOWNLOAD=1 PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 yarn install --frozen-lockfile 2>&1 | tail -3 || true", - "timeout": 300, + "command": "[ ! 
-d node_modules ] && CYPRESS_INSTALL_BINARY=0 yarn install --frozen-lockfile 2>&1 | tail -3 || true", + "timeout": 120, "async": true, "statusMessage": "Checking dependencies" } diff --git a/.claude/skills/vale-linting/SKILL.md b/.claude/skills/vale-linting/SKILL.md index 3f061227aa..760487d522 100644 --- a/.claude/skills/vale-linting/SKILL.md +++ b/.claude/skills/vale-linting/SKILL.md @@ -37,24 +37,19 @@ This skill covers the complete Vale linting workflow for InfluxData documentatio ## Part 1: How Vale Runs -### Execution via `.ci/vale/vale.sh` +### Docker-Based Execution -The wrapper script `.ci/vale/vale.sh` runs Vale using: - -1. **Local binary** (preferred) — if `vale` is installed and version >= 3.x -2. **Docker fallback** — `jdkato/vale:v${VALE_VERSION}` (pinned version) +Vale runs inside a Docker container via `.ci/vale/vale.sh`: ```bash -# The wrapper handles binary vs Docker automatically -.ci/vale/vale.sh --config=.vale.ini content/path/ - -# In CI, the pr-vale-check.yml workflow installs the Vale binary -# directly (reads version from vale.sh), so Docker is not needed. +docker run \ + --mount type=bind,src=$(pwd),dst=/workdir \ + -w /workdir \ + jdkato/vale:latest \ + "$@" ``` -**Critical limitation:** Only files inside the repository are accessible when using Docker fallback. Files in `/tmp` or other external paths will silently fail. - -**macOS note:** The CI script `.github/scripts/vale-check.sh` uses `declare -A` (associative arrays) which requires bash 4+. macOS ships bash 3.2. Use `/opt/homebrew/bin/bash` or run tests in CI instead. +**Critical limitation:** Only files inside the repository are accessible. Files in `/tmp` or other external paths will silently fail (Vale falls back to stdin). 
### Configuration Files @@ -82,36 +77,23 @@ BasedOnStyles = Vale, InfluxDataDocs, Google, write-good ### Disabled Rules (and Why) -Rules are disabled in two categories across `.vale.ini` and all product configs: - -**Mechanical rules disabled** (replaced by custom equivalents or incompatible with InfluxDB syntax): - -| Rule | Reason | -|------|--------| -| `Google.Acronyms` | Custom `InfluxDataDocs.Acronyms` handles this | -| `Google.DateFormat` | Custom `InfluxDataDocs.DateFormat` handles this | -| `Google.Ellipses` | Custom `InfluxDataDocs.Ellipses` handles this | -| `Google.Headings` | Too strict for technical doc headings | -| `Google.WordList` | Custom `InfluxDataDocs.WordList` handles this | -| `Google.Units` | Flags InfluxDB duration literals (30d, 24h); custom `InfluxDataDocs.Units` checks byte units only | -| `Vale.Spelling` | Custom `InfluxDataDocs.Spelling` handles this | -| `Vale.Terms` | False positives from URLs, file paths, and code | - -**Style rules disabled** (high false-positive rate in technical docs): - -| Rule | Reason | -|------|--------| -| `Google.Contractions` | Not relevant to InfluxData style | -| `Google.FirstPerson` | Tutorials use "I" intentionally | -| `Google.Passive` | Technical docs use passive voice legitimately | -| `Google.We` | "We recommend" is standard in docs | -| `Google.Will` | Future tense is standard in docs | -| `write-good.Cliches` | High false positive rate | -| `write-good.Passive` | Duplicate of Google.Passive concern | -| `write-good.So` | Starting with "So" is fine | -| `write-good.ThereIs` | Often the clearest phrasing | -| `write-good.TooWordy` | Flags legitimate terms: aggregate, expiration, multiple | -| `write-good.Weasel` | Context-dependent, better handled during content review | +The following rules are disabled in `.vale.ini` for specific reasons: + +```ini +# Vocabulary-based substitution creates false positives in URLs/paths +# Example: /api/v3/write flagged because "api" should be "APIs" +Vale.Terms = 
NO + +# Google.Units flags InfluxDB duration literals (30d, 24h) as needing spaces +# We use custom InfluxDataDocs.Units that only checks byte units +Google.Units = NO + +# Flags legitimate technical terms: aggregate, expiration, However, multiple +write-good.TooWordy = NO + +# Using custom InfluxDataDocs.Spelling instead +Vale.Spelling = NO +``` ### Active Custom Rules @@ -264,19 +246,7 @@ node -e "require('js-yaml').load(require('fs').readFileSync('path/to/rule.yml')) .ci/vale/vale.sh --config=.vale.ini --minAlertLevel=suggestion path/to/file.md ``` -## Part 6: Vale Cannot Inspect URLs - -`TokenIgnores` in `.vale.ini` strips all URLs before any rules run: - -```ini -TokenIgnores = https?://[^\s\)\]>"]+ -``` - -**This means no Vale rule can match URL content.** An earlier attempt to create a `SupportLink.yml` rule to validate `support.influxdata.com` URL patterns failed for this reason — the URLs were stripped before the rule could see them. Support URL validation uses a separate shell script (`.ci/scripts/check-support-links.sh`) instead. - -Keep this in mind when designing rules: if the pattern to match is inside a URL, use a shell script or pre-commit hook, not a Vale rule. - -## Part 7: TokenIgnores vs Rule Filters +## Part 6: TokenIgnores vs Rule Filters ### TokenIgnores (in .vale.ini) @@ -346,34 +316,27 @@ Vocab = InfluxDataDocs Packages = Google, write-good, Hugo [*.md] -BasedOnStyles = Vale, InfluxDataDocs, Cloud-Dedicated, Google, write-good +BasedOnStyles = Vale, InfluxDataDocs, Google, write-good -# --- Disabled mechanical rules --- +# These rules must be disabled in every product .vale.ini, same as the root .vale.ini. Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO +# Disable Google.Units in favor of InfluxDataDocs.Units which only checks byte +# units (GB, TB, etc). Duration literals (30d, 24h, 1h) are valid InfluxDB syntax. 
Google.Units = NO Vale.Spelling = NO Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO write-good.TooWordy = NO -write-good.Weasel = NO TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ https?://[^\s\)\]>"]+, \ `[^`]+` + +# Product-specific overrides +InfluxDataDocs.Branding = YES EOF # 2. Run Vale with product config @@ -410,15 +373,10 @@ grep TokenIgnores .vale.ini ## Related Files -| File | Purpose | -|------|---------| -| `.vale.ini` | Main configuration | -| `.vale-instructions.ini` | Config for non-content files (READMEs, AGENTS.md, etc.) | -| `.ci/vale/vale.sh` | Vale wrapper (local binary or Docker fallback) | -| `.ci/vale/styles/` | All Vale style rules | -| `.ci/scripts/check-support-links.sh` | Support URL validation (can't use Vale — see Part 6) | -| `.github/scripts/vale-check.sh` | CI script: groups files by product config, runs Vale | -| `.github/scripts/resolve-shared-content.sh` | CI script: resolves `content/shared/*` to product pages | -| `.github/workflows/pr-vale-check.yml` | CI workflow: runs Vale on PR changes | -| `lefthook.yml` | Pre-commit hooks that run Vale | -| `DOCS-TESTING.md` | Testing documentation (includes Vale CI section) | +| File | Purpose | +| ------------------ | --------------------------------------------- | +| `.vale.ini` | Main configuration | +| `.ci/vale/vale.sh` | Docker wrapper script | +| `.ci/vale/styles/` | All Vale style rules | +| `lefthook.yml` | Pre-commit hooks that run Vale | +| `DOCS-TESTING.md` | Testing documentation (includes Vale section) | diff --git a/.claude/skills/vale-rule-config/SKILL.md b/.claude/skills/vale-rule-config/SKILL.md index 3802357f8b..0ef5c04db3 100644 --- a/.claude/skills/vale-rule-config/SKILL.md +++ b/.claude/skills/vale-rule-config/SKILL.md @@ -152,12 
+152,6 @@ tokens: - '(?<=\s)Internet(?! Service Provider| Protocol)' ``` -### Critical Limitation: Vale Cannot Match URLs - -`TokenIgnores` in `.vale.ini` strips all URLs before rules run. **No rule — `existence`, `substitution`, or `raw` — can match content inside a URL.** This applies globally and cannot be overridden per-rule. - -For URL pattern validation (e.g., enforcing canonical support URLs), use a shell script or pre-commit hook instead of a Vale rule. See `.ci/scripts/check-support-links.sh` for an example. - ### tokens vs raw **tokens:** @@ -265,7 +259,20 @@ BasedOnStyles = Google, InfluxDataDocs ### Product-Specific Config -Product configs must mirror all disabled rules from root `.vale.ini` (rules disabled in root are NOT inherited). See the `vale-linting` skill for a complete product config example with all disabled rules. +Example: `content/influxdb/cloud-dedicated/.vale.ini` + +```ini +StylesPath = .ci/vale/styles +MinAlertLevel = error +Vocab = Cloud-Dedicated + +[*.md] +BasedOnStyles = Google, InfluxDataDocs, Cloud-Dedicated + +# Disable specific rules for this product +Google.Headings = NO +InfluxDataDocs.TechnicalTerms = NO +``` ### Rule Configuration diff --git a/.github/scripts/vale-check.sh b/.github/scripts/vale-check.sh index 5bcc31f023..d24f3b611f 100755 --- a/.github/scripts/vale-check.sh +++ b/.github/scripts/vale-check.sh @@ -15,9 +15,6 @@ set -euo pipefail -REPO_ROOT="$(cd "$(dirname "$0")/../.." 
&& pwd)" -VALE_WRAPPER="${REPO_ROOT}/.ci/vale/vale.sh" - # Parse arguments FILES=() while [[ $# -gt 0 ]]; do @@ -54,8 +51,6 @@ get_vale_config() { case "$file" in content/influxdb3/cloud-dedicated/*) echo "content/influxdb3/cloud-dedicated/.vale.ini" ;; content/influxdb3/cloud-serverless/*) echo "content/influxdb3/cloud-serverless/.vale.ini" ;; - content/influxdb3/clustered/*) echo "content/influxdb3/clustered/.vale.ini" ;; - content/influxdb3/core/*) echo "content/influxdb3/core/.vale.ini" ;; content/influxdb/v2/*) echo "content/influxdb/v2/.vale.ini" ;; content/*) echo ".vale.ini" ;; *) echo ".vale-instructions.ini" ;; @@ -80,9 +75,11 @@ for config in "${!CONFIG_GROUPS[@]}"; do echo "Running Vale with config: $config (${#file_array[@]} files)" >&2 - # Run Vale via the repo wrapper (.ci/vale/vale.sh), - # which uses a local binary if available or falls back to Docker. - RESULT=$("$VALE_WRAPPER" \ + # Run Vale via Docker + RESULT=$(docker run --rm \ + -v "$(pwd)":/workdir \ + -w /workdir \ + jdkato/vale:latest \ --config="$config" \ --output=JSON \ --minAlertLevel=suggestion \ diff --git a/.github/workflows/audit-documentation.yml b/.github/workflows/audit-documentation.yml index 542c741444..742f31c659 100644 --- a/.github/workflows/audit-documentation.yml +++ b/.github/workflows/audit-documentation.yml @@ -19,10 +19,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -42,7 +42,7 @@ jobs: node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core $VERSION - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: cli-audit-3-core-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/cli-audit/ @@ -53,10 +53,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - 
name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -76,7 +76,7 @@ jobs: node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise $VERSION - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: cli-audit-3-enterprise-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/cli-audit/ @@ -87,10 +87,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -122,7 +122,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: cli-audit-3-influxctl-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/cli-audit/ @@ -133,7 +133,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Run Core API audit run: | @@ -159,7 +159,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: api-audit-3-core-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/api-audit/ @@ -170,7 +170,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Run Enterprise API audit run: | @@ -197,7 +197,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: api-audit-3-enterprise-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/api-audit/ @@ -208,7 +208,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Run Cloud Dedicated API audit run: | @@ -235,7 +235,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: 
actions/upload-artifact@v4 with: name: api-audit-3-cloud-dedicated-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/api-audit/ @@ -246,7 +246,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Run Clustered API audit run: | @@ -273,7 +273,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: api-audit-3-clustered-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/api-audit/ @@ -284,7 +284,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Run Cloud Serverless API audit run: | @@ -311,7 +311,7 @@ jobs: EOF - name: Upload audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: api-audit-3-cloud-serverless-${{ github.event.inputs.version || 'local' }} path: helper-scripts/output/api-audit/ @@ -333,15 +333,15 @@ jobs: if: always() && (github.event_name == 'schedule' || github.event.inputs.create_issue == 'true') steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Download all audit reports - uses: actions/download-artifact@v8 + uses: actions/download-artifact@v4 with: path: audit-reports/ - name: Create issues from audit results - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); @@ -396,10 +396,10 @@ jobs: if: always() steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Download all artifacts - uses: actions/download-artifact@v8 + uses: actions/download-artifact@v4 with: path: audit-artifacts/ @@ -422,7 +422,7 @@ jobs: done - name: Upload summary - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: audit-summary path: summary.md diff --git a/.github/workflows/auto-label.yml b/.github/workflows/auto-label.yml index 4228a62aa6..7dfccb43c2 100644 --- 
a/.github/workflows/auto-label.yml +++ b/.github/workflows/auto-label.yml @@ -17,94 +17,18 @@ concurrency: cancel-in-progress: true jobs: - # ----------------------------------------------------------------- - # Check if this re-run would create skipped check runs that - # overwrite existing successful status. - # ----------------------------------------------------------------- - check-existing-success: - runs-on: ubuntu-latest - permissions: - checks: read - pull-requests: read - outputs: - should-run: ${{ steps.check.outputs.should-run }} - steps: - - name: Check if re-run would overwrite successful status - id: check - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - with: - script: | - const headSha = context.sha; - const runId = context.runId; - const runAttempt = Number(process.env.GITHUB_RUN_ATTEMPT) || 1; - - core.info(`Run info: SHA=${headSha}, runId=${runId}, attempt=${runAttempt}`); - - // First attempts always proceed - if (runAttempt === 1) { - core.info('First attempt - proceeding'); - core.setOutput('should-run', 'true'); - return; - } - - // Check if jobs would actually run or would skip - const pr = context.payload.pull_request; - const isDraft = pr?.draft === true; - const isFork = pr?.head?.repo?.full_name !== pr?.base?.repo?.full_name; - const isWorkflowDispatch = context.eventName === 'workflow_dispatch'; - - const jobsWouldRun = isWorkflowDispatch || (!isDraft && !isFork); - - if (jobsWouldRun) { - core.info('Re-run would execute jobs - proceeding'); - core.setOutput('should-run', 'true'); - return; - } - - // Jobs would be skipped. Check if successful runs already exist. 
- core.info('Re-run would skip jobs - checking for existing successful runs'); - - try { - const { data: checkRuns } = await github.rest.checks.listForRef({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: headSha, - per_page: 100, - }); - - const successfulOtherRuns = checkRuns.check_runs.filter(cr => { - const isOurJob = cr.name === 'auto-label' || cr.name === 'check-existing-success'; - const isSuccessful = cr.conclusion === 'success'; - const fromDifferentRun = cr.html_url && !cr.html_url.includes(`/runs/${runId}/`); - return isOurJob && isSuccessful && fromDifferentRun; - }); - - if (successfulOtherRuns.length > 0) { - core.info(`Found ${successfulOtherRuns.length} successful runs - aborting to preserve status`); - core.setOutput('should-run', 'false'); - return; - } - } catch (error) { - core.warning(`Could not check existing runs: ${error.message}`); - } - - core.info('No existing successful runs - proceeding'); - core.setOutput('should-run', 'true'); - auto-label: - needs: check-existing-success runs-on: ubuntu-latest permissions: contents: read pull-requests: write # Skip draft PRs and fork PRs (workflow_dispatch always runs) if: | - needs.check-existing-success.outputs.should-run == 'true' && - (github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - github.event.pull_request.head.repo.full_name == github.repository)) + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository) steps: - - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: persist-credentials: false sparse-checkout: | @@ -115,7 +39,7 @@ jobs: package.json sparse-checkout-cone-mode: false - - uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: 
node-version: 22 @@ -123,7 +47,7 @@ jobs: run: npm install --no-save --ignore-scripts --no-package-lock --legacy-peer-deps js-yaml - name: Apply product labels - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 with: script: | const { diff --git a/.github/workflows/check-pinned-deps.yml b/.github/workflows/check-pinned-deps.yml index d8d3f25d18..3b83d11d77 100644 --- a/.github/workflows/check-pinned-deps.yml +++ b/.github/workflows/check-pinned-deps.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@v4 - name: Check for update id: check @@ -80,7 +80,7 @@ jobs: - name: Create pull request if: steps.check.outputs.up-to-date == 'false' - uses: peter-evans/create-pull-request@v8 + uses: peter-evans/create-pull-request@v7 with: commit-message: "chore(deps): update ${{ matrix.dep.name }} to v${{ steps.check.outputs.latest }}" branch: "chore/update-${{ matrix.dep.name }}-${{ steps.check.outputs.latest }}" diff --git a/.github/workflows/cleanup-ephemeral-docs.yml b/.github/workflows/cleanup-ephemeral-docs.yml index 02940a1f97..f1266c9ed1 100644 --- a/.github/workflows/cleanup-ephemeral-docs.yml +++ b/.github/workflows/cleanup-ephemeral-docs.yml @@ -1,17 +1,9 @@ # Cleanup Ephemeral Planning Documents # # This workflow automatically removes PLAN.md and other ephemeral planning -# documents from the main branch. It serves as a safety net in case the +# documents when a PR is merged. This serves as a safety net in case the # /finish skill wasn't used before merging. # -# Triggers: -# 1. pull_request_target (closed+merged) - catches files merged via PR -# 2. 
push to main branches - catches files pushed directly or via squash -# merge (path-filtered to only run when ephemeral files are present) -# -# Strategy: Creates a cleanup PR instead of pushing directly to the -# protected main branch, since branch protection rules block direct pushes. -# # Ephemeral documents are temporary files used during development that # shouldn't persist on the main branch: # - PLAN.md: Planning and task tracking @@ -20,14 +12,16 @@ # These files are tracked on feature branches but should be deleted before # merge. If they slip through, this action cleans them up. # -# The "Preserve plan details" step only runs for PR merges (not direct -# pushes) since there's no source PR to comment on for direct pushes. -# # Security Note: Uses pull_request_target to ensure write permissions for # PRs from forks. This is safe because: # 1. The workflow only checks out the base branch (not PR code) # 2. File paths are validated (hardcoded list of allowed files) # 3. No PR-controlled code is executed +# +# To use in your repo: +# 1. Copy this file to .github/workflows/cleanup-ephemeral-docs.yml +# 2. Update the 'branches' list to match your main branch (master or main) +# 3. 
Commit and push name: Cleanup ephemeral docs @@ -37,20 +31,11 @@ on: branches: - master - main - push: - branches: - - master - - main - paths: - - 'PLAN.md' - - 'HANDOVER.md' jobs: cleanup: - # Run when PR is merged OR when ephemeral files are pushed directly - if: >- - (github.event_name == 'push') || - (github.event_name == 'pull_request_target' && github.event.pull_request.merged == true) + # Only run when PR is actually merged (not just closed) + if: github.event.pull_request.merged == true runs-on: ubuntu-latest permissions: @@ -59,26 +44,23 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: - ref: ${{ github.event.pull_request.base.ref || github.ref }} + ref: ${{ github.event.pull_request.base.ref }} fetch-depth: 0 - name: Check for ephemeral files id: check run: | FILES_TO_REMOVE="" - FILE_LIST="" if [ -f "PLAN.md" ]; then FILES_TO_REMOVE="$FILES_TO_REMOVE PLAN.md" - FILE_LIST="${FILE_LIST:+$FILE_LIST, }PLAN.md" echo "Found: PLAN.md" fi if [ -f "HANDOVER.md" ]; then FILES_TO_REMOVE="$FILES_TO_REMOVE HANDOVER.md" - FILE_LIST="${FILE_LIST:+$FILE_LIST, }HANDOVER.md" echo "Found: HANDOVER.md" fi @@ -89,84 +71,67 @@ jobs: echo "Files to remove:$FILES_TO_REMOVE" echo "has_files=true" >> $GITHUB_OUTPUT echo "files=$FILES_TO_REMOVE" >> $GITHUB_OUTPUT - echo "file_list=$FILE_LIST" >> $GITHUB_OUTPUT fi - - name: Preserve plan details on source PR - if: >- - steps.check.outputs.has_files == 'true' && - github.event_name == 'pull_request_target' + - name: Preserve plan details on PR + if: steps.check.outputs.has_files == 'true' env: GH_TOKEN: ${{ github.token }} run: | PR_NUMBER=${{ github.event.pull_request.number }} + # Post full PLAN.md contents as a PR comment before removing if [ -f "PLAN.md" ]; then - gh pr comment "$PR_NUMBER" --body "$(cat <<'EOFCOMMENT' - ## Development Plan (preserved by cleanup workflow) + PLAN_BODY="## Development Plan (preserved by cleanup workflow) - Full plan details from `PLAN.md`, 
preserved before automated cleanup: + Full plan details from \`PLAN.md\`, preserved before automated cleanup: --- - EOFCOMMENT - cat PLAN.md)" + $(cat PLAN.md)" + gh pr comment "$PR_NUMBER" --body "$PLAN_BODY" echo "Posted PLAN.md contents to PR #$PR_NUMBER" fi if [ -f "HANDOVER.md" ]; then - gh pr comment "$PR_NUMBER" --body "$(cat <<'EOFCOMMENT' - ## Handover Notes (preserved by cleanup workflow) + HANDOVER_BODY="## Handover Notes (preserved by cleanup workflow) - Full contents from `HANDOVER.md`, preserved before automated cleanup: + Full contents from \`HANDOVER.md\`, preserved before automated cleanup: --- - EOFCOMMENT - cat HANDOVER.md)" + $(cat HANDOVER.md)" + gh pr comment "$PR_NUMBER" --body "$HANDOVER_BODY" echo "Posted HANDOVER.md contents to PR #$PR_NUMBER" fi - - name: Create cleanup PR + - name: Remove ephemeral files if: steps.check.outputs.has_files == 'true' - env: - GH_TOKEN: ${{ github.token }} run: | - TARGET_BRANCH="${{ github.event.pull_request.base.ref || github.ref_name }}" - CLEANUP_BRANCH="chore/cleanup-ephemeral-docs-$(date +%Y%m%d-%H%M%S)" - git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" - # Create cleanup branch from current HEAD - git checkout -b "$CLEANUP_BRANCH" + # Pull latest changes to avoid non-fast-forward errors + # If rebase fails due to conflicts, fall back to regular merge + if ! git pull --rebase origin ${{ github.event.pull_request.base.ref }}; then + echo "Rebase failed, falling back to merge" + git rebase --abort + git pull --no-rebase --no-edit origin ${{ github.event.pull_request.base.ref }} + fi - # Remove ephemeral files + # Remove files git rm -f ${{ steps.check.outputs.files }} + # Commit with skip-ci to avoid triggering other workflows git commit -m "chore: remove ephemeral planning docs [skip ci] Automated cleanup of development planning documents. These files are used during development but shouldn't persist on the main branch. 
+ Plan details preserved as PR comment. - Files removed: ${{ steps.check.outputs.file_list }}" - - git push -u origin "$CLEANUP_BRANCH" - - # Create PR and auto-merge - PR_URL=$(gh pr create \ - --base "$TARGET_BRANCH" \ - --head "$CLEANUP_BRANCH" \ - --title "chore: remove ephemeral planning docs" \ - --body "Automated cleanup of development planning documents (${{ steps.check.outputs.file_list }}). - - These files are used during development but shouldn't persist on the main branch. - Created by the cleanup-ephemeral-docs workflow.") - - echo "Created cleanup PR: $PR_URL" + Files removed:${{ steps.check.outputs.files }}" - # Enable auto-merge so it merges once checks pass - gh pr merge "$PR_URL" --auto --squash --delete-branch || echo "Auto-merge not available, PR requires manual merge" + git push - echo "Cleanup PR created and auto-merge enabled" + echo "✓ Removed ephemeral files and pushed cleanup commit" diff --git a/.github/workflows/cleanup-stale-previews.yml b/.github/workflows/cleanup-stale-previews.yml index 21fe52f863..5a338d0863 100644 --- a/.github/workflows/cleanup-stale-previews.yml +++ b/.github/workflows/cleanup-stale-previews.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout gh-pages - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: ref: gh-pages continue-on-error: true @@ -29,7 +29,7 @@ jobs: - name: Get open PR numbers if: steps.check-branch.outputs.has-previews == 'true' id: open-prs - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const { data: prs } = await github.rest.pulls.list({ diff --git a/.github/workflows/doc-review.yml b/.github/workflows/doc-review.yml index 3de1b2553f..2d4cc3cf47 100644 --- a/.github/workflows/doc-review.yml +++ b/.github/workflows/doc-review.yml @@ -22,117 +22,26 @@ concurrency: cancel-in-progress: true jobs: - # ----------------------------------------------------------------- - # Job 0: Check if this re-run would create skipped check runs that 
- # overwrite existing successful status. - # - # Only aborts re-runs where jobs would be SKIPPED. If a re-run would - # actually execute jobs (potentially with failures), it proceeds - # because that's useful information. - # ----------------------------------------------------------------- - check-existing-success: - runs-on: ubuntu-latest - permissions: - checks: read - pull-requests: read - outputs: - should-run: ${{ steps.check.outputs.should-run }} - steps: - - name: Check if re-run would overwrite successful status - id: check - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - env: - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - with: - script: | - const headSha = context.sha; - const runId = context.runId; - const runAttempt = Number(process.env.GITHUB_RUN_ATTEMPT) || 1; - - core.info(`Run info: SHA=${headSha}, runId=${runId}, attempt=${runAttempt}`); - - // First attempts always proceed - if (runAttempt === 1) { - core.info('First attempt - proceeding'); - core.setOutput('should-run', 'true'); - return; - } - - // This is a re-run. Check if jobs would actually run or would skip. - // We need to evaluate the same conditions that jobs use. 
- const pr = context.payload.pull_request; - const isDraft = pr?.draft === true; - const isFork = pr?.head?.repo?.full_name !== pr?.base?.repo?.full_name; - const hasSkipLabel = pr?.labels?.some(l => l.name === 'skip-review') || false; - const isWorkflowDispatch = context.eventName === 'workflow_dispatch'; - - core.info(`PR state: draft=${isDraft}, fork=${isFork}, skip-review=${hasSkipLabel}, workflow_dispatch=${isWorkflowDispatch}`); - - // Check if job conditions would pass (meaning jobs would actually run) - const jobsWouldRun = isWorkflowDispatch || (!isDraft && !isFork && !hasSkipLabel); - - if (jobsWouldRun) { - // Jobs would actually execute - let the re-run proceed - // Any failures would be important information - core.info('Re-run would execute jobs - proceeding'); - core.setOutput('should-run', 'true'); - return; - } - - // Jobs would be skipped. Check if successful runs already exist. - core.info('Re-run would skip jobs - checking for existing successful runs'); - - try { - const { data: checkRuns } = await github.rest.checks.listForRef({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: headSha, - per_page: 100, - }); - - const docReviewJobs = ['check-existing-success', 'resolve-urls', 'copilot-review', 'copilot-visual-review', 'report-skipped']; - const successfulOtherRuns = checkRuns.check_runs.filter(cr => { - const isOurJob = docReviewJobs.includes(cr.name); - const isSuccessful = cr.conclusion === 'success'; - const fromDifferentRun = cr.html_url && !cr.html_url.includes(`/runs/${runId}/`); - return isOurJob && isSuccessful && fromDifferentRun; - }); - - if (successfulOtherRuns.length > 0) { - core.info(`Found ${successfulOtherRuns.length} successful runs - aborting to preserve status`); - successfulOtherRuns.forEach(cr => core.info(` - ${cr.name}: ${cr.html_url}`)); - core.setOutput('should-run', 'false'); - return; - } - } catch (error) { - core.warning(`Could not check existing runs: ${error.message}`); - } - - core.info('No 
existing successful runs - proceeding'); - core.setOutput('should-run', 'true'); - # ----------------------------------------------------------------- # Job 1: Resolve preview URLs from changed content files # ----------------------------------------------------------------- resolve-urls: - needs: check-existing-success runs-on: ubuntu-latest permissions: contents: read pull-requests: read if: | - needs.check-existing-success.outputs.should-run == 'true' && - (github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - github.event.pull_request.head.repo.full_name == github.repository && - !contains(github.event.pull_request.labels.*.name, 'skip-review'))) + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository && + !contains(github.event.pull_request.labels.*.name, 'skip-review')) outputs: urls: ${{ steps.detect.outputs.urls }} url-count: ${{ steps.detect.outputs.url-count }} skipped: ${{ steps.detect.outputs.skipped }} skip-reason: ${{ steps.detect.outputs.skip-reason }} steps: - - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: persist-credentials: false fetch-depth: 0 @@ -144,7 +53,7 @@ jobs: package.json sparse-checkout-cone-mode: false - - uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version: 22 @@ -172,19 +81,17 @@ jobs: # Job 2: Copilot code review (runs in parallel with Job 1) # ----------------------------------------------------------------- copilot-review: - needs: check-existing-success runs-on: ubuntu-latest permissions: pull-requests: write if: | - needs.check-existing-success.outputs.should-run == 'true' && - (github.event_name == 'workflow_dispatch' || - (!github.event.pull_request.draft && - 
github.event.pull_request.head.repo.full_name == github.repository && - !contains(github.event.pull_request.labels.*.name, 'skip-review'))) + github.event_name == 'workflow_dispatch' || + (!github.event.pull_request.draft && + github.event.pull_request.head.repo.full_name == github.repository && + !contains(github.event.pull_request.labels.*.name, 'skip-review')) steps: - name: Request Copilot review - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} with: @@ -221,7 +128,7 @@ jobs: needs: resolve-urls if: needs.resolve-urls.result == 'success' && fromJson(needs.resolve-urls.outputs.url-count) > 0 steps: - - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: persist-credentials: false sparse-checkout: .github/prompts/copilot-visual-review.md @@ -229,7 +136,7 @@ jobs: - name: Get PR head SHA id: pr-sha - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} with: @@ -248,7 +155,7 @@ jobs: - name: Create in-progress check run id: create-check - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: HEAD_SHA: ${{ steps.pr-sha.outputs.sha }} with: @@ -297,7 +204,7 @@ jobs: - name: Complete check run — preview available if: steps.wait.outputs.available == 'true' - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: PREVIEW_URLS: ${{ needs.resolve-urls.outputs.urls }} PR_NUMBER: ${{ 
github.event.pull_request.number || inputs.pr_number }} @@ -338,7 +245,7 @@ jobs: - name: Complete check run — timed out if: steps.wait.outputs.available == 'false' - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} CHECK_RUN_ID: ${{ steps.create-check.outputs.check-run-id }} @@ -385,7 +292,7 @@ jobs: steps: - name: Get PR head SHA id: pr-sha - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} with: @@ -403,7 +310,7 @@ jobs: core.setOutput('sha', sha); - name: Create skipped visual review check run - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 env: HEAD_SHA: ${{ steps.pr-sha.outputs.sha }} SKIP_REASON: ${{ needs.resolve-urls.outputs.skip-reason }} diff --git a/.github/workflows/influxdb3-release.yml b/.github/workflows/influxdb3-release.yml index e66f0450a0..68b782b607 100644 --- a/.github/workflows/influxdb3-release.yml +++ b/.github/workflows/influxdb3-release.yml @@ -36,10 +36,10 @@ jobs: generated: ${{ steps.generate.outputs.generated }} steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -97,7 +97,7 @@ jobs: echo "generated=true" >> $GITHUB_OUTPUT - name: Upload release notes - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} path: helper-scripts/output/release-notes/ @@ -111,10 +111,10 @@ jobs: # generated: ${{ 
steps.generate.outputs.generated }} # steps: - # - uses: actions/checkout@v6 + # - uses: actions/checkout@v4 # - name: Set up Node.js - # uses: actions/setup-node@v6 + # uses: actions/setup-node@v4 # with: # node-version: '18' # cache: 'yarn' @@ -155,7 +155,7 @@ jobs: # echo "generated=true" >> $GITHUB_OUTPUT # - name: Upload release notes - # uses: actions/upload-artifact@v7 + # uses: actions/upload-artifact@v4 # with: # name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} # path: helper-scripts/output/release-notes/ @@ -168,10 +168,10 @@ jobs: if: needs.generate-release-notes-core-enterprise.outputs.generated == 'true' && contains(fromJSON('["core", "enterprise"]'), github.event.inputs.product) steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -199,7 +199,7 @@ jobs: node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js $PRODUCT $VERSION - name: Upload CLI audit reports - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: cli-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} path: helper-scripts/output/cli-audit/ @@ -212,10 +212,10 @@ jobs: # if: needs.generate-release-notes-distributed.outputs.generated == 'true' && contains(fromJSON('["clustered", "cloud-dedicated", "cloud-serverless"]'), github.event.inputs.product) # steps: - # - uses: actions/checkout@v6 + # - uses: actions/checkout@v4 # - name: Set up Node.js - # uses: actions/setup-node@v6 + # uses: actions/setup-node@v4 # with: # node-version: '18' # cache: 'yarn' @@ -258,7 +258,7 @@ jobs: # EOF # - name: Upload distributed audit reports - # uses: actions/upload-artifact@v7 + # uses: actions/upload-artifact@v4 # with: # name: distributed-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} # path: 
helper-scripts/output/distributed-audit/ @@ -271,10 +271,10 @@ jobs: if: github.event.inputs.dry_run != 'true' && always() && (needs.generate-release-notes-core-enterprise.result == 'success') steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Download artifacts - uses: actions/download-artifact@v8 + uses: actions/download-artifact@v4 with: path: artifacts/ @@ -338,7 +338,7 @@ jobs: fi - name: Create Pull Request - uses: peter-evans/create-pull-request@v8 + uses: peter-evans/create-pull-request@v5 with: token: ${{ secrets.GITHUB_TOKEN }} branch: ${{ env.BRANCH }} @@ -375,15 +375,15 @@ jobs: if: github.event.inputs.dry_run != 'true' && always() && (needs.audit-cli-documentation.result == 'success') steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Download audit reports - uses: actions/download-artifact@v8 + uses: actions/download-artifact@v4 with: path: audit-reports/ - name: Create issue from audit - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml index 7f0b581705..ae97f8dac8 100644 --- a/.github/workflows/pr-link-check.yml +++ b/.github/workflows/pr-link-check.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -79,7 +79,7 @@ jobs: - name: Setup Node.js if: steps.detect.outputs.has-changes == 'true' - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '20' cache: 'yarn' @@ -328,7 +328,7 @@ jobs: - name: Upload detailed results if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: link-check-results path: | diff --git a/.github/workflows/pr-preview.yml b/.github/workflows/pr-preview.yml index 9dcf38fcfe..191a52720f 100644 --- 
a/.github/workflows/pr-preview.yml +++ b/.github/workflows/pr-preview.yml @@ -20,84 +20,20 @@ concurrency: cancel-in-progress: true jobs: - # Skip draft PRs and check for re-runs that would overwrite successful status + # Skip draft PRs entirely check-draft: runs-on: ubuntu-latest - permissions: - checks: read - pull-requests: read outputs: should-run: ${{ steps.check.outputs.should-run }} steps: - id: check - uses: actions/github-script@v8 - with: - script: | - const pr = context.payload.pull_request; - const runAttempt = Number(process.env.GITHUB_RUN_ATTEMPT) || 1; - - // Check for draft PR - if (pr?.draft) { - core.info('Skipping draft PR'); - core.setOutput('should-run', 'false'); - return; - } - - // First attempts always proceed (after draft check) - if (runAttempt === 1) { - core.setOutput('should-run', 'true'); - return; - } - - // This is a re-run. Check if jobs would actually run or skip. - const isFork = pr?.head?.repo?.full_name !== pr?.base?.repo?.full_name; - const isClosed = context.payload.action === 'closed'; - - // For preview job: needs non-fork, non-closed - // For cleanup job: needs closed - // For fork-notice: needs fork, non-closed - // At least one job would run in most cases unless PR state changed - const previewWouldRun = !isFork && !isClosed; - const cleanupWouldRun = isClosed; - const forkNoticeWouldRun = isFork && !isClosed; - const anyJobWouldRun = previewWouldRun || cleanupWouldRun || forkNoticeWouldRun; - - if (anyJobWouldRun) { - core.info('Re-run would execute jobs - proceeding'); - core.setOutput('should-run', 'true'); - return; - } - - // Jobs would skip. Check for existing successful runs. 
- core.info('Re-run would skip jobs - checking for existing successful runs'); - - try { - const { data: checkRuns } = await github.rest.checks.listForRef({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: context.sha, - per_page: 100, - }); - - const previewJobs = ['check-draft', 'preview', 'cleanup', 'fork-notice']; - const successfulOtherRuns = checkRuns.check_runs.filter(cr => { - const isOurJob = previewJobs.includes(cr.name); - const isSuccessful = cr.conclusion === 'success'; - const fromDifferentRun = cr.html_url && !cr.html_url.includes(`/runs/${context.runId}/`); - return isOurJob && isSuccessful && fromDifferentRun; - }); - - if (successfulOtherRuns.length > 0) { - core.info(`Found ${successfulOtherRuns.length} successful runs - aborting to preserve status`); - core.setOutput('should-run', 'false'); - return; - } - } catch (error) { - core.warning(`Could not check existing runs: ${error.message}`); - } - - core.info('No existing successful runs - proceeding'); - core.setOutput('should-run', 'true'); + run: | + if [[ "${{ github.event.pull_request.draft }}" == "true" ]]; then + echo "should-run=false" >> $GITHUB_OUTPUT + echo "Skipping draft PR" + else + echo "should-run=true" >> $GITHUB_OUTPUT + fi # Notify fork PRs that preview is not available fork-notice: @@ -109,7 +45,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Post fork notice comment - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const { data: comments } = await github.rest.issues.listComments({ @@ -142,12 +78,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Setup Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '20' cache: 'yarn' @@ -164,7 +100,7 @@ jobs: - name: Post pending comment (needs input) if: steps.detect.outputs.needs-author-input == 'true' - uses: actions/github-script@v8 + uses: 
actions/github-script@v7 with: script: | const { upsertPreviewComment } = await import('${{ github.workspace }}/.github/scripts/preview-comment.js'); @@ -234,7 +170,7 @@ jobs: - name: Post success comment if: steps.detect.outputs.pages-to-deploy != '[]' && steps.validate-deploy.outputs.status == 'ok' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const { upsertPreviewComment } = await import('${{ github.workspace }}/.github/scripts/preview-comment.js'); @@ -250,7 +186,7 @@ jobs: - name: Post skipped comment if: steps.detect.outputs.pages-to-deploy == '[]' && steps.detect.outputs.needs-author-input != 'true' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const { upsertPreviewComment } = await import('${{ github.workspace }}/.github/scripts/preview-comment.js'); @@ -266,7 +202,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout gh-pages - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: ref: gh-pages @@ -286,12 +222,12 @@ jobs: fi - name: Checkout scripts for comment deletion - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: path: scripts-checkout - name: Delete preview comment - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const { deletePreviewComment } = await import('${{ github.workspace }}/scripts-checkout/.github/scripts/preview-comment.js'); diff --git a/.github/workflows/pr-vale-check.yml b/.github/workflows/pr-vale-check.yml index 544e532d72..de4c2a19d0 100644 --- a/.github/workflows/pr-vale-check.yml +++ b/.github/workflows/pr-vale-check.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@v4 - name: Detect changed files id: detect @@ -66,19 +66,11 @@ jobs: RESOLVED_COUNT=$(wc -l < resolved_files.txt | tr -d ' ') echo "resolved-count=$RESOLVED_COUNT" >> $GITHUB_OUTPUT - - name: Install Vale - if: steps.detect.outputs.has-files == 'true' - run: | - 
VALE_VERSION=$(sed -n 's/^VALE_VERSION="\(.*\)"/\1/p' .ci/vale/vale.sh) - curl -sfL "https://github.com/errata-ai/vale/releases/download/v${VALE_VERSION}/vale_${VALE_VERSION}_Linux_64-bit.tar.gz" \ - | sudo tar xz -C /usr/local/bin vale - vale --version - - name: Run Vale if: steps.detect.outputs.has-files == 'true' id: vale run: | - chmod +x .github/scripts/vale-check.sh .ci/vale/vale.sh + chmod +x .github/scripts/vale-check.sh set +e # Don't exit on error .github/scripts/vale-check.sh --files resolved_files.txt > vale_results.json 2>vale_stderr.txt @@ -142,7 +134,7 @@ jobs: - name: Post PR comment if: always() && steps.detect.outputs.has-files == 'true' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); @@ -272,7 +264,7 @@ jobs: - name: Upload results if: always() && steps.detect.outputs.has-files == 'true' - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@v4 with: name: vale-results path: | diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index a686493686..3fea325405 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -29,14 +29,14 @@ jobs: prepare-release: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@v4 - name: Create release branch run: | git checkout -b docs-release-v${{ inputs.version }} - name: Set up Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '18' cache: 'yarn' @@ -88,7 +88,7 @@ jobs: --version ${{ inputs.version }} - name: Create release checklist issue - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const checklist = require('./.github/scripts/release-checklist.js'); diff --git a/.github/workflows/sync-link-checker-binary.yml b/.github/workflows/sync-link-checker-binary.yml index 8392325d2e..ef7ab3338c 100644 --- a/.github/workflows/sync-link-checker-binary.yml +++ 
b/.github/workflows/sync-link-checker-binary.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@v4 - name: Download binary from docs-tooling release run: | diff --git a/.github/workflows/sync-plugins.yml b/.github/workflows/sync-plugins.yml index b9dc77b19f..d840f42475 100644 --- a/.github/workflows/sync-plugins.yml +++ b/.github/workflows/sync-plugins.yml @@ -31,7 +31,7 @@ jobs: - name: Parse issue inputs id: parse-inputs if: github.event_name == 'issues' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const issue = context.payload.issue; @@ -63,7 +63,7 @@ jobs: - name: Update issue status if: steps.inputs.outputs.issue_number != '' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | await github.rest.issues.createComment({ @@ -74,13 +74,13 @@ jobs: }); - name: Checkout docs-v2 - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: path: docs-v2 token: ${{ secrets.GITHUB_TOKEN }} - name: Checkout influxdb3_plugins (sparse) - uses: actions/checkout@v6 + uses: actions/checkout@v4 with: repository: influxdata/influxdb3_plugins token: ${{ secrets.PLUGINS_CONTENT_READ_TOKEN }} @@ -91,7 +91,7 @@ jobs: ref: ${{ steps.inputs.outputs.source_commit }} - name: Setup Node.js - uses: actions/setup-node@v6 + uses: actions/setup-node@v4 with: node-version: '20' cache: 'yarn' @@ -103,7 +103,7 @@ jobs: CYPRESS_INSTALL_BINARY=0 yarn install - name: Setup Python - uses: actions/setup-python@v6 + uses: actions/setup-python@v4 with: python-version: '3.9' @@ -137,7 +137,7 @@ jobs: - name: Report validation failure if: steps.validate.outputs.validation_passed == 'false' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); @@ -352,7 +352,7 @@ jobs: - name: Create Pull Request if: steps.validate.outputs.validation_passed == 'true' id: create-pr - uses: peter-evans/create-pull-request@v8 + uses: 
peter-evans/create-pull-request@v5 with: path: docs-v2 token: ${{ secrets.GITHUB_TOKEN }} @@ -396,7 +396,7 @@ jobs: - name: Update issue with success if: steps.validate.outputs.validation_passed == 'true' && steps.inputs.outputs.issue_number != '' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | await github.rest.issues.createComment({ @@ -423,7 +423,7 @@ jobs: - name: Report failure if: failure() && steps.inputs.outputs.issue_number != '' - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | await github.rest.issues.createComment({ diff --git a/.github/workflows/trigger-on-release.yml b/.github/workflows/trigger-on-release.yml index 7827b0a6ce..cbf4419c27 100644 --- a/.github/workflows/trigger-on-release.yml +++ b/.github/workflows/trigger-on-release.yml @@ -42,7 +42,7 @@ jobs: echo "Previous Version: ${{ github.event.client_payload.previous_version }}" - name: Trigger release documentation workflow - uses: actions/github-script@v8 + uses: actions/github-script@v7 with: script: | await github.rest.actions.createWorkflowDispatch({ diff --git a/.vale.ini b/.vale.ini index 8b8599b69a..9dcb5ae5d3 100644 --- a/.vale.ini +++ b/.vale.ini @@ -16,7 +16,6 @@ TokenIgnores = /[a-zA-Z0-9/_\-\.]+, \ https?://[^\s\)\]>"]+, \ `[^`]+` -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -30,17 +29,7 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". 
-write-good.TooWordy = NO -write-good.Weasel = NO \ No newline at end of file +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. +write-good.TooWordy = NO \ No newline at end of file diff --git a/DOCS-TESTING.md b/DOCS-TESTING.md index ee038714a2..589c845ebf 100644 --- a/DOCS-TESTING.md +++ b/DOCS-TESTING.md @@ -629,7 +629,7 @@ Vale runs automatically on pull requests that modify markdown files. The workflo 1. Detects changed markdown files (content, README, instruction files) 2. Resolves shared content to consuming product pages 3. Maps files to appropriate Vale configs (matching local Lefthook behavior) -4. Runs Vale via `.ci/vale/vale.sh` (local binary or Docker fallback) +4. Runs Vale via Docker (`jdkato/vale:latest`) 5. Reports results as inline annotations and a PR summary comment **Alert levels:** diff --git a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml index b8fd78da1b..5ff481f9d0 100644 --- a/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml +++ b/api-docs/influxdb3/enterprise/v3/influxdb3-enterprise-openapi.yaml @@ -18,7 +18,7 @@ info: - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) - version: v3.9.0 + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -140,31 +140,6 @@ tags: x-related: - title: Use compatibility APIs to write data href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ - - name: Export data (beta) - description: | - Export compacted data as Parquet files for use with external tools. - - > **Beta**: Export endpoints require the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). 
The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - - Data must be compacted before it can be exported. - Uncompacted data is not available for export at this time. - - #### Export workflow - - 1. [List databases](#operation/GetExportDatabases) available for export. - 2. [List tables](#operation/GetExportTables) in a database. - 3. [List compacted windows](#operation/GetExportWindows) (24-hour UTC windows) for a table. - 4. [Download window data](#operation/GetExportWindowData) as Parquet files. - - You can also use the [`influxdb3 export`](/influxdb3/enterprise/performance-preview/#export-to-parquet) CLI - commands, which call these endpoints. - x-related: - - title: Performance upgrade preview - href: /influxdb3/enterprise/performance-preview/ - - title: Export to Parquet - href: /influxdb3/enterprise/performance-preview/#export-to-parquet - name: Database description: Manage databases - description: > @@ -427,124 +402,6 @@ paths: tags: - Compatibility endpoints - Write data - /api/v3/export/databases: - get: - operationId: GetExportDatabases - summary: "List databases available for export (beta)" - description: | - Returns a list of databases that have compacted data available for Parquet export. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - responses: - "200": - description: Success. Returns a list of database names. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Export data (beta) - /api/v3/export/tables: - get: - operationId: GetExportTables - summary: "List tables available for export (beta)" - description: | - Returns a list of tables in a database that have compacted data available for Parquet export. 
- - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - responses: - "200": - description: Success. Returns a list of table names. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - tags: - - Export data (beta) - /api/v3/export/windows: - get: - operationId: GetExportWindows - summary: "List compacted windows for a table (beta)" - description: | - Returns a list of compacted 24-hour UTC windows for a table. - Each window represents a time range of compacted data that can be exported as Parquet. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The table name. - responses: - "200": - description: Success. Returns a list of compacted 24-hour windows. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database or table not found. - tags: - - Export data (beta) - /api/v3/export/window_data: - get: - operationId: GetExportWindowData - summary: "Export window data as Parquet (beta)" - description: | - Downloads compacted data for the specified windows as a tar archive containing Parquet files. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). 
The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The table name. - - name: windows - in: query - required: false - schema: - type: string - description: | - Comma-separated list of window dates to export (for example, `2026-01-15,2026-01-16`). - If omitted, exports all available windows. - responses: - "200": - description: Success. Returns a tar archive containing Parquet files. - content: - application/x-tar: - schema: - type: string - format: binary - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database, table, or window not found. - tags: - - Export data (beta) /api/v3/configure/database: delete: operationId: DeleteConfigureDatabase @@ -1810,12 +1667,7 @@ paths: "422": description: Unprocessable entity. summary: Execute InfluxQL query - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). + description: Executes an InfluxQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/dbQueryParam" - name: q @@ -1869,12 +1721,7 @@ paths: "422": description: Unprocessable entity. summary: Execute InfluxQL query - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). + description: Executes an InfluxQL query to retrieve data from the specified database. 
parameters: - $ref: "#/components/parameters/AcceptQueryHeader" - $ref: "#/components/parameters/ContentType" @@ -1924,14 +1771,7 @@ paths: "422": description: Unprocessable entity. summary: Execute SQL query - description: | - Executes an SQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). Use the - [`/api/v3/query_sql_telemetry`](#operation/GetQuerySQLTelemetry) endpoint after executing - a query to retrieve detailed execution statistics. + description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/db" - $ref: "#/components/parameters/querySqlParam" @@ -1978,14 +1818,7 @@ paths: "422": description: Unprocessable entity. summary: Execute SQL query - description: | - Executes an SQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). Use the - [`/api/v3/query_sql_telemetry`](#operation/GetQuerySQLTelemetry) endpoint after executing - a query to retrieve detailed execution statistics. + description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/AcceptQueryHeader" - $ref: "#/components/parameters/ContentType" @@ -1993,111 +1826,6 @@ paths: $ref: "#/components/requestBodies/queryRequestBody" tags: - Query data - /api/v3/query_sql_telemetry: - get: - operationId: GetQuerySQLTelemetry - summary: "Get query telemetry (beta)" - description: | - Returns detailed execution statistics for the most recent SQL query, including per-chunk I/O, - cache hit rates, and timing breakdowns. 
- - Use this endpoint after executing a query to analyze performance. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - - For more information, see - [Query telemetry](/influxdb3/enterprise/performance-preview/monitor/#query-telemetry). - responses: - "200": - description: Success. Returns query telemetry data. - content: - application/json: - schema: - type: object - properties: - query_id: - type: string - description: Unique identifier for the query. - execution_time_us: - type: integer - description: Total execution time in microseconds. - chunks: - type: array - description: Per-chunk statistics. - items: - type: object - properties: - chunk_id: - type: string - files_scanned: - type: integer - blocks_processed: - type: integer - rows_read: - type: integer - rows_returned: - type: integer - bytes_read: - type: integer - cache_stats: - type: object - description: Cache hit rates by type. - properties: - gen0_hits: - type: integer - gen0_misses: - type: integer - compacted_hits: - type: integer - compacted_misses: - type: integer - example: - query_id: "q_12345" - execution_time_us: 4523 - chunks: - - chunk_id: "c_1" - files_scanned: 3 - blocks_processed: 12 - rows_read: 24000 - rows_returned: 150 - bytes_read: 1234567 - cache_stats: - gen0_hits: 5 - gen0_misses: 1 - compacted_hits: 8 - compacted_misses: 2 - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Query data - post: - operationId: PostQuerySQLTelemetry - summary: "Get query telemetry (beta)" - description: | - Returns detailed execution statistics for the most recent SQL query, including per-chunk I/O, - cache hit rates, and timing breakdowns. - - Use this endpoint after executing a query to analyze performance. 
- - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - - For more information, see - [Query telemetry](/influxdb3/enterprise/performance-preview/monitor/#query-telemetry). - responses: - "200": - description: Success. Returns query telemetry data. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Query data /api/v3/write_lp: post: operationId: PostWriteLP @@ -2147,52 +1875,53 @@ paths: "422": description: Unprocessable entity. summary: Write line protocol - description: | + description: > Writes line protocol to the specified database. + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format - to InfluxDB. Use query parameters to specify options for writing data. + to InfluxDB. - #### Features + Use query parameters to specify options for writing data. - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response - times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - #### Column families (performance upgrade preview) + #### Features - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), you can assign fields to column families using the `::` delimiter - in field names. 
The portion before `::` is the family name; everything after is the field name. - ```txt - metrics,host=sA cpu::usage_user=55.2,cpu::usage_sys=12.1,mem::free=2048i 1000000000 - ``` + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees - Fields in the same family are stored together on disk. For wide tables, this reduces I/O - by letting queries read only the families they need. Fields written without `::` are assigned - to auto-generated families. + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - For more information, see [Column families](/influxdb3/enterprise/performance-preview/#column-families). #### Auto precision detection + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + the timestamp precision based on the magnitude of the timestamp value: + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + - Larger timestamps → Nanosecond precision (no conversion needed) + #### Related + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) - - [Performance upgrade preview](/influxdb3/enterprise/performance-preview/) requestBody: $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: @@ -4062,7 +3791,6 @@ x-tagGroups: - Response codes - Compatibility endpoints - Database - - Export data (beta) - Processing engine - Server information - Table diff --git a/content/enterprise_influxdb/v1/about-the-project/release-notes.md b/content/enterprise_influxdb/v1/about-the-project/release-notes.md index 78bb493847..d7c5a22ccd 
100644 --- a/content/enterprise_influxdb/v1/about-the-project/release-notes.md +++ b/content/enterprise_influxdb/v1/about-the-project/release-notes.md @@ -1,7 +1,7 @@ --- title: InfluxDB Enterprise v1 release notes description: > - Changes and updates to InfluxDB Enterprise v1. + Important changes and what's new in each version of InfluxDB Enterprise v1. menu: enterprise_influxdb_v1_ref: name: Release notes @@ -13,109 +13,6 @@ alt_links: -## v1.12.3 {date="2026-03-31"} - -InfluxDB Enterprise 1.12.3 delivers substantial efficiency gains in CPU, memory, -and I/O usage, particularly in high-cardinality and large-scale environments. - -> [!Important] -> #### We strongly recommend upgrading to v1.12.3 -> -> If you’re using any previous version of InfluxDB Enterprise v1, we strongly -> recommend [upgrading to 1.12.3](/enterprise_influxdb/v1/administration/upgrading/). - -Highlights include: - -- **Faster retention enforcement:** TSI series deletion now uses batched `fsync`, - delivering **up to 100x speed improvements** for high-cardinality datasets. -- **Reduced CPU usage during compaction:** Improvements to compaction planning - (including filename generation and level caching) can reduce CPU usage by - **up to 30%**. -- **Significantly faster backups:** New configurable compression enables - **up to 5x faster backup performance**. -- **Lower I/O during compaction:** Cold shard compactions now use - **up to 3x less disk I/O**, and unnecessary work for purging replaced files - has been eliminated. - -These improvements combine to make InfluxDB Enterprise v1 clusters more -efficient, more predictable under load, and more cost-effective to operate. 
- -Other updates include: - -- Stability and reliability improvements -- Enhanced security and TLS management -- Improved operational visibility -- Improved cluster management tools - -> [!Important] -> #### Upgrade meta nodes first -> -> When upgrading to InfluxDB Enterprise 1.12.1+, upgrade meta nodes before -> upgrading data nodes. - -### Features - -- Add [`https-insecure-certificate` configuration option](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#https-insecure-certificate) - to meta nodes to skip file permission checking for TLS certificate and private key files. - Also available for data node - [`[cluster]`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#https-insecure-certificate) - and [`[http]`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#https-insecure-certificate-1) - sections. -- Add [`advanced-expiration` TLS configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#advanced-expiration) - to configure how far in advance to log warnings about TLS certificate expiration. -- Add backup compression options - (`-gzipCompressionLevel`, `-gzipBlockCount`, `-gzipBlockSize`) to - [`influxd-ctl backup`](/enterprise_influxdb/v1/tools/influxd-ctl/backup/#flags). -- Improve [`influxd-ctl backup`](/enterprise_influxdb/v1/tools/influxd-ctl/backup/): - the `-from` flag now validates that the specified node exists in the cluster, - smarter node selection skips zero-byte copies and prefers nodes with most recent writes. - Add `-staleness-threshold`, `-bufsize`, and `-cpuprofile` flags. -- Add `-e` flag to - [`influxd-ctl show-shards`](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/) - to include expired shards in output (expired shards are filtered by default). -- Add [`-timeout` global flag](/enterprise_influxdb/v1/tools/influxd-ctl/) to - `influxd-ctl` to override the default 10-second timeout for operations. 
-- Add [`rpc-resettable-read-timeout`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#rpc-resettable-read-timeout) - and [`rpc-resettable-write-timeout`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#rpc-resettable-write-timeout) - configuration options for inactivity timeouts on RPC connections between data nodes. -- Add TLS certificate reloading on `SIGHUP`. -- Add [`config`](/enterprise_influxdb/v1/tools/api/#running-configuration) and - [`cq` (continuous query) statistics](/enterprise_influxdb/v1/tools/api/#continuous-query-statistics) - to the `/debug/vars` endpoint. -- Improve dropped point logging. -- [Show user when displaying or logging queries](/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management/#list-currently-running-queries-with-show-queries). -- Add [`time_format` parameter](/enterprise_influxdb/v1/tools/api/#query-data-with-a-select-statement-and-the-time_format-parameter) for the HTTP API. -- Use dynamic logging levels (`zap.AtomicLevel`). -- [Report user query bytes](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#user-query-bytes-enabled). - -### Bug fixes - -- Fix `FUTURE LIMIT` and `PAST LIMIT` - [clause order](/enterprise_influxdb/v1/query_language/manage-database/#future-limit) - in retention policy statements. -- Add locking in `ClearBadShardList`. -- Stop noisy logging about phantom shards that do not belong to a node. -- Resolve `RLock()` leakage in `Store.DeleteSeries()`. -- Fix condition check for optimization of array cursor (tsm1). -- Run `init.sh` `buildtsi` as `influxdb` user. -- Reduce unnecessary purger operations and logging. -- Sort files for adjacency testing. -- Fix operator in host detection (systemd). -- Use correct path in open WAL error message. -- Handle nested low-level files in compaction. -- Correct error logic for writing empty index files. -- Reduce lock contention and races in purger. 
-- Fix bug with authorizer leakage in `SHOW QUERIES`. -- Rename compact throughput logging keys. -- Fix `https-insecure-certificate` not handled properly in httpd. -- Prevent level regression when compacting mixed-level TSM files. - -### Other - -- Update Go to 1.24.13. - ---- - ## v1.12.2 {date="2025-09-15"} > [!Important] @@ -146,8 +43,8 @@ Other updates include: - Add a warning if the TLS certificate is expired. - Add authentication to the Raft portal and add the following related _data_ node configuration options: - - [`[meta].raft-portal-auth-required`](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#raft-portal-auth-required) - - [`[meta].raft-dialer-auth-required`](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#raft-dialer-auth-required) + - [`[meta].raft-portal-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-portal-auth-required) + - [`[meta].raft-dialer-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-dialer-auth-required) - Improve error handling. - InfluxQL updates: - Delete series by retention policy. @@ -510,7 +407,7 @@ Other updates include: - Add [/api/v2/delete](/enterprise_influxdb/v1/tools/api/#apiv2delete-http-endpoint) support. - Add wildcard support for retention policies in `SHOW MEASUREMENTS`. - Log slow queries even when query logging is not enabled. -- Add `--start` and `--end` [backup flags](/enterprise_influxdb/v1/administration/backup-and-restore/#backup-flags) to specify the time to include in backup. +- Add `--start` and `--end` [backup options](/enterprise_influxdb/v1/administration/backup-and-restore/#backup-options) to specify the time to include in backup. - Add Raft Status output to `inflxud-ctl show`. 
#### Flux updates @@ -634,7 +531,7 @@ An edge case regression was introduced into this version that may cause a consta - **Log active queries when a process is terminated**: Add the [`termination-query-log`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#termination-query-log--false) configuration option. When set to `true` all running queries are printed to the log when a data node process receives a `SIGTERM` (for example, a Kubernetes process exceeds the container memory limit or the process is terminated). -- **Log details of HTTP calls to meta nodes**. When [`cluster-tracing`](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#cluster-tracing) is enabled, all API calls to meta nodes are now logged with details providing an audit trail including IP address of caller, specific API being invoked, action being invoked, and more. +- **Log details of HTTP calls to meta nodes**. When [`cluster-tracing`](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#cluster-tracing--false) is enabled, all API calls to meta nodes are now logged with details providing an audit trail including IP address of caller, specific API being invoked, action being invoked, and more. ### Maintenance updates @@ -900,14 +797,14 @@ For details on changes incorporated from the InfluxDB OSS release, see #### Hinted handoff improvements -- Allow out-of-order writes. This change adds a configuration option `allow-out-of-order-writes` to the `[cluster]` section of the data node configuration file. This setting defaults to `false` to match the existing behavior. There are some important operational considerations to review before turning this on. But, the result is enabling this option reduces the time required to drain the hinted handoff queue and increase throughput during recovery. See [`allow-out-of-order-writes`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#allow-out-of-order-writes) for more detail. 
-- Make the number of pending writes configurable. This change adds a configuration option in the `[hinted-handoff]` section called `max-pending-writes`, which defaults to `1024`. See [`max-writes-pending`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#max-writes-pending) for more detail. +- Allow out-of-order writes. This change adds a configuration option `allow-out-of-order-writes` to the `[cluster]` section of the data node configuration file. This setting defaults to `false` to match the existing behavior. There are some important operational considerations to review before turning this on. But, the result is enabling this option reduces the time required to drain the hinted handoff queue and increase throughput during recovery. See [`allow-out-of-order-writes`](/enterprise_influxdb/v1/administration/config-data-nodes#allow-out-of-order-writes--false) for more detail. +- Make the number of pending writes configurable. This change adds a configuration option in the `[hinted-handoff]` section called `max-pending-writes`, which defaults to `1024`. See [max-pending-writes](/enterprise_influxdb/v1/administration/config-data-nodes#max-pending-writes-1024) for more detail. - Update the hinted handoff queue to ensure various entries to segment files occur atomically. Prior to this change, entries were written to disk in three separate writes (len, data, offset). If the process stopped in the middle of any of those writes, the hinted handoff segment file was left in an invalid state. - In certain scenarios, the hinted-handoff queue would fail to drain. Upon node startup, the queue segment files are now verified and truncated if any are corrupted. Some additional logging has been added when a node starts writing to the hinted handoff queue as well. #### `influxd-ctl` CLI improvements -- Add a verbose flag to [`influxd-ctl show-shards`](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/). 
This option provides more information about each shard owner, including the state (hot/cold), last modified date and time, and size on disk. +- Add a verbose flag to [`influxd-ctl show-shards`](/enterprise_influxdb/v1/administration/cluster-commands/#show-shards). This option provides more information about each shard owner, including the state (hot/cold), last modified date and time, and size on disk. ### Bug fixes @@ -935,7 +832,7 @@ For details on changes incorporated from the InfluxDB OSS release, see > To restore a meta data backup, use the `restore -full` command and specify > your backup manifest: `influxd-ctl restore -full `. -For more information, see [Perform a metadata only backup](/enterprise_influxdb/v1/administration/backup-and-restore/#perform-a-metadata-only-backup). +For more information, see [Perform a metastore only backup](/enterprise_influxdb/v1/administration/backup-and-restore/#perform-a-metastore-only-backup). #### **Incremental and full backups** @@ -1003,7 +900,7 @@ For details on changes incorporated from the InfluxDB OSS release, see [InfluxDB - Added logging when data nodes connect to meta service. ### Features -- The Flux Technical Preview has advanced to version 0.36.2. +- The Flux Technical Preview has advanced to version [0.36.2](/flux/v0.36/). --- @@ -1262,7 +1159,7 @@ Please see the [InfluxDB OSS release notes](/influxdb/v1/about_the_project/relea > This release builds off of the 1.5 release of InfluxDB OSS. Please see the [InfluxDB OSS release > notes](/influxdb/v1/about_the_project/release-notes/) for more information about the InfluxDB OSS release. -For highlights of the InfluxDB 1.5 release, see [InfluxDB 1.5 release notes](/influxdb/v1/about_the_project/release-notes/). +For highlights of the InfluxDB 1.5 release, see [What's new in InfluxDB 1.5](/influxdb/v1/about_the_project/whats_new/). 
### Breaking changes @@ -1500,7 +1397,7 @@ The following configuration changes may need to changed before [upgrading](/ente We've removed the data node's `shard-writer-timeout` configuration option from the `[cluster]` section. As of version 1.2.2, the system sets `shard-writer-timeout` internally. -The configuration option can be removed from the [data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/). +The configuration option can be removed from the [data node configuration file](/enterprise_influxdb/v1/administration/configuration/#data-node-configuration). #### retention-autocreate @@ -1518,8 +1415,8 @@ This change only affects users who have disabled the `retention-autocreate` opti ##### Backup and Restore
-- Prevent the `shard not found` error by making [backups](/enterprise_influxdb/v1/tools/influxd-ctl/backup/) skip empty shards -- Prevent the `shard not found` error by making [restore](/enterprise_influxdb/v1/tools/influxd-ctl/restore/) handle empty shards +- Prevent the `shard not found` error by making [backups](/enterprise_influxdb/v1/administration/backup-and-restore/#backup) skip empty shards +- Prevent the `shard not found` error by making [restore](/enterprise_influxdb/v1/administration/backup-and-restore/#restore) handle empty shards - Ensure that restores from an incremental backup correctly handle file paths - Allow incremental backups with restrictions (for example, they use the `-db` or `rp` flags) to be stores in the same directory - Support restores on meta nodes that are not the raft leader @@ -1539,8 +1436,8 @@ This change only affects users who have disabled the `retention-autocreate` opti - Serialize access to the meta client and meta store to prevent raft log buildup - Remove sysvinit package dependency for RPM packages - Make the default retention policy creation an atomic process instead of a two-step process -- Prevent `influxd-ctl`'s [`join` argument](/enterprise_influxdb/v1/tools/influxd-ctl/join/) from completing a join when the command also specifies the help flag (`-h`) -- Fix the `influxd-ctl`'s [force removal](/enterprise_influxdb/v1/tools/influxd-ctl/remove-meta/) of meta nodes +- Prevent `influxd-ctl`'s [`join` argument](/enterprise_influxdb/v1/features/cluster-commands/#join) from completing a join when the command also specifies the help flag (`-h`) +- Fix the `influxd-ctl`'s [force removal](/enterprise_influxdb/v1/features/cluster-commands/#remove-meta) of meta nodes - Update the meta node and data node sample configuration files --- @@ -1562,9 +1459,9 @@ Please see the OSS [release notes](https://github.com/influxdata/influxdb/blob/1 ### Upgrading -* The `retention-autocreate` configuration option has moved from the meta node 
configuration file to the [data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#retention-autocreate). +* The `retention-autocreate` configuration option has moved from the meta node configuration file to the [data node configuration file](/enterprise_influxdb/v1/administration/configuration/#retention-autocreate-true). To disable the auto-creation of retention policies, set `retention-autocreate` to `false` in your data node configuration files. -* The previously deprecated `influxd-ctl force-leave` command has been removed. The replacement command to remove a meta node which is never coming back online is [`influxd-ctl remove-meta -force`](/enterprise_influxdb/v1/tools/influxd-ctl/remove-meta/). +* The previously deprecated `influxd-ctl force-leave` command has been removed. The replacement command to remove a meta node which is never coming back online is [`influxd-ctl remove-meta -force`](/enterprise_influxdb/v1/features/cluster-commands/). #### Cluster-specific Features @@ -1585,7 +1482,7 @@ To disable the auto-creation of retention policies, set `retention-autocreate` t - Remove an unused configuration option (`dir`) from the backend - Fix a panic around processing remote writes - Return an error if a remote write has a field conflict -- Drop points in the hinted handoff that (1) have field conflict errors (2) have [`max-values-per-tag`](/influxdb/v1/administration/config/#max-values-per-tag) errors +- Drop points in the hinted handoff that (1) have field conflict errors (2) have [`max-values-per-tag`](/influxdb/v1/administration/config/#max-values-per-tag-100000) errors - Remove the deprecated `influxd-ctl force-leave` command - Fix issue where CQs would stop running if the first meta node in the cluster stops - Fix logging in the meta httpd handler service @@ -1675,8 +1572,8 @@ Switches to journald logging for on systemd systems. 
Logs are no longer sent to - Return an error if getting latest snapshot takes longer than 30 seconds - Remove any expired shards from the `/show-shards` output -- Respect the [`pprof-enabled` configuration setting](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#pprof-enabled) and enable it by default on meta nodes -- Respect the [`pprof-enabled` configuration setting](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#pprof-enabled) on data nodes +- Respect the [`pprof-enabled` configuration setting](/enterprise_influxdb/v1/administration/configuration/#pprof-enabled-true) and enable it by default on meta nodes +- Respect the [`pprof-enabled` configuration setting](/enterprise_influxdb/v1/administration/configuration/#pprof-enabled-true-1) on data nodes - Use the data reference instead of `Clone()` during read-only operations for performance purposes - Prevent the system from double-collecting cluster statistics - Ensure that the Meta API redirects to the cluster leader when it gets the `ErrNotLeader` error @@ -1692,7 +1589,7 @@ Switches to journald logging for on systemd systems. 
Logs are no longer sent to #### Cluster-specific bug fixes -- Respect the [Hinted Handoff settings](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#hinted-handoff) in the configuration file +- Respect the [Hinted Handoff settings](/enterprise_influxdb/v1/administration/configuration/#hinted-handoff) in the configuration file - Fix expanding regular expressions when all shards do not exist on node that's handling the request --- diff --git a/content/enterprise_influxdb/v1/administration/backup-and-restore.md b/content/enterprise_influxdb/v1/administration/backup-and-restore.md index e87deb1b76..84f4cf3f18 100644 --- a/content/enterprise_influxdb/v1/administration/backup-and-restore.md +++ b/content/enterprise_influxdb/v1/administration/backup-and-restore.md @@ -59,14 +59,14 @@ For example, you can backup from {{< latest-patch version="1.10" >}} and restore - [Exporting and importing data](#exporting-and-importing-data) - [Exporting data](#exporting-data) - [Importing data](#importing-data) - - [Example](#example-export-and-import-for-disaster-recovery) + - [Example](#example) ### Backup utility A backup creates a copy of the [metastore](/enterprise_influxdb/v1/concepts/glossary/#metastore) and [shard](/enterprise_influxdb/v1/concepts/glossary/#shard) data at that point in time and stores the copy in the specified directory. To back up **only the cluster metastore**, use the `-strategy only-meta` backup option. -For more information, see how to [perform a metadata only backup](#perform-a-metadata-only-backup). +For more information, see how to [perform a metastore only backup](#perform-a-metastore-only-backup). All backups include a manifest, a JSON file describing what was collected during the backup. The filenames reflect the UTC timestamp of when the backup was created, for example: @@ -263,7 +263,7 @@ Backed up to backup_dir in 51.388233ms, transferred 481 bytes ##### Restore a backup Restore a backup to an existing cluster or a new cluster. 
-By default, a restore writes to databases using the backed-up data's [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf). +By default, a restore writes to databases using the backed-up data's [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor). An alternate replication factor can be specified with the `-newrf` flag when restoring a single database. Restore supports both `-full` backups and incremental backups; the syntax for a restore differs depending on the backup type. @@ -501,7 +501,7 @@ The unintended data, however, include only the metastore information, not the sh InfluxDB Enterprise introduced incremental backups in version 1.2.0. To restore a backup created prior to version 1.2.0, be sure to follow the syntax -for [restoring from a `-full` backup](#restore-from-a--full-backup). +for [restoring from a full backup](#restore-from-a-full-backup). ## Exporting and importing data diff --git a/content/enterprise_influxdb/v1/administration/configure/anti-entropy/_index.md b/content/enterprise_influxdb/v1/administration/configure/anti-entropy/_index.md index 10d28384e4..053be97be0 100644 --- a/content/enterprise_influxdb/v1/administration/configure/anti-entropy/_index.md +++ b/content/enterprise_influxdb/v1/administration/configure/anti-entropy/_index.md @@ -159,7 +159,7 @@ at which point the repair is finished. The Anti-Entropy service does its best to avoid hot shards (shards that are currently receiving writes) because they change quickly. While write replication between shard owner nodes (with a -[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) +[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) greater than 1) typically happens in milliseconds, this slight difference is still enough to cause the appearance of entropy where there is none. 
diff --git a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md index 2fe46426b8..4a6e54d9a9 100644 --- a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md +++ b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md @@ -598,26 +598,6 @@ The time in which a query connection must return its response after which the sy Environment variable: `INFLUXDB_CLUSTER_SHARD_READER_TIMEOUT` -#### rpc-resettable-read-timeout {metadata="v1.12.3+"} - -Default is `"15m"`. - -Read inactivity timeout for incoming RPC connections between data nodes. -The timeout resets on each successful read operation, so it detects stalled connections rather than slow queries. -Set to `"0"` to disable. - -Environment variable: `INFLUXDB_CLUSTER_RPC_RESETTABLE_READ_TIMEOUT` - -#### rpc-resettable-write-timeout {metadata="v1.12.3+"} - -Default is `"15m"`. - -Write inactivity timeout for incoming RPC connections between data nodes. -The timeout resets on each successful write operation, so it detects stalled connections rather than slow writes. -Set to `"0"` to disable. - -Environment variable: `INFLUXDB_CLUSTER_RPC_RESETTABLE_WRITE_TIMEOUT` - #### https-enabled Default is `false`. @@ -653,14 +633,6 @@ This is useful when testing with self-signed certificates. Environment variable: `INFLUXDB_CLUSTER_HTTPS_INSECURE_TLS` -#### https-insecure-certificate {metadata="v1.12.3+"} - -Default is `false`. - -Skips file permission checking for `https-certificate` and `https-private-key` when `true`. - -Environment variable: `INFLUXDB_CLUSTER_HTTPS_INSECURE_CERTIFICATE` - #### cluster-tracing Default is `false`. @@ -1173,17 +1145,6 @@ This setting has no effect if either Environment variable: `INFLUXDB_HTTP_PPROF_AUTH_ENABLED` -#### user-query-bytes-enabled {metadata="v1.12.3+"} - -Default is `false`. - -Enables per-user query response byte tracking. 
-When enabled, InfluxDB records the number of bytes returned by queries for each user in the `userquerybytes` measurement, available through `SHOW STATS FOR 'userquerybytes'`, the `_internal` database, and the `/debug/vars` endpoint. - -Unauthenticated queries are attributed to `(anonymous)`. - -Environment variable: `INFLUXDB_HTTP_USER_QUERY_BYTES_ENABLED` - #### https-enabled Default is `false`. @@ -1210,14 +1171,6 @@ The location of the separate private key. Environment variable: `INFLUXDB_HTTP_HTTPS_PRIVATE_KEY` -#### https-insecure-certificate {metadata="v1.12.3+"} - -Default is `false`. - -Skips file permission checking for `https-certificate` and `https-private-key` when `true`. - -Environment variable: `INFLUXDB_HTTP_HTTPS_INSECURE_CERTIFICATE` - #### shared-secret Default is `""`. @@ -1321,15 +1274,6 @@ Default is `"info"`. Determines which level of logs will be emitted. -To change the log level without restarting the data node, edit the `level` value in the configuration file and send `SIGHUP` to the process: - -```bash -kill -SIGHUP -``` - -On receipt of `SIGHUP`, the data node reloads the configuration and applies the new log level. -`SIGHUP` also reloads TLS certificates, entitlements, and the anti-entropy service configuration. _v1.12.3+_ - Environment variable: `INFLUXDB_LOGGING_LEVEL` #### suppress-logo @@ -1703,7 +1647,7 @@ Use the `SHOW DIAGNOSTICS` command to see the version of Go used to build Influx ### Recommended server configuration for "modern compatibility" InfluxData recommends configuring your InfluxDB server's TLS settings for "modern compatibility" that provides a higher level of security and assumes that backward compatibility is not required. -Our recommended TLS configuration settings for `ciphers`, `min-version`, and `max-version` are based on Mozilla's "modern compatibility" TLS server configuration described in [Security/Server Side TLS](https://wiki.mozilla.org/Security/Server_Side_TLS). 
+Our recommended TLS configuration settings for `ciphers`, `min-version`, and `max-version` are based on Mozilla's "modern compatibility" TLS server configuration described in [Security/Server Side TLS](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility). InfluxData's recommended TLS settings for "modern compatibility" are specified in the following configuration settings example. @@ -1748,14 +1692,6 @@ In the preceding example, `max-version = "tls1.3"` specifies the maximum version Environment variable: `INFLUXDB_TLS_MAX_VERSION` -#### advanced-expiration {metadata="v1.12.3+"} - -Sets how far in advance to log warnings about TLS certificate expiration. - -Default is `"5d"`. - -Environment variable: `INFLUXDB_TLS_ADVANCED_EXPIRATION` - ## Flux query management settings ### [flux-controller] diff --git a/content/enterprise_influxdb/v1/administration/configure/config-meta-nodes.md b/content/enterprise_influxdb/v1/administration/configure/config-meta-nodes.md index fd48fccc6f..68a1cb9087 100644 --- a/content/enterprise_influxdb/v1/administration/configure/config-meta-nodes.md +++ b/content/enterprise_influxdb/v1/administration/configure/config-meta-nodes.md @@ -170,14 +170,6 @@ Use either: Environment variable: `INFLUXDB_META_HTTPS_PRIVATE_KEY` -#### https-insecure-certificate {metadata="v1.12.3+"} - -Default is `false`. - -Skips file permission checking for `https-certificate` and `https-private-key` when `true`. - -Environment variable: `INFLUXDB_META_HTTPS_INSECURE_CERTIFICATE` - #### https-insecure-tls Default is `false`. @@ -349,7 +341,7 @@ The shared secret used by the internal API for JWT authentication for inter-node communication within the cluster. Set this to a long pass phrase. This value must be the same value as the -[`[meta] meta-internal-shared-secret`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-internal-shared-secret) in the data node configuration file. 
+[`[meta] meta-internal-shared-secret`](/enterprise_influxdb/v1/administration/config-data-nodes#meta-internal-shared-secret) in the data node configuration file. To use this option, set [`auth-enabled`](#auth-enabled) to `true`. Environment variable: `INFLUXDB_META_INTERNAL_SHARED_SECRET` @@ -460,7 +452,7 @@ Environment variable: `INFLUXDB_META_ENSURE_FIPS` Default is `false`. Require Raft clients to authenticate with server using the -[`meta-internal-shared-secret`](#internal-shared-secret). +[`meta-internal-shared-secret`](#meta-internal-shared-secret). This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+ and are configured with the correct `meta-internal-shared-secret`. @@ -473,7 +465,7 @@ Environment variable: `INFLUXDB_META_RAFT_PORTAL_AUTH_REQUIRED` Default is `false`. Require Raft servers to authenticate Raft clients using the -[`meta-internal-shared-secret`](#internal-shared-secret). +[`meta-internal-shared-secret`](#meta-internal-shared-secret). This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+, have `raft-portal-auth-required=true`, and are configured with the correct `meta-internal-shared-secret`. For existing clusters, it is recommended to enable `raft-portal-auth-required` and restart @@ -485,7 +477,7 @@ Environment variable: `INFLUXDB_META_RAFT_DIALER_AUTH_REQUIRED` ### TLS settings -For more information, see [TLS settings for data nodes](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#tls-settings). +For more information, see [TLS settings for data nodes](/enterprise_influxdb/v1/administration/config-data-nodes#tls-settings). 
#### Recommended "modern compatibility" cipher settings diff --git a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md index c1474006ee..75bc3778ca 100644 --- a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md +++ b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md @@ -21,7 +21,7 @@ Rebalancing a cluster involves two primary goals: cluster * Ensure that every shard is on *n* number of nodes, where *n* is determined by the retention policy's -[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) +[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) Rebalancing a cluster is essential for cluster health. Perform a rebalance if you add a new data node to your cluster. @@ -59,7 +59,7 @@ all meta nodes. For demonstration purposes, the next steps assume that you added a third data node to a previously two-data-node cluster that has a -[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) of +[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of two. This rebalance procedure is applicable for different cluster sizes and replication factors, but some of the specific, user-provided values will depend @@ -266,7 +266,7 @@ size on the original data nodes and increased the cluster's write throughput. For demonstration purposes, the next steps assume that you added a third data node to a previously two-data-node cluster that has a -[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) of +[replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of two. 
This rebalance procedure is applicable for different cluster sizes and replication factors, but some of the specific, user-provided values will depend diff --git a/content/enterprise_influxdb/v1/administration/manage/clusters/replacing-nodes.md b/content/enterprise_influxdb/v1/administration/manage/clusters/replacing-nodes.md index 1f24370916..467050fab1 100644 --- a/content/enterprise_influxdb/v1/administration/manage/clusters/replacing-nodes.md +++ b/content/enterprise_influxdb/v1/administration/manage/clusters/replacing-nodes.md @@ -94,7 +94,7 @@ See [step 2.2](#22-remove-the-non-leader-meta-node) of the ### Replace responsive and unresponsive data nodes in a cluster The process of replacing both responsive and unresponsive data nodes is the same. -Follow the instructions for [replacing data nodes](#replace-data-nodes-in-an-influxdb-enterprise-cluster). +Follow the instructions for [replacing data nodes](#replace-a-data-node-in-an-influxdb-enterprise-cluster). ### Reconnect a data node with a failed disk @@ -269,14 +269,14 @@ enterprise-meta-04:8091 {{< latest-patch >}}-c{{< latest-patch >}} # <-- The new #### 2.5. Remove and replace all other non-leader meta nodes **If replacing only one meta node, no further action is required.** -If replacing others, repeat steps [2.1-2.4](#21-provision-a-new-meta-node) for all non-leader meta nodes one at a time. +If replacing others, repeat steps [2.1-2.4](#2-1-provision-a-new-meta-node) for all non-leader meta nodes one at a time. ### 3. Replace the leader node As non-leader meta nodes are removed and replaced, the leader node oversees the replication of data to each of the new meta nodes. Leave the leader up and running until at least two of the new meta nodes are up, running and healthy. -#### 3.1. Kill the meta process on the leader node +#### 3.1 - Kill the meta process on the leader node Log into the leader meta node and kill the meta process. 
@@ -296,9 +296,9 @@ Confirm the new leader by running: curl localhost:8091/status | jq ``` -#### 3.2. Remove and replace the old leader node +#### 3.2 - Remove and replace the old leader node -Remove the old leader node and replace it by following steps [2.1-2.4](#21-provision-a-new-meta-node). +Remove the old leader node and replace it by following steps [2.1-2.4](#2-1-provision-a-new-meta-node). The minimum number of meta nodes you should have in your cluster is 3. ## Replace data nodes in an InfluxDB Enterprise cluster @@ -369,7 +369,7 @@ ID Database Retention Policy Desired Replicas Shard Group Start 6 foo autogen 2 4 2018-03-19T00:00:00Z 2018-03-26T00:00:00Z [{5 enterprise-data-02:8088} {4 enterprise-data-03:8088}] ``` -Within the duration defined by [`anti-entropy.check-interval`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#check-interval), +Within the duration defined by [`anti-entropy.check-interval`](/enterprise_influxdb/v1/administration/config-data-nodes#check-interval-10m), the AE service begins copying shards from other shard owners to the new node. The time it takes for copying to complete is determined by the number of shards copied and how much data is stored in each. 
diff --git a/content/enterprise_influxdb/v1/administration/upgrading.md b/content/enterprise_influxdb/v1/administration/upgrading.md index 9d13d6ccc0..c814ba238b 100644 --- a/content/enterprise_influxdb/v1/administration/upgrading.md +++ b/content/enterprise_influxdb/v1/administration/upgrading.md @@ -41,13 +41,13 @@ Complete the following steps to upgrade meta nodes: ##### Ubuntu and Debian (64-bit) ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` ##### RedHat and CentOS (64-bit) ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` ### Install the meta node package @@ -55,13 +55,13 @@ wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patc ##### Ubuntu and Debian (64-bit) ```bash -sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` ##### RedHat and CentOS (64-bit) ```bash -sudo yum localinstall influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +sudo yum localinstall influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}-1.x86_64.rpm ``` ### Update the meta node configuration file @@ -167,13 +167,13 @@ from other data nodes in the cluster. 
##### Ubuntu and Debian (64-bit) ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` ##### RedHat and CentOS (64-bit) ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` ### Install the data node package @@ -188,7 +188,7 @@ next procedure, [Update the data node configuration file](#update-the-data-node- ##### Ubuntu & Debian (64-bit) ```bash -sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` ##### RedHat & CentOS (64-bit) @@ -207,9 +207,9 @@ Migrate any custom settings from your previous data node configuration file. | Section | Setting | | --------| ----------------------------------------------------------| - | `[data]` |
  • To use Time Series Index (TSI) disk-based indexing, add [`index-version = "tsi1"`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#index-version)
  • To use TSM in-memory index, add [`index-version = "inmem"`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#index-version)
  • Add [`wal-fsync-delay = "0s"`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#wal-fsync-delay)
  • Add [`max-concurrent-compactions = 0`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#max-concurrent-compactions)
  • Set[`cache-max-memory-size`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#cache-max-memory-size) to `1073741824` | - | `[cluster]`|
    • Add [`pool-max-idle-streams = 100`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#pool-max-idle-streams)
    • Add[`pool-max-idle-time = "1m0s"`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#pool-max-idle-time)
    • Remove `max-remote-write-connections` - |[`[anti-entropy]`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#anti-entropy)|
      • Add `enabled = true`
      • Add `check-interval = "30s"`
      • Add `max-fetch = 10`| + | `[data]` |
        • To use Time Series Index (TSI) disk-based indexing, add [`index-version = "tsi1"`](/enterprise_influxdb/v1/administration/config-data-nodes#index-version-inmem)
        • To use TSM in-memory index, add [`index-version = "inmem"`](/enterprise_influxdb/v1/administration/config-data-nodes#index-version-inmem)
        • Add [`wal-fsync-delay = "0s"`](/enterprise_influxdb/v1/administration/config-data-nodes#wal-fsync-delay-0s)
        • Add [`max-concurrent-compactions = 0`](/enterprise_influxdb/v1/administration/config-data-nodes#max-concurrent-compactions-0)
        • Set[`cache-max-memory-size`](/enterprise_influxdb/v1/administration/config-data-nodes#cache-max-memory-size-1g) to `1073741824` | + | `[cluster]`|
          • Add [`pool-max-idle-streams = 100`](/enterprise_influxdb/v1/administration/config-data-nodes#pool-max-idle-streams-100)
          • Add[`pool-max-idle-time = "1m0s"`](/enterprise_influxdb/v1/administration/config-data-nodes#pool-max-idle-time-60s)
          • Remove `max-remote-write-connections` + |[`[anti-entropy]`](/enterprise_influxdb/v1/administration/config-data-nodes#anti-entropy)|
            • Add `enabled = true`
            • Add `check-interval = "30s"`
            • Add `max-fetch = 10`| |`[admin]`| Remove entire section.| For more information about TSI, see [TSI overview](/enterprise_influxdb/v1/concepts/time-series-index/) and [TSI details](/enterprise_influxdb/v1/concepts/tsi-details/). diff --git a/content/enterprise_influxdb/v1/features/_index.md b/content/enterprise_influxdb/v1/features/_index.md index 1b282a1384..fbdc08f90b 100644 --- a/content/enterprise_influxdb/v1/features/_index.md +++ b/content/enterprise_influxdb/v1/features/_index.md @@ -29,7 +29,7 @@ Certain configurations (e.g., 3 meta and 2 data node) provide high-availability while making certain tradeoffs in query performance when compared to a single node. Further increasing the number of nodes can improve performance in both respects. -For example, a cluster with 4 data nodes and a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) +For example, a cluster with 4 data nodes and a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of 2 can support a higher volume of write traffic than a single node could. It can also support a higher *query* workload, as the data is replicated in two locations. Performance of the queries may be on par with a single diff --git a/content/enterprise_influxdb/v1/guides/migration.md b/content/enterprise_influxdb/v1/guides/migration.md index 617b365b69..0ff72c7a31 100644 --- a/content/enterprise_influxdb/v1/guides/migration.md +++ b/content/enterprise_influxdb/v1/guides/migration.md @@ -48,7 +48,7 @@ with the `-portable` flag: > **Note:** InfluxDB Enterprise uses the **influxd-ctl utility** to back up and restore data. For more information, see [influxd-ctl](/enterprise_influxdb/v1/tools/influxd-ctl) -and [`restore`](/enterprise_influxdb/v1/administration/backup-and-restore/#restore-utility). +and [`restore`](/enterprise_influxdb/v1/administration/backup-and-restore/#restore). 5. 
To avoid data loss, dual write to both OSS and Enterprise while completing the remaining steps. This keeps the OSS and cluster active for testing and acceptance work. For more information, see [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/). @@ -200,14 +200,14 @@ Without a backup, you'll lose custom configuration settings when updating the In {{% /code-tabs %}} {{% code-tab-content %}} ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```bash -wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -314,7 +314,7 @@ It may take a few minutes before the existing data is available. ## Rebalance the cluster 1. Use the [`ALTER RETENTION POLICY`](/enterprise_influxdb/v1/query_language/manage-database/#modify-retention-policies-with-alter-retention-policy) - statement to increase the [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) + statement to increase the [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) on all existing retention polices to the number of data nodes in your cluster. 2. 
[Rebalance your cluster manually](/enterprise_influxdb/v1/guides/rebalance/) to meet the desired replication factor for existing shards. diff --git a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md index 97f8f794c9..f66841b1b5 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md @@ -152,14 +152,14 @@ Instructions for both are provided below. {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -173,14 +173,14 @@ sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget 
https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -205,12 +205,12 @@ For added security, follow these steps to verify the signature of your InfluxDB {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc +wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -218,7 +218,7 @@ wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest 3. 
Verify the signature with `gpg --verify`: ```sh - gpg --verify influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm + gpg --verify influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` The output from this command should include the following: diff --git a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md index 031ca134eb..92655c0b3b 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md @@ -159,14 +159,14 @@ Instructions for both are provided below. {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -180,14 +180,14 @@ sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb {{% /code-tabs %}} {{% 
code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -213,12 +213,12 @@ For added security, follow these steps to verify the signature of your InfluxDB {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc +wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -226,7 +226,7 @@ wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest 3. 
Verify the signature with `gpg --verify`: ```sh - gpg --verify influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm + gpg --verify influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` The output from this command should include the following: diff --git a/content/enterprise_influxdb/v1/introduction/installation/single-server.md b/content/enterprise_influxdb/v1/introduction/installation/single-server.md index db5fc8eb26..96b1cd5bac 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/single-server.md +++ b/content/enterprise_influxdb/v1/introduction/installation/single-server.md @@ -90,14 +90,14 @@ processes running on the same server. {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-meta_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -118,13 +118,13 @@ InfluxDB Enterprise meta service download with `gpg`. 
For example: ```sh - wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc + wget https://dl.influxdata.com/enterprise/releases/influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` 3. Verify the signature with `gpg --verify`: ```sh - gpg --verify influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-meta-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm + gpg --verify influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc influxdb-meta-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` The output from this command should include the following: @@ -335,14 +335,14 @@ The InfluxDB Enterprise data service runs the InfluxDB storage and query engines {{% /code-tabs %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb -sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}_amd64.deb +wget https://dl.influxdata.com/enterprise/releases/influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb +sudo dpkg -i influxdb-data_{{< latest-patch >}}-c{{< latest-patch >}}-1_amd64.deb ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```sh -wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm -sudo yum localinstall influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm +wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm +sudo yum localinstall influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -363,13 +363,13 @@ InfluxDB Enterprise data service download with `gpg`. 
For example: ```sh - wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc + wget https://dl.influxdata.com/enterprise/releases/influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm.asc ``` 3. Verify the signature with `gpg --verify`: ```sh - gpg --verify influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm + gpg --verify influxdb-data-{{< latest-patch >}}-c{{< latest-patch >}}.x86_64.rpm.asc influxdb-data-{{< latest-patch >}}_c{{< latest-patch >}}-1.x86_64.rpm ``` The output from this command should include the following: diff --git a/content/enterprise_influxdb/v1/query_language/_index.md b/content/enterprise_influxdb/v1/query_language/_index.md index 2b3fe15a20..8b0f1be9c2 100644 --- a/content/enterprise_influxdb/v1/query_language/_index.md +++ b/content/enterprise_influxdb/v1/query_language/_index.md @@ -74,13 +74,13 @@ covers the use of mathematical operators in InfluxQL. #### Authentication and authorization -[Authentication and authorization](/enterprise_influxdb/v1/administration/manage/users-and-permissions/) covers how to -[set up authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication) +[Authentication and authorization](/enterprise_influxdb/v1/administration/authentication_and_authorization/) covers how to +[set up authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication) and how to -[authenticate requests](/enterprise_influxdb/v1/administration/configure/security/authentication/) in InfluxDB. 
-See also the different -[user types](/enterprise_influxdb/v1/administration/manage/users-and-permissions/authorization-influxql/#non-admin-users) and the InfluxQL for -[managing database users](/enterprise_influxdb/v1/administration/manage/users-and-permissions/authorization-influxql/#user-management-commands). +[authenticate requests](/enterprise_influxdb/v1/administration/authentication_and_authorization/#authenticate-requests) in InfluxDB. +This page also describes the different +[user types](/enterprise_influxdb/v1/administration/authentication_and_authorization/#user-types-and-privileges) and the InfluxQL for +[managing database users](/enterprise_influxdb/v1/administration/authentication_and_authorization/#user-management-commands). ## InfluxQL reference diff --git a/content/enterprise_influxdb/v1/query_language/explore-schema.md b/content/enterprise_influxdb/v1/query_language/explore-schema.md index 1968ff47bf..a79bff38ef 100644 --- a/content/enterprise_influxdb/v1/query_language/explore-schema.md +++ b/content/enterprise_influxdb/v1/query_language/explore-schema.md @@ -104,7 +104,7 @@ database in tabular format. The database has one retention policy called `autogen`. The `autogen` retention policy has an infinite [duration](/enterprise_influxdb/v1/concepts/glossary/#duration), a seven-day [shard group duration](/enterprise_influxdb/v1/concepts/glossary/#shard-group), -a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) +a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of one, and it is the `DEFAULT` retention policy for the database. 
#### Run a `SHOW RETENTION POLICIES` query without the `ON` clause diff --git a/content/enterprise_influxdb/v1/query_language/manage-database.md b/content/enterprise_influxdb/v1/query_language/manage-database.md index 880738aef1..45b5848fe6 100644 --- a/content/enterprise_influxdb/v1/query_language/manage-database.md +++ b/content/enterprise_influxdb/v1/query_language/manage-database.md @@ -102,7 +102,7 @@ CREATE DATABASE "NOAA_water_database" WITH DURATION 3d REPLICATION 1 SHARD DURAT ``` The query creates a database called `NOAA_water_database`. -It also creates a default retention policy for `NOAA_water_database` with a `DURATION` of three days, a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) of one, a [shard group](/enterprise_influxdb/v1/concepts/glossary/#shard-group) duration of one hour, and with the name `liquid`. +It also creates a default retention policy for `NOAA_water_database` with a `DURATION` of three days, a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of one, a [shard group](/enterprise_influxdb/v1/concepts/glossary/#shard-group) duration of one hour, and with the name `liquid`. ### Delete a database with DROP DATABASE @@ -225,7 +225,7 @@ It does not drop the associated continuous queries. A successful `DROP MEASUREMENT` query returns an empty result. {{% warn %}} Currently, InfluxDB does not support regular expressions with `DROP MEASUREMENTS`. -See GitHub Issue [#4275](https://github.com/influxdata/influxdb/issues/4275) for more information. +See GitHub Issue [#4275](https://github.com/influxdb/influxdb/issues/4275) for more information. 
{{% /warn %}} ### Delete a shard with DROP SHARD diff --git a/content/enterprise_influxdb/v1/query_language/spec.md b/content/enterprise_influxdb/v1/query_language/spec.md index b4bf8baa64..8dd6933fb4 100644 --- a/content/enterprise_influxdb/v1/query_language/spec.md +++ b/content/enterprise_influxdb/v1/query_language/spec.md @@ -37,6 +37,8 @@ To learn more about InfluxQL, browse the following topics: * [Explore your schema with InfluxQL](/enterprise_influxdb/v1/query_language/explore-schema/) * [Database management](/enterprise_influxdb/v1/query_language/manage-database/) * [Authentication and authorization](/enterprise_influxdb/v1/administration/authentication_and_authorization/). +* [Query engine internals](/enterprise_influxdb/v1/query_language/spec/#query-engine-internals) + ## Notation The syntax is specified using Extended Backus-Naur Form ("EBNF"). @@ -230,15 +232,12 @@ regex_lit = "/" { unicode_char } "/" . `=~` matches against `!~` doesn't match against -InfluxQL supports using regular expressions when specifying: - -- [field keys](/enterprise_influxdb/v1/concepts/glossary/#field-key) and [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key) in the [`SELECT` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-basic-select-statement) -- [measurements](/enterprise_influxdb/v1/concepts/glossary/#measurement) in the [`FROM` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-basic-select-statement) -- [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) and string [field values](/enterprise_influxdb/v1/concepts/glossary/#field-value) in the [`WHERE` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-where-clause). 
-- [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key) in the [`GROUP BY` clause](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags) - -> [!Note] -> #### Regular expressions and non-string field values +> **Note:** InfluxQL supports using regular expressions when specifying: +> +* [field keys](/enterprise_influxdb/v1/concepts/glossary/#field-key) and [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key) in the [`SELECT` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-basic-select-statement) +* [measurements](/enterprise_influxdb/v1/concepts/glossary/#measurement) in the [`FROM` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-basic-select-statement) +* [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) and string [field values](/enterprise_influxdb/v1/concepts/glossary/#field-value) in the [`WHERE` clause](/enterprise_influxdb/v1/query_language/explore-data/#the-where-clause). +* [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key) in the [`GROUP BY` clause](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags) > >Currently, InfluxQL does not support using regular expressions to match >non-string field values in the @@ -632,7 +631,7 @@ SIZE OF BLOCKS: 931 ### EXPLAIN ANALYZE -Executes the specified SELECT statement and returns data on the query performance and storage during runtime, visualized as a tree. Use this statement to analyze query performance and storage, including [execution time](#execution_time) and [planning time](#planning_time), and the [iterator type](#iterator-type) and [cursor type](#cursor-type). +Executes the specified SELECT statement and returns data on the query performance and storage during runtime, visualized as a tree. Use this statement to analyze query performance and storage, including [execution time](#execution-time) and [planning time](#planning-time), and the [iterator type](#iterator-type) and [cursor type](#cursor-type). 
For example, executing the following statement: @@ -702,7 +701,7 @@ EXPLAIN ANALYZE supports the following iterator types: - `create_iterator` node represents work done by the local influxd instance──a complex composition of nested iterators combined and merged to produce the final query output. - (InfluxDB Enterprise only) `remote_iterator` node represents work done on remote machines. -For more information about iterators, see the [iterator type](#iterator-type) section above. +For more information about iterators, see [Understanding iterators](#understanding-iterators). ##### cursor type @@ -712,7 +711,7 @@ EXPLAIN ANALYZE distinguishes 3 cursor types. While the cursor types have the sa - cursor_aux: Auxiliary cursor created for simple expression projections (not selectors or an aggregation). For example, `SELECT foo FROM m` or `SELECT foo+bar FROM m`, where `foo` and `bar` are fields. - cursor_cond: Condition cursor created for fields referenced in a WHERE clause. -For more information about cursors, see the [cursor type](#cursor-type) section above. +For more information about cursors, see [Understanding cursors](#understanding-cursors). ##### block types @@ -752,7 +751,7 @@ Stop currently-running query. kill_query_statement = "KILL QUERY" query_id . ``` -Where `query_id` is the query ID, displayed in the [`SHOW QUERIES`](/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management/#list-currently-running-queries-with-show-queries) output as `qid`. +Where `query_id` is the query ID, displayed in the [`SHOW QUERIES`](/enterprise_influxdb/v1/troubleshooting/query_management/#list-currently-running-queries-with-show-queries) output as `qid`. > ***InfluxDB Enterprise clusters:*** To kill queries on a cluster, you need to specify the query ID (qid) and the TCP host (for example, `myhost:8088`), > available in the `SHOW QUERIES` output. 
diff --git a/content/enterprise_influxdb/v1/reference/hardware_sizing.md b/content/enterprise_influxdb/v1/reference/hardware_sizing.md index 1c215af7b2..ddb6a22150 100644 --- a/content/enterprise_influxdb/v1/reference/hardware_sizing.md +++ b/content/enterprise_influxdb/v1/reference/hardware_sizing.md @@ -96,7 +96,7 @@ The InfluxDB Enterprise web server is primarily an HTTP server with similar load ### Data nodes -A cluster with one data node is valid but has no data redundancy. Redundancy is set by the [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor-rf) on the retention policy the data is written to. Where `n` is the replication factor, a cluster can lose `n - 1` data nodes and return complete query results. +A cluster with one data node is valid but has no data redundancy. Redundancy is set by the [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) on the retention policy the data is written to. Where `n` is the replication factor, a cluster can lose `n - 1` data nodes and return complete query results. >**Note:** For optimal data distribution within the cluster, use an even number of data nodes. diff --git a/content/enterprise_influxdb/v1/tools/api.md b/content/enterprise_influxdb/v1/tools/api.md index 3643a76431..35bfa24b00 100644 --- a/content/enterprise_influxdb/v1/tools/api.md +++ b/content/enterprise_influxdb/v1/tools/api.md @@ -16,15 +16,15 @@ It uses HTTP response codes, authentication with username and password credentia The following sections assume your InfluxDB instance is running on `localhost` port `8086` and HTTPS is not enabled. -Those settings [are configurable](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#http-endpoint-settings). +Those settings [are configurable](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#http-endpoints-settings). 
-- [InfluxDB 2.x API compatibility endpoints](#influxdb-2x-api-compatibility-endpoints) +- [InfluxDB 2.x API compatibility endpoints](#influxdb-20-api-compatibility-endpoints) - [InfluxDB 1.x HTTP endpoints](#influxdb-1x-http-endpoints) ## InfluxDB 2.x API compatibility endpoints InfluxDB 1.8.0 introduced forward compatibility APIs for InfluxDB 2.x. -These APIs serve several purposes: +There are multiple reasons for introducing these: - [InfluxDB 2.x client libraries](/enterprise_influxdb/v1/tools/api_client_libraries/) are built for the InfluxDB `/api/v2` API and work with **InfluxDB 2.x** and **InfluxDB 1.8+**. @@ -49,14 +49,14 @@ The following forward compatible APIs are available: ### `/api/v2/query/` HTTP endpoint The `/api/v2/query` endpoint accepts `POST` HTTP requests. -Use this endpoint to query data using [Flux](/enterprise_influxdb/v1/flux/) and [InfluxDB 2.x client libraries](/influxdb/v2/api-guide/client-libraries/). +Use this endpoint to query data using [Flux](/enterprise_influxdb/v1/flux/) and [InfluxDB 2.x client libraries](/influxdb/v2.x/api-guide/client-libraries/). Flux is the primary language for working with data in InfluxDB 2.x. **Include the following HTTP headers:** - `Accept: application/csv` - `Content-type: application/vnd.flux` -- If [authentication is enabled](/enterprise_influxdb/v1/administration/configure/security/authentication/), +- If [authentication is enabled](/enterprise_influxdb/v1/administration/authentication_and_authorization), provide your InfluxDB username and password: `Authorization: Token USERNAME:PASSWORD` @@ -397,58 +397,6 @@ For information about InfluxDB HTTP server metrics, see the [`httpd` measurement >**Note:** The [InfluxDB input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) is available to collect metrics (using the `/debug/vars` endpoint) from specified Kapacitor instances. 
For a list of the measurements and fields, see the [InfluxDB input plugin README](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb#readme). -#### Running configuration {metadata="v1.12.3+"} - -The `/debug/vars` response includes a `config` key that contains the running [TSDB storage configuration](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#data-settings). -Use this to inspect active server settings without direct access to configuration files. - -Values in the JSON output use the following representations: - -- Size values (such as `cache-max-memory-size`) appear as integers in bytes. -- Duration values (such as `cache-snapshot-write-cold-duration`) appear as human-readable strings (for example, `"10m0s"`). - -The output is similar to the following: - -```json -{ - "config": { - "cache-max-memory-size": 1073741824, - "cache-snapshot-memory-size": 26214400, - "cache-snapshot-write-cold-duration": "10m0s", - "compact-full-write-cold-duration": "4h0m0s", - "compact-throughput": 50331648, - "compact-throughput-burst": 50331648, - "dir": "/var/lib/influxdb/data", - "max-concurrent-compactions": 0, - "max-index-log-file-size": 1048576, - "max-series-per-database": 1000000, - "max-values-per-tag": 100000, - "series-id-set-cache-size": 100, - "wal-dir": "/var/lib/influxdb/wal", - "wal-fsync-delay": "0s" - } -} -``` - -> [!Note] -> InfluxDB Enterprise data nodes also expose [cluster configuration](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#cluster-settings) fields through their own diagnostics, including `dial-timeout`, `shard-reader-timeout`, and `rpc-resettable-*-timeout` settings. - -#### Continuous query statistics {metadata="v1.12.3+"} - -The `/debug/vars` response includes a `cq` key with continuous query execution counters: - -```json -{ - "cq": { - "queryOk": 2, - "queryFail": 0 - } -} -``` - -- `queryOk`: Number of CQ executions that completed successfully. 
-- `queryFail`: Number of CQ executions that failed. - ### `/ping` HTTP endpoint The ping endpoint accepts both `GET` and `HEAD` HTTP requests. @@ -589,18 +537,17 @@ A successful [`CREATE DATABASE` query](/enterprise_influxdb/v1/query_language/ma | chunked=[true \| \] | Optional | Returns points in streamed batches instead of in a single response. If set to `true`, InfluxDB chunks responses by series or by every 10,000 points, whichever occurs first. If set to a specific value, InfluxDB chunks responses by series or by that number of points.* | | db=\ | Required for database-dependent queries (most [`SELECT`](/enterprise_influxdb/v1/query_language/spec/#select) queries and [`SHOW`](/enterprise_influxdb/v1/query_language/spec/#show-continuous-queries) queries require this parameter). | Sets the target [database](/enterprise_influxdb/v1/concepts/glossary/#database) for the query. | | epoch=[ns,u,µ,ms,s,m,h] | Optional | Returns epoch timestamps with the specified precision. By default, InfluxDB returns timestamps in RFC3339 format with nanosecond precision. Both `u` and `µ` indicate microseconds. | -| p=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication). Required if you've enabled authentication.** | Sets the password for authentication if you've enabled authentication. Use with the query string parameter `u`. | +| p=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.** | Sets the password for authentication if you've enabled authentication. Use with the query string parameter `u`. | | pretty=true | Optional | Enables pretty-printed JSON output. While this is useful for debugging it is not recommended for production use as it consumes unnecessary network bandwidth. | | q=\ | Required | InfluxQL string to execute. 
See also [Request Body](/enterprise_influxdb/v1/tools/api/#request-body). | -| time_format=[epoch \| rfc3339] | Optional | Sets the timestamp format in query responses. `epoch` _(default)_ returns epoch timestamps (use with the `epoch` parameter to set precision). `rfc3339` returns timestamps as RFC3339Nano-formatted strings (for example, `2017-03-01T00:16:18.000000000Z`). Returns `400` if set to an invalid value. _Available in InfluxDB Enterprise v1.12.3+._ | -| u=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have read access to the database. Use with the query string parameter `p`. | +| u=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have read access to the database. Use with the query string parameter `p`. | \* InfluxDB does not truncate the number of rows returned for requests without the `chunked` parameter. That behavior is configurable; see the [`max-row-limit`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#max-row-limit) configuration option for more information. \** The InfluxDB API also supports basic authentication. -Use basic authentication if you've [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication) +Use basic authentication if you've [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication) and aren't using the query string parameters `u` and `p`. 
See below for an [example](#create-a-database-using-basic-authentication) of basic authentication. @@ -657,20 +604,6 @@ The response body data is similar to the following: {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[[1488327378,33.1,null,null],[1488327438,12.4,"12","14"]]}]}]} ``` -##### Query data with a `SELECT` statement and the `time_format` parameter {metadata="v1.12.3+"} - -- `time_format=rfc3339`: Return timestamps as RFC3339Nano-formatted strings. - -```bash -curl -G 'http://localhost:8086/query?db=mydb&time_format=rfc3339' --data-urlencode 'q=SELECT * FROM "mymeas"' -``` - -The output is similar to the following: - -```json -{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:16:18Z",33.1,null,null],["2017-03-01T00:17:18Z",12.4,"12","14"]]}]}]} -``` - ##### Create a database using HTTP authentication The following example shows how to authenticate with v1.x credentials in the query string and @@ -994,13 +927,13 @@ POST http://localhost:8086/write | :--------------------- | :---------------- | :---------- | | consistency=[any,one,quorum,all] | Optional, available with [InfluxDB Enterprise clusters](/enterprise_influxdb/v1/) only. | Sets the write consistency for the point. InfluxDB assumes that the write consistency is `one` if you do not specify `consistency`. See the [InfluxDB Enterprise documentation](/enterprise_influxdb/v1/concepts/clustering#write-consistency) for detailed descriptions of each consistency option. | | db=\ | Required | Sets the target [database](/enterprise_influxdb/v1/concepts/glossary/#database) for the write. | -| p=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication). Required if you've enabled authentication.* | Sets the password for authentication if you've enabled authentication. 
Use with the query string parameter `u`. | +| p=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the password for authentication if you've enabled authentication. Use with the query string parameter `u`. | | precision=[ns,u,ms,s,m,h] | Optional | Sets the precision for the supplied Unix time values. InfluxDB assumes that timestamps are in nanoseconds if you do not specify `precision`.** | | rp=\ | Optional | Sets the target [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp) for the write. InfluxDB writes to the `DEFAULT` retention policy if you do not specify a retention policy. | -| u=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have write access to the database. Use with the query string parameter `p`. | +| u=\ | Optional if you haven't [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have write access to the database. Use with the query string parameter `p`. | \* The InfluxDB API also supports basic authentication. -Use basic authentication if you've [enabled authentication](/enterprise_influxdb/v1/administration/configure/security/authentication/#enable-authentication) +Use basic authentication if you've [enabled authentication](/enterprise_influxdb/v1/administration/authentication_and_authorization/#set-up-authentication) and aren't using the query string parameters `u` and `p`. 
See below for an [example](#write-a-point-to-the-database-mydb-using-basic-authentication) of basic authentication. diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/_index.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/_index.md index a956cd04e0..8462fe1e31 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/_index.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/_index.md @@ -58,7 +58,6 @@ influxd-ctl [global-flags] [command-flags] [arguments] | `-k` | Skip certificate verification _(ignored without `-bind-tls`)_ | | `-pwd` | Password for basic authentication _(ignored without `-auth-type basic`)_ | | `-secret` | JWT shared secret _(ignored without `-auth-type jwt`)_ | -| `-timeout` | Override the default timeout of 10s for operations _(for example, `30s`, `1m`)_. _v1.12.3+_ | | `-user` | Username _(ignored without `-auth-type basic` or `jwt`)_ | ## Examples @@ -66,7 +65,6 @@ influxd-ctl [global-flags] [command-flags] [arguments] - [Bind to a remote meta node](#bind-to-a-remote-meta-node) - [Authenticate with JWT](#authenticate-with-jwt) - [Authenticate with basic authentication](#authenticate-with-basic-authentication) -- [Override the default timeout](#override-the-default-timeout) ### Bind to a remote meta node @@ -86,17 +84,11 @@ influxd-ctl -auth-type jwt -secret oatclusters influxd-ctl -auth-type basic -user admin -pwd passw0rd ``` -### Override the default timeout {metadata="v1.12.3+"} - -```sh -influxd-ctl -timeout 30s show-shards -``` - {{< expand-wrapper >}} {{% expand "Troubleshoot `influxd-ctl` authentication" %}} If authentication is enabled in the cluster's -[meta node configuration files](/enterprise_influxdb/v1/administration/configure/config-meta-nodes/#auth-enabled) +[meta node configuration files](/enterprise_influxdb/v1/administration/config-meta-nodes/#auth-enabled-false) and [data node configuration files](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#meta-auth-enabled) and the 
`influxd-ctl` command does not include authentication details, the system returns: diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md index 2207127f4b..0c3e064696 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md @@ -50,19 +50,13 @@ influxd-ctl backup [flags] | Flag | Description | | :---------- | :------------------------------------------------------------------ | -| `-bufsize` | Buffer size (in bytes) for writing gzip files. Default is `1048576` (1 MB). _v1.12.3+_ | -| `-cpuprofile` | Write CPU profile to the specified file path. For debugging backup performance. _v1.12.3+_ | | `-db` | Database to backup | | `-end` | End date for backup _(RFC3339 timestamp)_ | | `-estimate` | Estimate the size of the requested backup | -| `-from` | Data node TCP address to prefer when backing up. In v1.12.3+, the node must exist in the cluster or the command returns an error. When the preferred node doesn't own a shard, the command falls back to other owners sorted by most recent write. See [Node selection](#node-selection). | -| `-full` | Perform a full backup _(deprecated in favor of `-strategy full`)_ | -| `-gzipBlockCount` | Number of concurrent blocks for gzip compression. Default is the number of CPU cores. Recommended: 1-2x CPU cores. _v1.12.3+_ | -| `-gzipBlockSize` | Block size (in bytes) for pgzip compression. Default is `1048576` (1 MB). Recommended >1 MB for performance. _v1.12.3+_ | -| `-gzipCompressionLevel` | Gzip compression level: `default`, `full`, `speedy`, or `none`. Default is `default`. 
_v1.12.3+_ | +| `-from` | Data node TCP address to prefer when backing up | +| `-full` | Perform a full backup _(deprecated in favor of `-strategy full`)_ | | `-rp` | Retention policy to backup | | `-shard` | Shard ID to backup | -| `-staleness-threshold` | For incremental backups, skip shards modified within this duration of the existing backup. Default is `10m` (matches [`cache-snapshot-write-cold-duration`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#cache-snapshot-write-cold-duration)). _v1.12.3+_ | | `-start` | Start date for backup _(RFC3339 timestamp)_ | | `-strategy` | Backup strategy to use (`only-meta`, `full`, or `incremental`) | @@ -70,22 +64,6 @@ influxd-ctl backup [flags] _Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._ {{% /caption %}} -## Backup behavior {metadata="v1.12.3+"} - -### Node selection - -When backing up a shard, the command selects the best data node to read from: - -1. Shard copies with zero bytes are skipped. -2. Copies are sorted by most recent write time — the most recently written copy is tried first. -3. If you specify `-from`, that node is preferred. If the preferred node doesn't own the shard, the command falls back to other owners. -4. If the `-from` node doesn't exist in the cluster, the command fails with: `data node "" does not exist`. - -### Staleness threshold - -During incremental backups, the `-staleness-threshold` flag controls when a shard is considered current and can be skipped. -A shard is skipped when the existing backup timestamp plus the staleness threshold is after the shard's last modification time. 
- ## Examples - [Perform an incremental backup](#perform-an-incremental-backup) diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md index 61e2125958..b87f7bea23 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md @@ -53,7 +53,6 @@ that are either in metadata but not on disk or on disk but not in metadata. | Flag | Description | | :--- | :-------------------------------- | -| `-e` | Include expired shards in the output. By default, expired shards are filtered out. _v1.12.3+_ | | `-v` | Return detailed shard information | | `-m` | Return inconsistent shards | diff --git a/content/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management.md b/content/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management.md index e6209beccb..135459f926 100644 --- a/content/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management.md +++ b/content/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management.md @@ -21,8 +21,8 @@ For Flux query management settings, see [Flux query management](/enterprise_infl ## List currently-running queries with `SHOW QUERIES` -`SHOW QUERIES` lists the query id, node id, TCP host, query text, relevant database, duration, -status, and user of all currently-running queries on your InfluxDB Enterprise cluster. +`SHOW QUERIES` lists the query id, query text, relevant database, and duration +of all currently-running queries on your InfluxDB instance. 
#### Syntax @@ -34,19 +34,17 @@ SHOW QUERIES ``` > SHOW QUERIES -qid node_id tcp_host query database duration status user ---- ------- -------- ----- -------- -------- ------ ---- -37 26 data1:8088 SHOW QUERIES 100368u running admin -36 33 data3:8088 SELECT mean(myfield) FROM mymeas mydb 3s running jdoe +qid query database duration status +--- ----- -------- -------- ------ +37 SHOW QUERIES 100368u running +36 SELECT mean(myfield) FROM mymeas mydb 3s running ``` ##### Explanation of the output -- `qid`: Query ID. Use this ID with [`KILL QUERY`](/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management/#stop-currently-running-queries-with-kill-query). -- `node_id`: The data node ID where the query is running. -- `tcp_host`: The TCP host address of the data node. Use this value with [`KILL QUERY ... ON`](/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management/#stop-currently-running-queries-with-kill-query) to kill queries on a specific node. -- `query`: The query text. -- `database`: The database targeted by the query. +- `qid`: Query ID. Use this ID with [`KILL QUERY`](/enterprise_influxdb/v1/troubleshooting/query_management/influxql_query_management/#stop-currently-running-queries-with-kill-query). +- `query`: The query text. +- `database`: The database targeted by the query. - `duration`: The length of time that the query has been running. See [Query Language Reference](/enterprise_influxdb/v1/query_language/spec/#durations) for an explanation of time units in InfluxDB databases. @@ -57,7 +55,6 @@ until the query record is cleared from memory. {{% /note %}} - `status`: The current status of the query. -- `user`: The user who initiated the query. Empty if authentication is not enabled. _v1.12.3+_ ## Stop currently-running queries with `KILL QUERY` @@ -98,7 +95,7 @@ A successful `KILL QUERY` query returns no results. 
## Configuration settings for query management The following configuration settings are in the -[\[cluster\]](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#cluster) section of the +[coordinator](/enterprise_influxdb/v1/administration/config-data-nodes/#influxql-query-management-settings) section of the configuration file. ### `max-concurrent-queries` diff --git a/content/influxdb/v1/about_the_project/release-notes.md b/content/influxdb/v1/about_the_project/release-notes.md index b1e21b9355..b60c0aaacf 100644 --- a/content/influxdb/v1/about_the_project/release-notes.md +++ b/content/influxdb/v1/about_the_project/release-notes.md @@ -23,12 +23,12 @@ alt_links: - Add [`advanced-expiration` TLS configuration option](/influxdb/v1/administration/config/#advanced-expiration) to configure how far in advance to log warnings about TLS certificate expiration. - Add TLS certificate reloading on `SIGHUP`. -- Add [`config`](/influxdb/v1/tools/api/#running-configuration) and [`cq` (continuous query) statistics](/influxdb/v1/tools/api/#continuous-query-statistics) to the `/debug/vars` endpoint. +- Add `config` and `cq` (continuous query) diagnostics to the `/debug/vars` endpoint. - Improve dropped point logging. -- [Show user when displaying or logging queries](/influxdb/v1/troubleshooting/query_management/#list-currently-running-queries-with-show-queries). -- Add [`time_format` parameter](/influxdb/v1/tools/api/#query-data-with-a-select-statement-and-the-time_format-parameter) for the HTTP API. +- Show user when displaying or logging queries. +- Add `time_format` parameter for the HTTP API. - Use dynamic logging levels (`zap.AtomicLevel`). -- [Report user query bytes](/influxdb/v1/administration/config/#user-query-bytes-enabled). +- Report user query bytes. 
### Bug fixes diff --git a/content/influxdb/v1/administration/config.md b/content/influxdb/v1/administration/config.md index 30ce0c3c8e..196d0cd3a5 100644 --- a/content/influxdb/v1/administration/config.md +++ b/content/influxdb/v1/administration/config.md @@ -929,21 +929,11 @@ effect if [`auth-enabled`](#auth-enabled) is set to `false`. **Default**: `false` **Environment variable**: `INFLUXDB_HTTP_PROM_READ_AUTH_ENABLED` -#### user-query-bytes-enabled {metadata="v1.12.3+"} - -Enables per-user query response byte tracking. -When enabled, InfluxDB records the number of bytes returned by queries for each user in the `userquerybytes` measurement, available through `SHOW STATS FOR 'userquerybytes'`, the `_internal` database, and the `/debug/vars` endpoint. - -Unauthenticated queries are attributed to `(anonymous)`. - -**Default**: `false` -**Environment variable**: `INFLUXDB_HTTP_USER_QUERY_BYTES_ENABLED` - #### http-headers -User-supplied [HTTP response headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers). -Configure this section to return -[security headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers#security) +User-supplied [HTTP response headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers). +Configure this section to return +[security headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers#security) such as `X-Frame-Options` or `Content Security Policy` where needed. Example: @@ -1603,7 +1593,7 @@ InfluxData recommends configuring your InfluxDB server's TLS settings for that backward compatibility is not required. Our recommended TLS configuration settings for `ciphers`, `min-version`, and `max-version` are based on Mozilla's "modern compatibility" TLS server configuration described in -[Security/Server Side TLS](https://wiki.mozilla.org/Security/Server_Side_TLS). +[Security/Server Side TLS](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility). 
InfluxData's recommended TLS settings for "modern compatibility" are specified in the following configuration settings example: diff --git a/content/influxdb/v1/query_language/manage-database.md b/content/influxdb/v1/query_language/manage-database.md index 014fe12fc2..c6d18fba54 100644 --- a/content/influxdb/v1/query_language/manage-database.md +++ b/content/influxdb/v1/query_language/manage-database.md @@ -226,7 +226,7 @@ It does not drop the associated continuous queries. A successful `DROP MEASUREMENT` query returns an empty result. {{% warn %}} Currently, InfluxDB does not support regular expressions with `DROP MEASUREMENT`. -See GitHub Issue [#4275](https://github.com/influxdata/influxdb/issues/4275) for more information. +See GitHub Issue [#4275](https://github.com/influxdb/influxdb/issues/4275) for more information. {{% /warn %}} ### Delete a shard with DROP SHARD diff --git a/content/influxdb/v1/tools/api.md b/content/influxdb/v1/tools/api.md index dc8fed2e1c..cbb7fb70a4 100644 --- a/content/influxdb/v1/tools/api.md +++ b/content/influxdb/v1/tools/api.md @@ -64,8 +64,8 @@ InfluxDB v1 supports the following v2-compatible APIs: | Endpoint | Description | | :--------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------ | -| [/api/v2/query](#apiv2query-http-endpoint) | Query data in InfluxDB 1.8.0+ using the InfluxDB v2 API and [Flux](/flux/latest/) | -| [/api/v2/write](#apiv2write-http-endpoint) | Write data to InfluxDB 1.8.0+ using the InfluxDB v2 API _(compatible with InfluxDB v2 client libraries)_ | +| [/api/v2/query](#api-v2-query-http-endpoint) | Query data in InfluxDB 1.8.0+ using the InfluxDB v2 API and [Flux](/flux/latest/) | +| [/api/v2/write](#api-v2-write-http-endpoint) | Write data to InfluxDB 1.8.0+ using the InfluxDB v2 API _(compatible with InfluxDB v2 client libraries)_ | | [/api/v2/buckets](#apiv2buckets-http-endpoint) | 
Allows some client code using buckets to run against 1.x and 2.x without modification | | [/api/v2/delete](#apiv2delete-http-endpoint) | Supports deletion by tag value, timestamp, and measurement using the InfluxDB v2 API _(compatible with InfluxDB v2 client libraries)_ | | [/health](#health-http-endpoint) | Check the health of your InfluxDB instance | @@ -284,9 +284,9 @@ The following InfluxDB 1.x API endpoints are available: | Endpoint | Description | |:---------- |:---------- | -| [/debug/pprof ](#debugpprof-http-endpoint) | Generate profiles for troubleshooting | -| [/debug/requests](#debugrequests-http-endpoint) | Track HTTP client requests to the `/write` and `/query` endpoints | -| [/debug/vars](#debugvars-http-endpoint) | Collect internal InfluxDB statistics | +| [/debug/pprof ](#debug-pprof-http-endpoint) | Generate profiles for troubleshooting | +| [/debug/requests](#debug-requests-http-endpoint) | Track HTTP client requests to the `/write` and `/query` endpoints | +| [/debug/vars](#debug-vars-http-endpoint) | Collect internal InfluxDB statistics | | [/ping](#ping-http-endpoint) | Check the status of your InfluxDB instance and your version of InfluxDB | | [/query](#query-http-endpoint) | Query data using **InfluxQL**, manage databases, retention policies, and users | | [/write](#write-http-endpoint) | Write data to a database | @@ -420,55 +420,6 @@ For information about InfluxDB HTTP server metrics, see the [`httpd` measurement >**Note:** The [InfluxDB input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) is available to collect metrics (using the `/debug/vars` endpoint) from specified Kapacitor instances. For a list of the measurements and fields, see the [InfluxDB input plugin README](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb#readme). 
-#### Running configuration {metadata="v1.12.3+"} - -The `/debug/vars` response includes a `config` key that contains the running [TSDB storage configuration](/influxdb/v1/administration/config/#data-settings). -Use this to inspect active server settings without direct access to configuration files. - -Values in the JSON output use the following representations: - -- Size values (such as `cache-max-memory-size`) appear as integers in bytes. -- Duration values (such as `cache-snapshot-write-cold-duration`) appear as human-readable strings (for example, `"10m0s"`). - -The output is similar to the following: - -```json -{ - "config": { - "cache-max-memory-size": 1073741824, - "cache-snapshot-memory-size": 26214400, - "cache-snapshot-write-cold-duration": "10m0s", - "compact-full-write-cold-duration": "4h0m0s", - "compact-throughput": 50331648, - "compact-throughput-burst": 50331648, - "dir": "/var/lib/influxdb/data", - "max-concurrent-compactions": 0, - "max-index-log-file-size": 1048576, - "max-series-per-database": 1000000, - "max-values-per-tag": 100000, - "series-id-set-cache-size": 100, - "wal-dir": "/var/lib/influxdb/wal", - "wal-fsync-delay": "0s" - } -} -``` - -#### Continuous query statistics {metadata="v1.12.3+"} - -The `/debug/vars` response includes a `cq` key with continuous query execution counters: - -```json -{ - "cq": { - "queryOk": 2, - "queryFail": 0 - } -} -``` - -- `queryOk`: Number of CQ executions that completed successfully. -- `queryFail`: Number of CQ executions that failed. - ### `/ping` HTTP endpoint The ping endpoint accepts both `GET` and `HEAD` HTTP requests. @@ -600,7 +551,6 @@ A successful [`CREATE DATABASE` query](/influxdb/v1/query_language/manage-databa | p=\ | Optional if you haven't [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.** | Sets the password for authentication if you've enabled authentication. 
Use with the query string parameter `u`. | | pretty=true | Optional | Enables pretty-printed JSON output. While this is useful for debugging it is not recommended for production use as it consumes unnecessary network bandwidth. | | q=\ | Required | InfluxQL string to execute. See also [Request Body](/influxdb/v1/tools/api/#request-body). | -| time_format=[epoch \| rfc3339] | Optional | Sets the timestamp format in query responses. `epoch` _(default)_ returns epoch timestamps (use with the `epoch` parameter to set precision). `rfc3339` returns timestamps as RFC3339Nano-formatted strings (for example, `2017-03-01T00:16:18.000000000Z`). Returns `400` if set to an invalid value. _Available in InfluxDB v1.12.3+._ | | u=\ | Optional if you haven't [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have read access to the database. Use with the query string parameter `p`. | \* InfluxDB does not truncate the number of rows returned for requests without the `chunked` parameter. @@ -660,20 +610,6 @@ $ curl -G 'http://localhost:8086/query?db=mydb&epoch=s' --data-urlencode 'q=SELE {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[[1488327378,33.1,null,null],[1488327438,12.4,"12","14"]]}]}]} ``` -##### Query data with a `SELECT` statement and the `time_format` parameter {metadata="v1.12.3+"} - -- `time_format=rfc3339`: Return timestamps as RFC3339Nano-formatted strings. 
- -```bash -curl -G 'http://localhost:8086/query?db=mydb&time_format=rfc3339' --data-urlencode 'q=SELECT * FROM "mymeas"' -``` - -The output is similar to the following: - -```json -{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:16:18Z",33.1,null,null],["2017-03-01T00:17:18Z",12.4,"12","14"]]}]}]} -``` - ##### Create a database using HTTP authentication Valid credentials: diff --git a/content/influxdb/v1/troubleshooting/query_management.md b/content/influxdb/v1/troubleshooting/query_management.md index 44409e433b..471976304a 100644 --- a/content/influxdb/v1/troubleshooting/query_management.md +++ b/content/influxdb/v1/troubleshooting/query_management.md @@ -18,8 +18,8 @@ Manage your InfluxQL queries using the following: ## List currently-running queries with `SHOW QUERIES` -`SHOW QUERIES` lists the query id, query text, relevant database, duration, -status, and user of all currently-running queries on your InfluxDB instance. +`SHOW QUERIES` lists the query id, query text, relevant database, and duration +of all currently-running queries on your InfluxDB instance. #### Syntax @@ -31,17 +31,17 @@ SHOW QUERIES ``` > SHOW QUERIES -qid query database duration status user ---- ----- -------- -------- ------ ---- -37 SHOW QUERIES 100368u running admin -36 SELECT mean(myfield) FROM mymeas mydb 3s running jdoe +qid query database duration status +--- ----- -------- -------- ------ +37 SHOW QUERIES 100368u running +36 SELECT mean(myfield) FROM mymeas mydb 3s running ``` ##### Explanation of the output -- `qid`: The id number of the query. Use this value with [`KILL - QUERY`](/influxdb/v1/troubleshooting/query_management/#stop-currently-running-queries-with-kill-query). -- `query`: The query text. -- `database`: The database targeted by the query. +- `qid`: The id number of the query. 
Use this value with [`KILL - QUERY`](/influxdb/v1/troubleshooting/query_management/#stop-currently-running-queries-with-kill-query). +- `query`: The query text. +- `database`: The database targeted by the query. - `duration`: The length of time that the query has been running. See [Query Language Reference](/influxdb/v1/query_language/spec/#durations) for an explanation of time units in InfluxDB databases. @@ -52,7 +52,6 @@ until the query record is cleared from memory. {{% /note %}} - `status`: The current status of the query. -- `user`: The user who initiated the query. Empty if authentication is not enabled. _v1.12.3+_ ## Stop currently-running queries with `KILL QUERY` diff --git a/content/influxdb/v2/.vale.ini b/content/influxdb/v2/.vale.ini index 6ce501cf2e..3538a1d808 100644 --- a/content/influxdb/v2/.vale.ini +++ b/content/influxdb/v2/.vale.ini @@ -9,7 +9,6 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDBv2, InfluxDataDocs, Google, write-good -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -23,20 +22,10 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. write-good.TooWordy = NO -write-good.Weasel = NO # Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... # Ignore full URLs like https://example.com/... 
diff --git a/content/influxdb3/cloud-dedicated/.vale.ini b/content/influxdb3/cloud-dedicated/.vale.ini index 7eb8ac1266..35dfc38e56 100644 --- a/content/influxdb3/cloud-dedicated/.vale.ini +++ b/content/influxdb3/cloud-dedicated/.vale.ini @@ -9,7 +9,6 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Cloud-Dedicated, Google, write-good -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -23,20 +22,10 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. write-good.TooWordy = NO -write-good.Weasel = NO # Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... # Ignore full URLs like https://example.com/... diff --git a/content/influxdb3/cloud-serverless/.vale.ini b/content/influxdb3/cloud-serverless/.vale.ini index 1c7bb556c0..9ebc431b72 100644 --- a/content/influxdb3/cloud-serverless/.vale.ini +++ b/content/influxdb3/cloud-serverless/.vale.ini @@ -9,7 +9,6 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Cloud-Serverless, Google, write-good -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -23,20 +22,10 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. 
The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. write-good.TooWordy = NO -write-good.Weasel = NO # Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... # Ignore full URLs like https://example.com/... diff --git a/content/influxdb3/clustered/.vale.ini b/content/influxdb3/clustered/.vale.ini index d1ffeddd4f..2ae7567c0a 100644 --- a/content/influxdb3/clustered/.vale.ini +++ b/content/influxdb3/clustered/.vale.ini @@ -9,7 +9,6 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Clustered, Google, write-good -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -23,20 +22,10 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. 
write-good.TooWordy = NO -write-good.Weasel = NO # Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... # Ignore full URLs like https://example.com/... diff --git a/content/influxdb3/core/.vale.ini b/content/influxdb3/core/.vale.ini index 7402618366..03f6d282a3 100644 --- a/content/influxdb3/core/.vale.ini +++ b/content/influxdb3/core/.vale.ini @@ -14,7 +14,6 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, InfluxDB3-Core, Google, write-good -# --- Disabled mechanical rules --- Google.Acronyms = NO Google.DateFormat = NO Google.Ellipses = NO @@ -28,20 +27,10 @@ Vale.Spelling = NO # false positives from URLs, file paths, and code. The accepted terms in # accept.txt still work for spelling checks via InfluxDataDocs.Spelling. Vale.Terms = NO - -# --- Disabled style rules (high false-positive rate in technical docs) --- -Google.Contractions = NO -Google.FirstPerson = NO -Google.Passive = NO -Google.We = NO -Google.Will = NO -write-good.Cliches = NO -write-good.Passive = NO -write-good.So = NO -write-good.ThereIs = NO -# Flags legitimate technical terms like "aggregate", "expiration", "multiple". +# Disable write-good.TooWordy - flags legitimate technical terms like +# "aggregate", "expiration", "multiple", "However" that are standard in +# database documentation. write-good.TooWordy = NO -write-good.Weasel = NO # Ignore URL paths like /api/v3/..., /cli/..., /influxdb3/... # Ignore full URLs like https://example.com/... 
diff --git a/content/influxdb3/enterprise/admin/pachatree/_index.md b/content/influxdb3/enterprise/admin/pachatree/_index.md new file mode 100644 index 0000000000..ce0dc7c7da --- /dev/null +++ b/content/influxdb3/enterprise/admin/pachatree/_index.md @@ -0,0 +1,284 @@ +--- +title: Performance upgrade preview +seotitle: Performance upgrade preview for InfluxDB 3 Enterprise +description: > + Preview performance upgrades in InfluxDB 3 Enterprise with improved + single-series query performance, consistent resource usage, wide-and-sparse + table support, column families, and bulk data export. +menu: + influxdb3_enterprise: + name: Performance upgrade preview +weight: 12 +influxdb3/enterprise/tags: [storage, performance, beta, preview] +related: + - /influxdb3/enterprise/get-started/setup/ + - /influxdb3/enterprise/admin/pachatree/configure/ + - /influxdb3/enterprise/admin/pachatree/monitor/ + - /influxdb3/enterprise/admin/performance-tuning/ +--- + +> [!Warning] +> #### Private preview beta +> The performance upgrade preview is available to {{% product-name %}} Trial +> and Commercial users as a private beta. These features are subject to breaking changes +> and **should not be used for production workloads**. +> +> To share feedback on this preview, see [Support and feedback options](#bug-reports-and-feedback). +> Your feedback on stability +> and performance at scale helps shape the future of InfluxDB 3. + +## What is the performance upgrade preview? + +{{% product-name %}} includes a private preview of major upgrades to the +storage layer that improve how data is written, stored, compressed, compacted, +and queried. +These upgrades touch every layer of the storage path -- from a new on-disk file +format to how fields are organized into column families and how compaction +manages resources. + +## Why these upgrades + +The existing storage layer in InfluxDB 3 was built around Apache Parquet and +optimized for analytical workloads. 
+Customers running high-cardinality, wide-schema, and query-intensive workloads +need better single-series query performance, more predictable resource usage, +and the schema flexibility that made InfluxDB v1 and v2 popular. +These upgrades address those gaps while maintaining full compatibility with +InfluxDB's data model and query languages. + +Key improvements include: + +- **Faster single-series queries** -- Single-digit millisecond response times + for highly selective time-series queries. +- **Consistent resource usage** -- Bounded CPU and memory during persistence + and compaction, eliminating spikes during heavy ingestion or compaction bursts. +- **Wide-and-sparse table support** -- Schemas with up to millions of columns + and dynamic schema evolution without expensive rewrites. +- **Column families** -- Group related fields for efficient compression and I/O, + so queries only read the data they need. +- **Automatic distinct value caches** -- Transparent caching of distinct values + for reduced latency on metadata queries. +- **Bulk data export** -- Export compacted data as Parquet files for use with + external tools. +- **Automatic Parquet upgrade** -- Seamlessly migrate existing data with + hybrid query mode during the transition. + +## Enable the preview + +Add the `--use-pacha-tree` flag to your +[`influxdb3 serve` startup command](/influxdb3/enterprise/get-started/setup/): + +```bash +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store file \ + --data-dir ~/.influxdb3 \ + --use-pacha-tree +``` + +You can also enable the preview with an environment variable: + +```bash +export INFLUXDB3_ENTERPRISE_USE_PACHA_TREE=true +influxdb3 serve ... +``` + +The `--use-pacha-tree` flag exposes additional configuration options prefixed +with `--pt-`. 
+See [Configure the preview](/influxdb3/enterprise/admin/pachatree/configure/) +for tuning options, or +[Monitor the preview](/influxdb3/enterprise/admin/pachatree/monitor/) +for system tables and telemetry. + +## What's changed + +These upgrades touch every layer of the storage path -- from the on-disk file +format to how data is compressed, organized, and compacted. + +### New file format + +Data is stored in a new columnar file format (`.pt` files) optimized for +time-series workloads. +All data within a file is sorted by column family key, series key, and +timestamp, which enables efficient compaction, querying, and filtering. + +The format uses type-specific compression algorithms that adapt to data +characteristics -- delta-delta RLE for timestamps, Gorilla encoding for floats, +dictionary encoding for low-cardinality strings, and more -- typically +achieving 5-20x compression ratios. + +### Column families + +Column families let you group related fields together so that queries only read +the data they need. +Fields in the same family are stored together on disk. +For wide tables with hundreds of fields, this dramatically reduces I/O. + +Use the `::` (double-colon) delimiter in field names to assign fields to a +family. +The portion before `::` is the family name; everything after is the field name. + +```txt +metrics,host=sA cpu::usage_user=55.2,cpu::usage_sys=12.1,cpu::usage_idle=32.7 1000000000 +metrics,host=sA mem::free=2048i,mem::used=6144i,mem::cached=1024i 1000000000 +metrics,host=sA disk::read_bytes=50000i,disk::write_bytes=32000i 1000000000 +``` + +This creates three column families: + +| Family | Fields | +|:-------|:-------| +| `cpu` | `usage_user`, `usage_sys`, `usage_idle` | +| `mem` | `free`, `used`, `cached` | +| `disk` | `read_bytes`, `write_bytes` | + +When a query references only `mem::free`, the storage layer reads only the +`mem` family block and skips `cpu` and `disk` data entirely. + +> [!Note] +> Only the first `::` is significant. 
+> A field name like `a::b::c` creates family `a` with field `b::c`. + +Fields written without `::` are assigned to auto-generated families (named +`__0`, `__1`, etc.), each holding up to 100 fields. +Explicit family names are an excellent way to optimize performance with known +workloads, but they're not required to achieve good results. + +### Bounded compaction + +The upgraded storage layer organizes compacted data into 24-hour UTC windows +and progresses data through four compaction levels (L1 through L4). +Compaction runs continuously in the background with a byte-based memory budget +(default: 50% of system RAM), so it never causes resource spikes. + +Old files are cleaned up after a cooldown period, ensuring query replicas have +time to see new checkpoints before old data is removed. +Failures are automatically retried, and the system is designed to be +self-healing for transient issues. + +### Automatic distinct value caches + +When enabled with `--enable-auto-dvc`, the storage layer automatically caches +distinct tag values to dramatically improve the performance of metadata queries. +This benefits `SHOW TAG VALUES` queries in InfluxQL and introduces a new +`tag_values()` SQL function: + +```sql +-- InfluxQL (automatically uses cache) +SHOW TAG VALUES FROM cpu WITH KEY = "host" + +-- SQL (auto-creates cache if needed) +SELECT * FROM tag_values('cpu') +``` + +Enable Auto-DVC at startup: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --enable-auto-dvc +``` + +## Upgrade from Parquet + +Existing clusters with Parquet data can upgrade with zero manual migration. +The upgrade is fully automatic and occurs on initial startup. + +When you restart a cluster with `--use-pacha-tree`, the system: + +1. Detects existing Parquet data and enters hybrid mode. +2. Clears the legacy WAL on ingest nodes and streams Parquet files through a + conversion pipeline. +3. Integrates converted files into the new storage format through compaction. +4. 
Automatically transitions once all data is migrated. + +During hybrid mode, queries merge results from both the legacy and upgraded +storage layers. +If there is a conflict (same series key and timestamp), the upgraded data takes +precedence. + +### Monitor upgrade progress + +Use system tables to track upgrade status: + +```sql +-- Per-node upgrade status +SELECT * FROM system.upgrade_parquet_node + +-- Per-file migration progress +SELECT * FROM system.upgrade_parquet +``` + +### Configure upgrade behavior + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-disable-hybrid-query` | Disable hybrid query mode. Queries return only data from the upgraded storage layer, even during migration. | `false` | +| `--pt-upgrade-poll-interval` | Polling interval for upgrade status monitoring. | `5s` | + +## Export to Parquet + +Export compacted data as Parquet files for use with external tools like pandas +or DuckDB. + +> [!Note] +> Data must be compacted before it can be exported. +> Uncompacted data is not available for export at this time. 
+ +### Export workflow + +```bash +# Step 1: List available databases +influxdb3 export databases + +# Step 2: List tables in a database +influxdb3 export tables -d mydb + +# Step 3: List compacted 24-hour windows for a table +influxdb3 export windows -d mydb -t cpu + +# Step 4: Export data as Parquet files +influxdb3 export data -d mydb -t cpu -o ./export_output +``` + +To export specific time windows only: + +```bash +influxdb3 export data -d mydb -t cpu -w 2026-01-15,2026-01-16 -o ./export_output +``` + +## Who should try the preview + +Consider enabling the preview in your staging or development environment if +you have workloads with: + +- High cardinality or wide tables +- Frequent backfill across time ranges +- Query-heavy access patterns requiring low latency +- Sparse schemas with dynamic column creation +- Resource constraints where bounded memory and CPU usage matter + +> [!Important] +> #### Important: New file format +> +> These upgrades use a new columnar file format (`.pt` files). +> When you enable the preview, new data is written in the new format. +> Hybrid query mode (enabled by default) allows querying across both legacy +> Parquet data and new `.pt` data seamlessly. +> +> For the beta, we recommend starting with a fresh setup for +> testing and evaluation rather than converting existing data. + +## Bug reports and feedback + +To share feedback on the performance upgrade preview: + +- Contact [InfluxData support](https://support.influxdata.com) +- Reach out to your InfluxData account team + +Your feedback on stability and performance at scale helps shape the future of +InfluxDB 3. 
+ +{{< children hlevel="h2" readmore=true hr=true >}} diff --git a/content/influxdb3/enterprise/admin/pachatree/configure.md b/content/influxdb3/enterprise/admin/pachatree/configure.md new file mode 100644 index 0000000000..1ea8e7e264 --- /dev/null +++ b/content/influxdb3/enterprise/admin/pachatree/configure.md @@ -0,0 +1,450 @@ +--- +title: Configure the performance upgrade preview +seotitle: Performance upgrade preview configuration reference for InfluxDB 3 Enterprise +description: > + Complete reference for all configuration options available with the InfluxDB 3 Enterprise + performance upgrades, including WAL, snapshot, compaction, caching, and replication settings. +menu: + influxdb3_enterprise: + name: Configuration reference + parent: Performance upgrade preview +weight: 202 +influxdb3/enterprise/tags: [storage, configuration, beta, preview, reference] +related: + - /influxdb3/enterprise/admin/pachatree/ + - /influxdb3/enterprise/admin/pachatree/monitor/ + - /influxdb3/enterprise/admin/performance-tuning/ + - /influxdb3/enterprise/reference/config-options/ +--- + +> [!Warning] +> #### Private preview beta +> The performance upgrade preview is available to {{% product-name %}} Trial +> and Commercial users as a private beta. These features are subject to breaking changes +> and **should not be used for production workloads**. + +This page provides a complete reference for all configuration options available +with the performance upgrade preview. +All options require the `--use-pacha-tree` flag. + +If an option is omitted, the preview either derives a value from the existing +`influxdb3 serve` configuration or falls back to an engine-specific default +that balances resource usage and throughput. + +> [!Important] +> Set `--num-io-threads` to the number of cores on the machine when using the +> performance upgrade preview. 
+ +- [General](#general) +- [WAL](#wal) +- [Snapshot](#snapshot) +- [Gen0](#gen0) +- [File cache](#file-cache) +- [Replication (query nodes)](#replication-query-nodes) +- [Compactor](#compactor) +- [L1-L4 level tuning](#l1-l4-level-tuning) +- [Example configurations](#example-configurations) + +## General + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--use-pacha-tree` | Enable the performance upgrade preview. Required for any other `--pt-` option to have effect. | Disabled | +| `--pt-engine-path-prefix` | Optional path prefix for all engine data (WAL, Gen0, etc.). Max 32 characters. Must start and end with alphanumeric; inner characters allow `[a-zA-Z0-9._-]`. Shorter paths improve partitioning in object stores. | No prefix | +| `--pt-max-columns` | Maximum total columns across the entire instance. Must be at least 2. | ~6.5M | +| `--pt-enable-retention` | Enable retention enforcement. | `true` | +| `--pt-disable-hybrid-query` | Disable hybrid query mode. When the preview is enabled with existing Parquet data, queries normally merge results from both engines. Set this flag to query only from the new engine. | `false` | +| `--enable-auto-dvc` | Enable automatic distinct value caching for `SHOW TAG VALUES` queries and the `tag_values()` SQL function. See [Auto-DVC](/influxdb3/enterprise/admin/pachatree/#automatic-distinct-value-caches). | Disabled | +| `--pt-upgrade-poll-interval` | Polling interval for Parquet-to-PachaTree upgrade status monitoring. See [Upgrade from Parquet](/influxdb3/enterprise/admin/pachatree/#upgrade-from-parquet). | `5s` | + +### Engine path prefix + +Use a short prefix to improve partitioning in object stores: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-engine-path-prefix mydata +``` + +### Hybrid query mode + +When you enable the preview on an instance with existing Parquet data, +hybrid query mode merges results from both the legacy Parquet engine and the new +engine. 
+Disable hybrid mode to query only the new engine: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-disable-hybrid-query +``` + +## WAL + +Configure Write-Ahead Log (WAL) behavior for durability and performance. + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-wal-flush-interval` | Flush interval for the WAL. | Inherits `--wal-flush-interval` (1s) | +| `--pt-wal-flush-concurrency` | WAL flush concurrency. | `max(io_threads - 2, 2)` | +| `--pt-wal-max-buffer-size` | Maximum in-memory WAL buffer before a flush is triggered regardless of the flush interval. Increase this if WAL files are flushed before the interval elapses. | `15MB` | +| `--pt-wal-snapshots-to-keep` | Number of snapshot manifests worth of WAL history to retain. Must be greater than 0. | `5` | + +### WAL buffer size + +The WAL buffer accumulates incoming writes before flushing to object storage. +Larger buffers reduce flush frequency and produce larger WAL files, but increase +memory usage: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-wal-max-buffer-size 30MB +``` + +### Flush interval and concurrency + +Control how frequently the WAL flushes and how many workers run flushes in +parallel: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-wal-flush-interval 2s \ + --pt-wal-flush-concurrency 8 +``` + +## Snapshot + +Configure snapshot buffer behavior, which controls how WAL files are merged +into Gen0 files. + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-snapshot-size` | Maximum size of the active snapshot bucket before it is rotated for snapshotting. | `250MB` | +| `--pt-snapshot-duration` | Time-based snapshot rotation trigger. Controls how often the ingester creates snapshots. Also used on query nodes as the bucket rotation interval for the replica buffer. 
| `10s` | +| `--pt-max-concurrent-snapshots` | Maximum number of concurrent snapshot operations before applying backpressure to writers. | `5` | +| `--pt-merge-threshold-size` | Maximum unmerged file size before triggering a merge operation. | `--pt-snapshot-size` / 4 (62.5MB) | + +### Snapshot size and duration + +Control when snapshot rotation triggers: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-snapshot-size 500MB \ + --pt-snapshot-duration 15s +``` + +### Merge threshold + +Set the size threshold that triggers background merge operations. +Lower values result in more frequent merges: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-merge-threshold-size 125MB +``` + +## Gen0 + +Control the size of Gen0 files produced during merge operations. + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-gen0-max-rows-per-file` | Upper bound on rows per Gen0 file emitted during merge. | `10000000` (10M) | +| `--pt-gen0-max-bytes-per-file` | Upper bound on bytes per Gen0 file emitted during merge. | `100MB` | + +### Gen0 file size limits + +Control the size of Gen0 files for query and compaction performance: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-gen0-max-rows-per-file 5000000 \ + --pt-gen0-max-bytes-per-file 50MB +``` + +## File cache + +Configure data file caching for query performance. + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-file-cache-size` | Size of the data file cache (bytes or %). Set to `0` on dedicated ingest nodes. | Mirrors `--parquet-mem-cache-size` | +| `--pt-disable-data-file-cache` | Disable data file caching. Set to `true` on dedicated ingest nodes. | `false` (automatically `true` if `--disable-parquet-mem-cache` is set) | +| `--pt-file-cache-recency` | Only cache files newer than this age. Pre-caching on all-in-one and query nodes is based on this value. 
| Mirrors `--parquet-mem-cache-query-path-duration` | +| `--pt-file-cache-evict-after` | Evict cached files that have not been read within this duration. | `24h` | + +> [!Note] +> #### Dedicated ingest nodes +> On dedicated ingest nodes (`--mode ingest`), disable the data file cache to avoid +> wasting memory on data that ingest nodes never query. +> Set `--pt-file-cache-size 0` or use `--pt-disable-data-file-cache`. +> These options must be explicitly set—they are not applied automatically when +> `--mode ingest` is used. +> See [Disable caching on ingest nodes](#disable-caching-on-ingest-nodes) for an example. + +### File cache size + +Set the maximum size for the data file cache: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-file-cache-size 8GB +``` + +### Cache recency filter + +Only cache files containing data within a recent time window: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-file-cache-recency 24h +``` + +### Disable caching on ingest nodes + +For dedicated ingest nodes, disable the data file cache to save memory: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --mode ingest \ + --pt-disable-data-file-cache +``` + +## Replication (query nodes) + +Configure replication behavior for query nodes in distributed deployments. + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-wal-replication-interval` | Polling interval to check for new WAL files to replicate from ingest nodes. | `250ms` | +| `--pt-wal-replica-recovery-concurrency` | Number of concurrent WAL file fetches during replica recovery or catchup. | `8` | +| `--pt-wal-replica-steady-concurrency` | Number of concurrent WAL file fetches during steady-state replication. | `8` | +| `--pt-wal-replica-queue-size` | Size of the queue between WAL file fetching and replica buffer merging. | `100` | +| `--pt-wal-replica-recovery-tail-skip-limit` | Number of consecutive missing WAL files before stopping replica recovery. 
| `128` | +| `--pt-replica-gen0-load-concurrency` | Limit on the number of Gen0 files loaded concurrently when the replica starts. | `16` | +| `--pt-replica-max-buffer-size` | Maximum replica buffer size (bytes or %). Used by query nodes to store WAL files replicated from ingest nodes. | 50% of available memory, capped at 16GB | + +### Recovery concurrency + +Control parallelism during query node recovery or catchup: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --mode query \ + --pt-wal-replica-recovery-concurrency 16 +``` + +### Steady-state replication + +Configure ongoing replication performance: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --mode query \ + --pt-wal-replica-steady-concurrency 4 \ + --pt-wal-replica-queue-size 200 +``` + +### Replica buffer size + +Control the maximum buffer size for replicated data on query nodes: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --mode query \ + --pt-replica-max-buffer-size 8GB +``` + +## Compactor + +Configure background compaction behavior. +The compactor organizes data into fixed 24-hour UTC windows and progresses data +through four compaction levels (L1 through L4). + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-partition-count` | Target number of partitions per compaction window. | `1` | +| `--pt-compactor-input-size-budget` | Maximum total input bytes across all active compaction jobs. Acts as an admission control budget for the compactor scheduler. | 50% of system memory at startup | +| `--pt-final-compaction-age` | Age threshold for final compaction. When all L1-L3 run sets in a window are older than this, a final compaction merges everything into L4. | `72h` | +| `--pt-compactor-cleanup-cooldown` | Cooldown after checkpoint publish before replaced files can be cleaned up. | `10min` | + +### Compaction budget + +Control total memory allocated to active compaction jobs: + +```bash +influxdb3 serve \ + # ... 
+ --use-pacha-tree \ + --pt-compactor-input-size-budget 8GB +``` + +### Final compaction age + +Control when windows receive their final compaction into L4: + +```bash +influxdb3 serve \ + # ... + --use-pacha-tree \ + --pt-final-compaction-age 48h +``` + +## L1-L4 level tuning + +These options control per-level compaction parameters. +Data enters L1 from snapshot batch compaction and promotes through levels +based on run set count triggers. + +| Level | Role | Default tail target | Default file size | Promotion trigger | +|:------|:-----|:--------------------|:------------------|:------------------| +| **L1** | Ingest landing zone | 600MB | 25MB | 3 run sets | +| **L2** | First promotion tier | 1.2GB | 40MB | 3 run sets | +| **L3** | Second promotion tier | 2.5GB | 75MB | 4 run sets | +| **L4** | Terminal (fully compacted) | 50GB | 125MB | N/A | + +### L1 options + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-l1-tail-target-bytes` | L1 tail run set target size. | `600MB` | +| `--pt-l1-target-file-bytes` | L1 target file size. | `25MB` | +| `--pt-l1-promotion-count` | Number of L1 run sets that triggers promotion to L2. | `3` | + +### L2 options + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-l2-tail-target-bytes` | L2 tail run set target size. | `1.2GB` | +| `--pt-l2-target-file-bytes` | L2 target file size. | `40MB` | +| `--pt-l2-promotion-count` | Number of L2 run sets that triggers promotion to L3. | `3` | + +### L3 options + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-l3-tail-target-bytes` | L3 tail run set target size. | `2.5GB` | +| `--pt-l3-target-file-bytes` | L3 target file size. | `75MB` | +| `--pt-l3-promotion-count` | Number of L3 run sets that triggers promotion to L4. | `4` | + +### L4 options + +| Option | Description | Default | +|:-------|:------------|:--------| +| `--pt-l4-tail-target-bytes` | L4 tail run set target size. 
| `50GB` | +| `--pt-l4-target-file-bytes` | L4 target file size. | `125MB` | + +## Example configurations + +### Development (minimal resources) + +```bash +influxdb3 serve \ + --node-id dev01 \ + --cluster-id dev-cluster \ + --object-store file \ + --data-dir ~/.influxdb3 \ + --use-pacha-tree \ + --num-io-threads 2 \ + --pt-file-cache-size 512MB \ + --pt-wal-max-buffer-size 5MB \ + --pt-snapshot-size 100MB +``` + +### Production all-in-one (8 cores, 32 GB RAM) + +```bash +influxdb3 serve \ + --node-id prod01 \ + --cluster-id prod-cluster \ + --object-store s3 \ + --bucket S3_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --use-pacha-tree \ + --num-io-threads 8 \ + --pt-file-cache-size 8GB \ + --pt-wal-max-buffer-size 30MB \ + --pt-snapshot-size 500MB \ + --pt-wal-flush-concurrency 4 +``` + +### High-throughput ingest node + +```bash +influxdb3 serve \ + --node-id ingest01 \ + --cluster-id prod-cluster \ + --object-store s3 \ + --bucket S3_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --use-pacha-tree \ + --mode ingest \ + --num-io-threads 16 \ + --pt-wal-max-buffer-size 50MB \ + --pt-wal-flush-interval 2s \ + --pt-wal-flush-concurrency 8 \ + --pt-snapshot-size 1GB \ + --pt-disable-data-file-cache +``` + +### Query-optimized node + +```bash +influxdb3 serve \ + --node-id query01 \ + --cluster-id prod-cluster \ + --object-store s3 \ + --bucket S3_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --use-pacha-tree \ + --mode query \ + --num-io-threads 16 \ + --pt-file-cache-size 16GB \ + --pt-file-cache-recency 24h \ + --pt-replica-max-buffer-size 8GB +``` + +### Dedicated compactor + +```bash +influxdb3 serve \ + --node-id compact01 \ + --cluster-id prod-cluster \ + --object-store s3 \ + --bucket S3_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + 
--use-pacha-tree \ + --mode compact \ + --num-io-threads 8 \ + --pt-compactor-input-size-budget 12GB +``` diff --git a/content/influxdb3/enterprise/admin/pachatree/monitor.md b/content/influxdb3/enterprise/admin/pachatree/monitor.md new file mode 100644 index 0000000000..6814d9d0f1 --- /dev/null +++ b/content/influxdb3/enterprise/admin/pachatree/monitor.md @@ -0,0 +1,354 @@ +--- +title: Monitor the performance upgrade preview +seotitle: Monitor the performance upgrade preview in InfluxDB 3 Enterprise +description: > + Use system tables and query telemetry to monitor file status, query execution, + and overall performance when using InfluxDB 3 Enterprise performance upgrades. +menu: + influxdb3_enterprise: + name: Monitor + parent: Performance upgrade preview +weight: 203 +influxdb3/enterprise/tags: [storage, monitoring, beta, preview, system tables] +related: + - /influxdb3/enterprise/admin/pachatree/ + - /influxdb3/enterprise/admin/pachatree/configure/ + - /influxdb3/enterprise/admin/query-system-data/ +--- + +> [!Warning] +> #### Private preview beta +> The performance upgrade preview is available to {{% product-name %}} Trial +> and Commercial users as a private beta. These features are subject to breaking changes +> and **should not be used for production workloads**. + +{{% product-name %}} provides system tables and a query telemetry endpoint to +monitor file status, query execution, and overall performance when using the +performance upgrade preview. + +## System tables + +The upgraded storage engine exposes internal state through system tables that +you can query with SQL. 
+ +### system.pt_ingest_wal + +View WAL files and their partitions: + +```sql +SELECT * FROM system.pt_ingest_wal; +``` + +Example output: + +| wal_file_id | partition_id | database_id | table_id | min_time | max_time | row_count | size_bytes | +|:------------|:-------------|:------------|:---------|:---------|:---------|:----------|:-----------| +| wal_001 | p_1 | db_1 | t_1 | 2024-01-01T00:00:00Z | 2024-01-01T00:10:00Z | 50000 | 2456789 | +| wal_002 | p_1 | db_1 | t_1 | 2024-01-01T00:10:00Z | 2024-01-01T00:20:00Z | 48000 | 2345678 | + +Use this table to monitor: + +- **WAL accumulation**: Track the number and size of unmerged WAL files +- **Partition distribution**: See how data is distributed across partitions +- **Time coverage**: Verify data time ranges + +#### Monitor WAL backlog + +Check for WAL accumulation that may indicate merging is falling behind: + +```sql +SELECT + COUNT(*) as wal_file_count, + SUM(size_bytes) / 1024 / 1024 as total_size_mb, + MIN(min_time) as oldest_data, + MAX(max_time) as newest_data +FROM system.pt_ingest_wal; +``` + +### system.pt_ingest_files + +View Gen0 files with metadata: + +```sql +SELECT * FROM system.pt_ingest_files; +``` + +Example output: + +| file_id | generation | database_id | table_id | min_time | max_time | row_count | size_bytes | +|:--------|:-----------|:------------|:---------|:---------|:---------|:----------|:-----------| +| gen0_001 | 0 | db_1 | t_1 | 2024-01-01T00:00:00Z | 2024-01-01T01:00:00Z | 500000 | 45678901 | +| gen0_002 | 0 | db_1 | t_1 | 2024-01-01T01:00:00Z | 2024-01-01T02:00:00Z | 480000 | 43567890 | + +Use this table to monitor: + +- **File counts per generation**: Track compaction progress +- **File sizes**: Verify files are within configured limits +- **Time ranges**: Identify Gen0 files that span multiple compaction windows + +#### Monitor file distribution + +Check file distribution and compaction status: + +```sql +SELECT + generation, + COUNT(*) as file_count, + SUM(row_count) as 
total_rows, + SUM(size_bytes) / 1024 / 1024 as total_size_mb, + AVG(size_bytes) / 1024 / 1024 as avg_file_size_mb +FROM system.pt_ingest_files +GROUP BY generation +ORDER BY generation; +``` + +## Parquet upgrade status + +If you [upgraded from Parquet](/influxdb3/enterprise/admin/pachatree/#upgrade-from-parquet), +use these system tables to monitor migration progress. + +### system.upgrade_parquet_node + +View per-node upgrade status: + +```sql +SELECT * FROM system.upgrade_parquet_node; +``` + +Monitor this table to confirm each node reaches `completed` status. +During the upgrade, nodes progress through detection, conversion, and +finalization stages. + +### system.upgrade_parquet + +View per-file migration progress: + +```sql +SELECT * FROM system.upgrade_parquet; +``` + +Use this table to track individual file conversions during the migration. +The status updates on a polling interval (default 5 seconds, configurable with +`--pt-upgrade-poll-interval`). + +## Query telemetry + +The query telemetry endpoint provides detailed execution statistics for +analyzing query performance. + +### Enable query telemetry + +Query the telemetry endpoint after executing a query: + +```bash +curl -X GET "http://localhost:8181/api/v3/query_sql_telemetry" \ + -H "Authorization: Bearer AUTH_TOKEN" +``` + +Replace `AUTH_TOKEN` with your authentication token. 
+ +### Telemetry response + +The response includes: + +| Field | Description | +|:------|:------------| +| `query_id` | Unique identifier for the query | +| `execution_time_us` | Total execution time in microseconds | +| `chunks` | Per-chunk statistics | +| `cache_stats` | Cache hit rates by type | +| `file_stats` | File-level read statistics | + +### Example telemetry output + +```json +{ + "query_id": "q_12345", + "execution_time_us": 4523, + "chunks": [ + { + "chunk_id": "c_1", + "files_scanned": 3, + "blocks_processed": 12, + "rows_read": 24000, + "rows_returned": 150, + "bytes_read": 1234567 + } + ], + "cache_stats": { + "gen0_hits": 5, + "gen0_misses": 1, + "compacted_hits": 8, + "compacted_misses": 2 + } +} +``` + +## Performance analysis + +### Query performance metrics + +Track these key metrics for query performance: + +| Metric | Good | Warning | Action | +|:-------|:-----|:--------|:-------| +| Cache hit rate | >80% | <60% | Increase `--pt-file-cache-size` or `--pt-file-cache-recency` | +| Rows read vs returned ratio | <100:1 | >1000:1 | Add more selective predicates | + +### Ingest performance metrics + +Monitor these metrics for write performance: + +| Metric | Healthy | Warning | Action | +|:-------|:--------|:--------|:-------| +| WAL file count | <50 | >100 | Increase `--pt-wal-flush-concurrency` | +| Gen0 file count | <100 | >200 | Increase `--pt-compactor-input-size-budget` | + +### Monitor with SQL + +Create a performance summary query: + +```sql +-- File generation summary +SELECT + 'Gen0 files' as metric, + COUNT(*) as count, + SUM(size_bytes) / 1024 / 1024 as size_mb +FROM system.pt_ingest_files +WHERE generation = 0 + +UNION ALL + +SELECT + 'Compacted files' as metric, + COUNT(*) as count, + SUM(size_bytes) / 1024 / 1024 as size_mb +FROM system.pt_ingest_files +WHERE generation > 0 + +UNION ALL + +SELECT + 'WAL files' as metric, + COUNT(*) as count, + SUM(size_bytes) / 1024 / 1024 as size_mb +FROM system.pt_ingest_wal; +``` + +## 
Troubleshooting + +### High WAL file count + +**Symptom**: `system.pt_ingest_wal` shows many accumulated files. + +**Possible causes**: + +- Merge operations falling behind write rate +- Insufficient flush concurrency +- Object storage latency + +**Solutions**: + +1. Increase flush concurrency: + ```bash + --pt-wal-flush-concurrency 8 + ``` + +2. Increase WAL flush interval to create larger, fewer files: + ```bash + --pt-wal-flush-interval 5s + ``` + +3. Increase the WAL buffer size so each flush produces a larger file: + ```bash + --pt-wal-max-buffer-size 30MB + ``` + +4. Check object storage performance and connectivity. + +### High cache miss rate + +**Symptom**: `cache_stats` shows >40% miss rate. + +**Possible causes**: + +- Cache size too small for working set +- Cache recency window too narrow +- Random access patterns across time ranges + +**Solutions**: + +1. Increase cache size: + ```bash + --pt-file-cache-size 16GB + ``` + +2. Extend cache recency window: + ```bash + --pt-file-cache-recency 24h + ``` + +3. Extend eviction timeout: + ```bash + --pt-file-cache-evict-after 48h + ``` + +### Slow compaction + +**Symptom**: Gen0 file count continues to grow. + +**Possible causes**: + +- Compaction budget too low for write volume +- High write rate overwhelming compaction +- Snapshot size too large, creating oversized Gen0 files + +**Solutions**: + +1. Increase the compaction input size budget: + ```bash + --pt-compactor-input-size-budget 12GB + ``` + +2. Reduce snapshot size to create smaller, more frequent Gen0 files: + ```bash + --pt-snapshot-size 125MB + ``` + +3. For distributed deployments, add dedicated compactor nodes: + ```bash + influxdb3 serve \ + # ... + --use-pacha-tree \ + --mode compact + ``` + +### Query node lag + +**Symptom**: Query nodes return stale data. 
+ +**Possible causes**: + +- Replication falling behind +- Network latency to object storage +- Insufficient replica concurrency + +**Solutions**: + +For a full list of replication options, see +[Replication (query nodes)](/influxdb3/enterprise/admin/pachatree/configure/#replication-query-nodes). + +1. Increase replication concurrency: + ```bash + --pt-wal-replica-steady-concurrency 8 + ``` + +2. Reduce the replication polling interval: + ```bash + --pt-wal-replication-interval 100ms + ``` + +3. Increase replica queue size: + ```bash + --pt-wal-replica-queue-size 200 + ``` diff --git a/content/influxdb3/enterprise/admin/tokens/resource/create.md b/content/influxdb3/enterprise/admin/tokens/resource/create.md index c2e3b54b64..e7de10b617 100644 --- a/content/influxdb3/enterprise/admin/tokens/resource/create.md +++ b/content/influxdb3/enterprise/admin/tokens/resource/create.md @@ -31,7 +31,7 @@ list_code_example: | "token_name": "Read-write for DATABASE1, DATABASE2", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE1","DATABASE2"], + "resource_identifier": ["DATABASE1","DATABASE2"], "actions": ["read","write"] }], "expiry_secs": 300000 @@ -130,8 +130,8 @@ In the request body, provide the following parameters: - `token_name`: a description of the token, unique within the instance - `resource_type`: the resource type for the token, which is always `db` -- `resource_names`: an array of database names to grant permissions to - - The `resource_names` field supports the `*` wildcard, which grants read or write +- `resource_identifier`: an array of database names to grant permissions to + - The resource identifier field supports the `*` wildcard, which grants read or write permissions to all databases. - `permissions`: an array of token permission actions (`"read"`, `"write"`) for the database - `expiry_secs`: Specify the token expiration time in seconds. 
@@ -149,7 +149,7 @@ The following example shows how to use the HTTP API to create a database token: "token_name": "Read-write for DATABASE1, DATABASE2", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE1","DATABASE2"], + "resource_identifier": ["DATABASE1","DATABASE2"], "actions": ["read","write"] }], "expiry_secs": 300000 @@ -211,7 +211,7 @@ curl \ "token_name": "Read/write token for DATABASE_NAME", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE_NAME"], + "resource_identifier": ["DATABASE_NAME"], "actions": ["read","write"] }] }' @@ -246,7 +246,7 @@ curl \ "token_name": "Read/write token for all databases", "permissions": [{ "resource_type": "db", - "resource_names": ["*"], + "resource_identifier": ["*"], "actions": ["read","write"] }] }' @@ -281,7 +281,7 @@ curl \ "token_name": "Read-only token for DATABASE_NAME", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE_NAME"], + "resource_identifier": ["DATABASE_NAME"], "actions": ["read"] }] }' @@ -316,7 +316,7 @@ curl \ "token_name": "Read-only token for DATABASE_NAME, DATABASE2_NAME", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE_NAME","DATABASE2_NAME"], + "resource_identifier": ["DATABASE_NAME","DATABASE2_NAME"], "actions": ["read"] }] }' @@ -352,7 +352,7 @@ curl \ "token_name": "Read/write token for DATABASE_NAME with 7d expiration", "permissions": [{ "resource_type": "db", - "resource_names": ["DATABASE_NAME"], + "resource_identifier": ["DATABASE_NAME"], "actions": ["read","write"] }], "expiry_secs": 604800 @@ -438,8 +438,8 @@ In the request body, provide the following parameters: - `token_name`: a description of the token, unique within the instance - `resource_type`: the resource type for the token, which is `system` for system tokens -- `resource_names`: an array of system resource names to grant permissions to - - The `resource_names` field supports the `*` wildcard, which grants read or write +- 
`resource_identifier`: an array of system resource names to grant permissions to + - The resource identifier field supports the `*` wildcard, which grants read or write permissions to all system information resources. - `permissions`: an array of token permission actions (only `"read"` for system tokens) - `expiry_secs`: Specify the token expiration time in seconds. @@ -458,7 +458,7 @@ curl \ "token_name": "System health token", "permissions": [{ "resource_type": "system", - "resource_names": ["health"], + "resource_identifier": ["health"], "actions": ["read"] }], "expiry_secs": 300000 diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 7781710ec9..d685894224 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -154,8 +154,6 @@ influxdb3 serve [OPTIONS] | | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ | | | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | | | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ | -| | `--use-pacha-tree` | Enable the [performance upgrade preview](/influxdb3/enterprise/performance-preview/). Required for any `--pt-*` option. | -| | `--pt-*` | Performance upgrade preview tuning options. Requires `--use-pacha-tree`. 
_See [configuration reference](/influxdb3/enterprise/performance-preview/configure/)._ | | | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ | | | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ | | | `--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ | diff --git a/content/shared/influxdb3-admin/performance-tuning.md b/content/shared/influxdb3-admin/performance-tuning.md index 2824bf8075..45201eb65a 100644 --- a/content/shared/influxdb3-admin/performance-tuning.md +++ b/content/shared/influxdb3-admin/performance-tuning.md @@ -530,7 +530,7 @@ Control memory pressure from write buffers: # Configure WAL rotation --wal-flush-interval=10s \ ---wal-snapshot-size=100 +--wal-snapshot-size=100MB ``` ## Advanced tuning options diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 88f72eaada..2530ed0cd1 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -6,76 +6,6 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. -## v3.9.0 {date="2026-04-02"} - -### Core - -#### Features - -- **DataFusion upgrade**: Upgraded the embedded DataFusion query engine for more - efficient query execution. - -- **Python runtime upgrade**: Updated the bundled Python runtime for processing - engine plugins with the latest security and bug fixes. - -- **Product identity in HTTP responses**: Metrics, HTTP response headers, and - metadata now distinguish between Core and Enterprise builds. 
- -- **Database lifecycle hardening**: Background resources such as processing - engine triggers are now cleanly decommissioned when a database is removed. - -#### Bug fixes - -- Additional bug fixes and performance improvements. - -### Enterprise - -All Core updates are included in Enterprise. -Additional Enterprise-specific features and fixes: - -#### Features - -- **Performance upgrade preview (beta)**: Preview major storage layer upgrades - with the `--use-pacha-tree` flag. Includes a new columnar file format - (`.pt` files), automatic Parquet migration with hybrid query mode, - column families for efficient wide-table I/O, and bounded compaction. - See [Performance upgrade preview](/influxdb3/enterprise/performance-preview/). - - > [!Warning] - > The performance upgrade preview is a beta feature for staging and test - > environments only. Do not use for production workloads. - -- **Bulk data export**: Export compacted data as Parquet files for use with - external tools. Use the new `influxdb3 export` subcommands to list databases, - tables, and compacted time windows, then export selected data. - See [Export to Parquet](/influxdb3/enterprise/performance-preview/#export-to-parquet). - -- **Automatic distinct value caching**: Enable automatic DVC creation for - `SHOW TAG VALUES` queries and the `tag_values()` SQL function with - `--pt-enable-auto-dvc`. Max cardinality and refresh intervals are configurable. - -- **Downgrade from performance preview**: Use - `influxdb3 downgrade-to-parquet` to revert from the performance preview back - to standard Parquet storage. Only data that existed before the upgrade - (original Parquet files) is preserved. - See [Downgrade to Parquet](/influxdb3/enterprise/performance-preview/#downgrade-to-parquet). - -- **Non-interactive delete confirmation**: Use the `--yes` (`-y`) flag with - delete commands to skip interactive confirmation prompts in automated and - headless environments. 
- -- **1MB default string field limit**: The maximum string field size defaults to - 1MB (previously 64KB) to support v1 migration workloads. Writes exceeding 1MB - are rejected with a validation error. - -#### Bug fixes - -- **Compaction stability**: Multiple fixes to compaction scheduling, priority - handling, and resource management for improved stability in multi-node - clusters. - -- Additional bug fixes and performance improvements. - ## v3.8.4 {date="2026-03-10"} ### Core diff --git a/content/shared/v3-line-protocol.md b/content/shared/v3-line-protocol.md index 808b0fb026..24bafdb978 100644 --- a/content/shared/v3-line-protocol.md +++ b/content/shared/v3-line-protocol.md @@ -150,11 +150,7 @@ myTable fieldKey=12485903u ### String Plain text string. - -| Element | Maximum length | -|:--------|:---------------| -| Table name, tag key, tag value, field key | 64KB | -| Field value | 1MB | +Length limit 64KB. ##### String example diff --git a/data/notifications.yaml b/data/notifications.yaml index c717705a87..80b1c82f3d 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -40,19 +40,19 @@ # - [The plan for InfluxDB 3.0 Open Source](https://influxdata.com/blog/the-plan-for-influxdb-3-0-open-source) # - [InfluxDB 3.0 benchmarks](https://influxdata.com/blog/influxdb-3-0-is-2.5x-45x-faster-compared-to-influxdb-open-source/) -- id: influxdb3-9-performance-preview +- id: influxdb3.9-performance-preview level: note scope: - /influxdb3/enterprise/ exclude: - - /influxdb3/enterprise/performance-preview/ + - /influxdb3/enterprise/admin/pachatree/ title: 'InfluxDB 3.9: Performance upgrade preview' slug: | - InfluxDB 3 Enterprise 3.9 includes a beta of major performance upgrades + InfluxDB 3 Enterprise 3.9 includes a private preview of major performance upgrades with faster single-series queries, wide-and-sparse table support, and more. 
message: | - InfluxDB 3 Enterprise 3.9 includes a beta of major performance and + InfluxDB 3 Enterprise 3.9 includes a private preview of major performance and feature updates. **Key improvements:** @@ -66,7 +66,7 @@ For more information, see: - - [Performance upgrade preview](/influxdb3/enterprise/performance-preview/) + - [Performance upgrade preview](/influxdb3/enterprise/admin/pachatree/) - [InfluxDB 3 Enterprise release notes](/influxdb3/enterprise/release-notes/) - [InfluxData Blog post](https://www.influxdata.com/blog/influxdb-3-9/) diff --git a/data/products.yml b/data/products.yml index 898068d7ba..a0f0c1c229 100644 --- a/data/products.yml +++ b/data/products.yml @@ -8,7 +8,7 @@ influxdb3_core: versions: [core] list_order: 2 latest: core - latest_patch: 3.9.0 + latest_patch: 3.8.3 placeholder_host: localhost:8181 limits: database: 5 @@ -46,7 +46,7 @@ influxdb3_enterprise: versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.9.0 + latest_patch: 3.8.4 placeholder_host: localhost:8181 limits: database: 100 @@ -349,7 +349,7 @@ enterprise_influxdb: versions: [v1] latest: v1.12 latest_patches: - v1: 1.12.3 + v1: 1.12.2 detector_config: query_languages: InfluxQL: diff --git a/layouts/partials/article/feedback.html b/layouts/partials/article/feedback.html index 14a930af50..c402dab9e9 100644 --- a/layouts/partials/article/feedback.html +++ b/layouts/partials/article/feedback.html @@ -2,7 +2,7 @@ {{ $product := index $productPathData 0 }} {{ $version := index $productPathData 1 }} {{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }} -{{ $productNamespace := cond (or (eq $product "influxdb3") (eq $product "enterprise_influxdb")) "influxdb" $product }} +{{ $productNamespace := cond (eq $product "influxdb3") "influxdb" $product }} {{ $productName := (index .Site.Data.products $productKey).name }} {{ $supportBlacklist := slice "chronograf" "kapacitor" }} diff --git a/lefthook.yml 
b/lefthook.yml index 3333340bcc..eebc5be9d8 100644 --- a/lefthook.yml +++ b/lefthook.yml @@ -175,86 +175,56 @@ pre-push: exit $? # Manage Docker containers - # Disabled: original task fails when docker is not installed or running. - # Replaced by list-legacy-containers below, which exits 0 when docker - # is unavailable so the push is not blocked. - # prune-legacy-containers: - # priority: 1 - # tags: test - # run: '(docker container ls --format "{{.ID}}" - # --filter label=tag=influxdata-docs - # --filter status=exited | xargs docker rm) - # || true' - list-legacy-containers: + prune-legacy-containers: + priority: 1 + tags: test + run: '(docker container ls --format "{{.ID}}" + --filter label=tag=influxdata-docs + --filter status=exited | xargs docker rm) + || true' + build-pytest-image: tags: test - run: | - if ! command -v docker >/dev/null 2>&1; then - exit 0 - fi - if ! docker info >/dev/null 2>&1; then - exit 0 - fi - legacy=$(docker container ls -a \ - --format "{{.ID}} {{.Names}}" \ - --filter label=tag=influxdata-docs \ - --filter status=exited 2>/dev/null) - if [ -n "$legacy" ]; then - echo "Note: exited influxdata-docs containers found (push not blocked):" - echo "$legacy" | sed 's/^/ /' - echo "Prune with: docker container prune --filter label=tag=influxdata-docs" - fi - exit 0 - # Disabled: builds the pytest image used by the *-pytest tasks below. - # Now that those tasks are disabled, this task is dead weight and fails - # when docker is not installed. Run `yarn build:pytest:image` manually - # before invoking `yarn test:codeblocks:*` if you need a fresh image. - # build-pytest-image: - # tags: test - # run: yarn build:pytest:image + run: yarn build:pytest:image # Test code blocks in markdown files - # Disabled: codeblock tests require docker and a configured product - # environment. 
Run them on demand instead: - # yarn test:codeblocks:all - # yarn test:codeblocks: - # cloud-pytest: - # glob: content/influxdb/cloud/*.md - # tags: test,codeblocks,v2 - # env: - # SERVICE: cloud-pytest - # run: yarn test:codeblocks:cloud '{staged_files}' - # - # cloud-dedicated-pytest: - # tags: test,codeblocks,v3 - # glob: content/influxdb3/cloud-dedicated/*.md - # run: | - # yarn test:codeblocks:cloud-dedicated '{staged_files}' && - # ./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest - # - # cloud-serverless-pytest: - # tags: test,codeblocks,v3 - # glob: content/influxdb3/cloud-serverless/*.md - # env: - # SERVICE: cloud-serverless-pytest - # run: yarn test:codeblocks:cloud-serverless '{staged_files}' - # - # clustered-pytest: - # tags: test,codeblocks,v3 - # glob: content/influxdb3/clustered/*.md - # run: | - # yarn test:codeblocks:clustered '{staged_files}' && - # ./test/scripts/monitor-tests.sh stop clustered-pytest - # - # telegraf-pytest: - # tags: test,codeblocks - # glob: content/telegraf/*.md - # env: - # SERVICE: telegraf-pytest - # run: yarn test:codeblocks:telegraf '{staged_files}' - # - # v2-pytest: - # tags: test,codeblocks,v2 - # glob: content/influxdb/v2/*.md - # env: - # SERVICE: v2-pytest - # run: yarn test:codeblocks:v2 '{staged_files}' \ No newline at end of file + cloud-pytest: + glob: content/influxdb/cloud/*.md + tags: test,codeblocks,v2 + env: + SERVICE: cloud-pytest + run: yarn test:codeblocks:cloud '{staged_files}' + + cloud-dedicated-pytest: + tags: test,codeblocks,v3 + glob: content/influxdb3/cloud-dedicated/*.md + run: | + yarn test:codeblocks:cloud-dedicated '{staged_files}' && + ./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest + + cloud-serverless-pytest: + tags: test,codeblocks,v3 + glob: content/influxdb3/cloud-serverless/*.md + env: + SERVICE: cloud-serverless-pytest + run: yarn test:codeblocks:cloud-serverless '{staged_files}' + + clustered-pytest: + tags: test,codeblocks,v3 + glob: 
content/influxdb3/clustered/*.md + run: | + yarn test:codeblocks:clustered '{staged_files}' && + ./test/scripts/monitor-tests.sh stop clustered-pytest + + telegraf-pytest: + tags: test,codeblocks + glob: content/telegraf/*.md + env: + SERVICE: telegraf-pytest + run: yarn test:codeblocks:telegraf '{staged_files}' + + v2-pytest: + tags: test,codeblocks,v2 + glob: content/influxdb/v2/*.md + env: + SERVICE: v2-pytest + run: yarn test:codeblocks:v2 '{staged_files}' \ No newline at end of file diff --git a/static/openapi/influxdb3-enterprise-openapi.yaml b/static/openapi/influxdb3-enterprise-openapi.yaml index b8fd78da1b..3d6c70af5f 100644 --- a/static/openapi/influxdb3-enterprise-openapi.yaml +++ b/static/openapi/influxdb3-enterprise-openapi.yaml @@ -17,8 +17,8 @@ info: - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - [Download the OpenAPI specification](/openapi/influxdb3-enterprise-openapi.yaml) - version: v3.9.0 + To download the OpenAPI specification for this API, use the **Download** button above. + version: v3.8.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -140,31 +140,6 @@ tags: x-related: - title: Use compatibility APIs to write data href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ - - name: Export data (beta) - description: | - Export compacted data as Parquet files for use with external tools. - - > **Beta**: Export endpoints require the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - - Data must be compacted before it can be exported. - Uncompacted data is not available for export at this time. - - #### Export workflow - - 1. [List databases](#operation/GetExportDatabases) available for export. - 2. [List tables](#operation/GetExportTables) in a database. 
- 3. [List compacted windows](#operation/GetExportWindows) (24-hour UTC windows) for a table. - 4. [Download window data](#operation/GetExportWindowData) as Parquet files. - - You can also use the [`influxdb3 export`](/influxdb3/enterprise/performance-preview/#export-to-parquet) CLI - commands, which call these endpoints. - x-related: - - title: Performance upgrade preview - href: /influxdb3/enterprise/performance-preview/ - - title: Export to Parquet - href: /influxdb3/enterprise/performance-preview/#export-to-parquet - name: Database description: Manage databases - description: > @@ -427,124 +402,6 @@ paths: tags: - Compatibility endpoints - Write data - /api/v3/export/databases: - get: - operationId: GetExportDatabases - summary: "List databases available for export (beta)" - description: | - Returns a list of databases that have compacted data available for Parquet export. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - responses: - "200": - description: Success. Returns a list of database names. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Export data (beta) - /api/v3/export/tables: - get: - operationId: GetExportTables - summary: "List tables available for export (beta)" - description: | - Returns a list of tables in a database that have compacted data available for Parquet export. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - responses: - "200": - description: Success. Returns a list of table names. 
- content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database not found. - tags: - - Export data (beta) - /api/v3/export/windows: - get: - operationId: GetExportWindows - summary: "List compacted windows for a table (beta)" - description: | - Returns a list of compacted 24-hour UTC windows for a table. - Each window represents a time range of compacted data that can be exported as Parquet. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The table name. - responses: - "200": - description: Success. Returns a list of compacted 24-hour windows. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database or table not found. - tags: - - Export data (beta) - /api/v3/export/window_data: - get: - operationId: GetExportWindowData - summary: "Export window data as Parquet (beta)" - description: | - Downloads compacted data for the specified windows as a tar archive containing Parquet files. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - parameters: - - $ref: "#/components/parameters/db" - - name: table - in: query - required: true - schema: - type: string - description: The table name. - - name: windows - in: query - required: false - schema: - type: string - description: | - Comma-separated list of window dates to export (for example, `2026-01-15,2026-01-16`). 
- If omitted, exports all available windows. - responses: - "200": - description: Success. Returns a tar archive containing Parquet files. - content: - application/x-tar: - schema: - type: string - format: binary - "401": - $ref: "#/components/responses/Unauthorized" - "404": - description: Database, table, or window not found. - tags: - - Export data (beta) /api/v3/configure/database: delete: operationId: DeleteConfigureDatabase @@ -1810,12 +1667,7 @@ paths: "422": description: Unprocessable entity. summary: Execute InfluxQL query - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). + description: Executes an InfluxQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/dbQueryParam" - name: q @@ -1869,12 +1721,7 @@ paths: "422": description: Unprocessable entity. summary: Execute InfluxQL query - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). + description: Executes an InfluxQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/AcceptQueryHeader" - $ref: "#/components/parameters/ContentType" @@ -1924,14 +1771,7 @@ paths: "422": description: Unprocessable entity. summary: Execute SQL query - description: | - Executes an SQL query to retrieve data from the specified database. 
- - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). Use the - [`/api/v3/query_sql_telemetry`](#operation/GetQuerySQLTelemetry) endpoint after executing - a query to retrieve detailed execution statistics. + description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/db" - $ref: "#/components/parameters/querySqlParam" @@ -1978,14 +1818,7 @@ paths: "422": description: Unprocessable entity. summary: Execute SQL query - description: | - Executes an SQL query to retrieve data from the specified database. - - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), queries automatically merge results from Parquet and - `.pt` files (hybrid query mode). Use the - [`/api/v3/query_sql_telemetry`](#operation/GetQuerySQLTelemetry) endpoint after executing - a query to retrieve detailed execution statistics. + description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: "#/components/parameters/AcceptQueryHeader" - $ref: "#/components/parameters/ContentType" @@ -1993,111 +1826,6 @@ paths: $ref: "#/components/requestBodies/queryRequestBody" tags: - Query data - /api/v3/query_sql_telemetry: - get: - operationId: GetQuerySQLTelemetry - summary: "Get query telemetry (beta)" - description: | - Returns detailed execution statistics for the most recent SQL query, including per-chunk I/O, - cache hit rates, and timing breakdowns. - - Use this endpoint after executing a query to analyze performance. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. 
- - For more information, see - [Query telemetry](/influxdb3/enterprise/performance-preview/monitor/#query-telemetry). - responses: - "200": - description: Success. Returns query telemetry data. - content: - application/json: - schema: - type: object - properties: - query_id: - type: string - description: Unique identifier for the query. - execution_time_us: - type: integer - description: Total execution time in microseconds. - chunks: - type: array - description: Per-chunk statistics. - items: - type: object - properties: - chunk_id: - type: string - files_scanned: - type: integer - blocks_processed: - type: integer - rows_read: - type: integer - rows_returned: - type: integer - bytes_read: - type: integer - cache_stats: - type: object - description: Cache hit rates by type. - properties: - gen0_hits: - type: integer - gen0_misses: - type: integer - compacted_hits: - type: integer - compacted_misses: - type: integer - example: - query_id: "q_12345" - execution_time_us: 4523 - chunks: - - chunk_id: "c_1" - files_scanned: 3 - blocks_processed: 12 - rows_read: 24000 - rows_returned: 150 - bytes_read: 1234567 - cache_stats: - gen0_hits: 5 - gen0_misses: 1 - compacted_hits: 8 - compacted_misses: 2 - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Query data - post: - operationId: PostQuerySQLTelemetry - summary: "Get query telemetry (beta)" - description: | - Returns detailed execution statistics for the most recent SQL query, including per-chunk I/O, - cache hit rates, and timing breakdowns. - - Use this endpoint after executing a query to analyze performance. - - > **Beta**: This endpoint requires the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) - > (`--use-pacha-tree` flag). The performance upgrade preview is a beta feature - > and **should not be used for production workloads**. - - For more information, see - [Query telemetry](/influxdb3/enterprise/performance-preview/monitor/#query-telemetry). 
- responses: - "200": - description: Success. Returns query telemetry data. - content: - application/json: - schema: - type: object - "401": - $ref: "#/components/responses/Unauthorized" - tags: - - Query data /api/v3/write_lp: post: operationId: PostWriteLP @@ -2147,52 +1875,53 @@ paths: "422": description: Unprocessable entity. summary: Write line protocol - description: | + description: > Writes line protocol to the specified database. + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format - to InfluxDB. Use query parameters to specify options for writing data. + to InfluxDB. - #### Features + Use query parameters to specify options for writing data. - - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail - - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response - times but sacrificing durability guarantees - - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - #### Column families (performance upgrade preview) + #### Features - With the [performance upgrade preview](/influxdb3/enterprise/performance-preview/) enabled - (`--use-pacha-tree` flag), you can assign fields to column families using the `::` delimiter - in field names. The portion before `::` is the family name; everything after is the field name. 
- ```txt - metrics,host=sA cpu::usage_user=55.2,cpu::usage_sys=12.1,mem::free=2048i 1000000000 - ``` + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response + times but sacrificing durability guarantees - Fields in the same family are stored together on disk. For wide tables, this reduces I/O - by letting queries read only the families they need. Fields written without `::` are assigned - to auto-generated families. + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) - For more information, see [Column families](/influxdb3/enterprise/performance-preview/#column-families). #### Auto precision detection + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + the timestamp precision based on the magnitude of the timestamp value: + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + - Larger timestamps → Nanosecond precision (no conversion needed) + #### Related + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) - - [Performance upgrade preview](/influxdb3/enterprise/performance-preview/) requestBody: $ref: "#/components/requestBodies/lineProtocolRequestBody" tags: @@ -4062,7 +3791,6 @@ x-tagGroups: - Response codes - Compatibility endpoints - Database - - Export data (beta) - Processing engine - Server information - Table