diff --git a/BUILD.bazel b/BUILD.bazel index feb873d3ae8f..1cb0459f719c 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -205,6 +205,7 @@ nogo( "//pkg/testutils/lint/passes/grpcstatuswithdetails", "//pkg/testutils/lint/passes/hash", "//pkg/testutils/lint/passes/leaktestcall", + "//pkg/testutils/lint/passes/metricmetadatainit", "//pkg/testutils/lint/passes/nilness", "//pkg/testutils/lint/passes/nocopy", "//pkg/testutils/lint/passes/redactcheck", diff --git a/build/bazelutil/nogo_config.json b/build/bazelutil/nogo_config.json index e2b94463fa26..9f5eb3332138 100644 --- a/build/bazelutil/nogo_config.json +++ b/build/bazelutil/nogo_config.json @@ -1654,5 +1654,16 @@ "external/": "exclude all third-party code for all analyzers", "pkg/sql/colexec/colexechash/hash\\.go$": "re-implements runtime.noescape for efficient hashing" } + }, + "metricmetadatainit": { + "exclude_files": { + "external/": "exclude all third-party code", + ".*\\.pb\\.go$": "generated code", + ".*\\.pb\\.gw\\.go$": "generated code", + "pkg/.*\\.eg\\.go$": "generated code", + "pkg/.*_generated\\.go$": "generated code", + "pkg/util/metric/.*\\.go$": "metric package internal usage is allowed", + "pkg/testutils/": "test utilities may construct Metadata for testing" + } } } diff --git a/build/github/check-generated-code.sh b/build/github/check-generated-code.sh index daef83e7dd97..4e77616ae77c 100755 --- a/build/github/check-generated-code.sh +++ b/build/github/check-generated-code.sh @@ -61,10 +61,6 @@ if grep TODO DEPS.bzl; then fi check_workspace_clean "Run \`./dev generate bazel\` to fix this error." -# Run gen-metric-owners to ensure metric_owners.yaml is up to date -# before //pkg/gen regenerates metrics.yaml from it. -bazel run //pkg/cmd/gen-metric-owners $ENGFLOW_ARGS -- -out="$(pwd)/pkg/internal/metricscan/metric_owners.yaml" - # Run `bazel run //pkg/gen` and ensure nothing changes. This ensures # generated documentation and checked-in go code are up to date. 
bazel run //pkg/gen $ENGFLOW_ARGS diff --git a/docs/generated/metrics/BUILD.bazel b/docs/generated/metrics/BUILD.bazel index ce797d2302d7..e9a169edf47a 100644 --- a/docs/generated/metrics/BUILD.bazel +++ b/docs/generated/metrics/BUILD.bazel @@ -1,10 +1,7 @@ genrule( name = "metrics", - srcs = [ - "//pkg/internal/metricscan:metric_owners.yaml", - ], outs = ["metrics.yaml"], - cmd = "$(location //pkg/cmd/cockroach-short) gen metric-list --logtostderr=NONE --metric-owners=$(location //pkg/internal/metricscan:metric_owners.yaml) > $@", + cmd = "$(location //pkg/cmd/cockroach-short) gen metric-list --logtostderr=NONE > $@", tools = ["//pkg/cmd/cockroach-short"], visibility = [ ":__pkg__", diff --git a/docs/generated/metrics/metrics.yaml b/docs/generated/metrics/metrics.yaml index 93fe9db6ee8e..488b2f5f93a5 100644 --- a/docs/generated/metrics/metrics.yaml +++ b/docs/generated/metrics/metrics.yaml @@ -11,7 +11,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.aggregator_progress exported_name: changefeed_aggregator_progress description: The earliest timestamp up to which any aggregator is guaranteed to have emitted all values for @@ -20,7 +19,6 @@ layers: unit: TIMESTAMP_NS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.backfill_count exported_name: changefeed_backfill_count description: Number of changefeeds currently executing backfill @@ -30,7 +28,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/cdc - name: changefeed.backfill_pending_ranges exported_name: changefeed_backfill_pending_ranges description: Number of ranges in an ongoing backfill that are yet to be fully emitted @@ -39,7 +36,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.batch_reduction_count exported_name: changefeed_batch_reduction_count description: Number of times a changefeed aggregator node attempted to reduce the size 
of message batches it emitted to the sink @@ -48,7 +44,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.allocated_mem exported_name: changefeed_buffer_entries_allocated_mem description: Current quota pool memory allocation @@ -57,7 +52,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.flush.aggregator exported_name: changefeed_buffer_entries_flush_aggregator labeled_name: 'changefeed.buffer_entries.flush{buffer_type: aggregator}' @@ -67,7 +61,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.flush.rangefeed exported_name: changefeed_buffer_entries_flush_rangefeed labeled_name: 'changefeed.buffer_entries.flush{buffer_type: rangefeed}' @@ -77,7 +70,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.in.aggregator exported_name: changefeed_buffer_entries_in_aggregator labeled_name: 'changefeed.buffer_entries.in{buffer_type: aggregator}' @@ -87,7 +79,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.in.rangefeed exported_name: changefeed_buffer_entries_in_rangefeed labeled_name: 'changefeed.buffer_entries.in{buffer_type: rangefeed}' @@ -97,7 +88,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.kv.aggregator exported_name: changefeed_buffer_entries_kv_aggregator labeled_name: 'changefeed.buffer_entries.kv{buffer_type: aggregator}' @@ -107,7 +97,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.kv.rangefeed exported_name: changefeed_buffer_entries_kv_rangefeed labeled_name: 
'changefeed.buffer_entries.kv{buffer_type: rangefeed}' @@ -117,7 +106,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.out.aggregator exported_name: changefeed_buffer_entries_out_aggregator labeled_name: 'changefeed.buffer_entries.out{buffer_type: aggregator}' @@ -127,7 +115,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.out.rangefeed exported_name: changefeed_buffer_entries_out_rangefeed labeled_name: 'changefeed.buffer_entries.out{buffer_type: rangefeed}' @@ -137,7 +124,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.released.aggregator exported_name: changefeed_buffer_entries_released_aggregator labeled_name: 'changefeed.buffer_entries.released{buffer_type: aggregator}' @@ -147,7 +133,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.released.rangefeed exported_name: changefeed_buffer_entries_released_rangefeed labeled_name: 'changefeed.buffer_entries.released{buffer_type: rangefeed}' @@ -157,7 +142,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.resolved.aggregator exported_name: changefeed_buffer_entries_resolved_aggregator labeled_name: 'changefeed.buffer_entries.resolved{buffer_type: aggregator}' @@ -167,7 +151,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries.resolved.rangefeed exported_name: changefeed_buffer_entries_resolved_rangefeed labeled_name: 'changefeed.buffer_entries.resolved{buffer_type: rangefeed}' @@ -177,7 +160,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: 
changefeed.buffer_entries_mem.acquired exported_name: changefeed_buffer_entries_mem_acquired description: Total amount of memory acquired for entries as they enter the system @@ -186,7 +168,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_entries_mem.released exported_name: changefeed_buffer_entries_mem_released description: Total amount of memory released by the entries after they have been emitted @@ -195,7 +176,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_pushback_nanos.aggregator exported_name: changefeed_buffer_pushback_nanos_aggregator labeled_name: 'changefeed.buffer_pushback_nanos{buffer_type: aggregator}' @@ -205,7 +185,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.buffer_pushback_nanos.rangefeed exported_name: changefeed_buffer_pushback_nanos_rangefeed labeled_name: 'changefeed.buffer_pushback_nanos{buffer_type: rangefeed}' @@ -215,7 +194,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.bytes.messages_pushback_nanos exported_name: changefeed_bytes_messages_pushback_nanos description: Total time spent throttled for bytes quota @@ -224,7 +202,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.checkpoint.create_nanos exported_name: changefeed_checkpoint_create_nanos description: Time it takes to create a changefeed checkpoint @@ -233,7 +210,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.checkpoint.span_count exported_name: changefeed_checkpoint_span_count description: Number of spans in a changefeed checkpoint @@ -242,7 +218,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: 
cockroachdb/cdc - name: changefeed.checkpoint.timestamp_count exported_name: changefeed_checkpoint_timestamp_count description: Number of unique timestamps in a changefeed checkpoint @@ -251,7 +226,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.checkpoint.total_bytes exported_name: changefeed_checkpoint_total_bytes description: Total size of a changefeed checkpoint @@ -260,7 +234,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.checkpoint_hist_nanos exported_name: changefeed_checkpoint_hist_nanos description: Time spent checkpointing changefeed progress @@ -269,7 +242,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.checkpoint_progress exported_name: changefeed_checkpoint_progress description: The earliest timestamp of any changefeed's persisted checkpoint (values prior to this timestamp will never need to be re-emitted) @@ -278,7 +250,6 @@ layers: unit: TIMESTAMP_NS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.cloudstorage_buffered_bytes exported_name: changefeed_cloudstorage_buffered_bytes description: The number of bytes buffered in cloudstorage sink files which have not been emitted yet @@ -287,7 +258,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.commit_latency exported_name: changefeed_commit_latency description: 'Event commit latency: a difference between event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the oldest event in the batch and acknowledgement is recorded. Excludes latency during backfill.' @@ -298,7 +268,6 @@ layers: derivative: NONE how_to_use: This metric provides a useful context when assessing the state of changefeeds. 
This metric characterizes the end-to-end lag between a committed change and that change applied at the destination. visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.emitted_batch_sizes exported_name: changefeed_emitted_batch_sizes description: Size of batches emitted emitted by all feeds @@ -307,7 +276,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.emitted_bytes exported_name: changefeed_emitted_bytes description: Bytes emitted by all feeds @@ -318,7 +286,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the throughput bytes being streamed from the CockroachDB cluster. visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.emitted_messages exported_name: changefeed_emitted_messages description: Messages emitted by all feeds @@ -329,7 +296,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the rate of changes being streamed from the CockroachDB cluster. visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.error_retries exported_name: changefeed_error_retries description: Total retryable errors encountered by all changefeeds @@ -340,7 +306,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric tracks transient changefeed errors. Alert on "too many" errors, such as 50 retries in 15 minutes. For example, during a rolling upgrade this counter will increase because the changefeed jobs will restart following node restarts. There is an exponential backoff, up to 10 minutes. But if there is no rolling upgrade in process or other cluster maintenance, and the error rate is high, investigate the changefeed job. 
visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.failures exported_name: changefeed_failures description: Total number of changefeed jobs which have failed @@ -351,7 +316,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric tracks the permanent changefeed job failures that the jobs system will not try to restart. Any increase in this counter should be investigated. An alert on this metric is recommended. visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.filtered_messages exported_name: changefeed_filtered_messages description: Messages filtered out by all feeds. This count does not include the number of messages that may be filtered due to the range constraints. @@ -360,7 +324,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.flush.messages_pushback_nanos exported_name: changefeed_flush_messages_pushback_nanos description: Total time spent throttled for flush quota @@ -369,7 +332,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.flush_hist_nanos exported_name: changefeed_flush_hist_nanos description: Time spent flushing messages across all changefeeds @@ -378,7 +340,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.flushed_bytes exported_name: changefeed_flushed_bytes description: Bytes emitted by all feeds; maybe different from changefeed.emitted_bytes when compression is enabled @@ -387,7 +348,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.flushes exported_name: changefeed_flushes description: Total flushes across all feeds. 
@@ -396,7 +356,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.forwarded_resolved_messages exported_name: changefeed_forwarded_resolved_messages description: Resolved timestamps forwarded from the change aggregator to the change frontier @@ -405,7 +364,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.frontier_updates exported_name: changefeed_frontier_updates description: Number of change frontier updates across all feeds @@ -414,7 +372,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.internal_retry_message_count exported_name: changefeed_internal_retry_message_count description: Number of messages for which an attempt to retry them within an aggregator node was made. @@ -423,7 +380,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.kafka_throttling_hist_nanos exported_name: changefeed_kafka_throttling_hist_nanos description: Time spent in throttling due to exceeding kafka quota @@ -432,7 +388,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.lagging_ranges exported_name: changefeed_lagging_ranges description: The number of ranges considered to be lagging behind @@ -441,7 +396,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.max_behind_nanos exported_name: changefeed_max_behind_nanos description: The most any changefeed's persisted checkpoint is behind the present @@ -451,7 +405,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/cdc - name: changefeed.message_size_hist exported_name: changefeed_message_size_hist description: Message size histogram @@ -460,7 +413,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - 
name: changefeed.messages.messages_pushback_nanos exported_name: changefeed_messages_messages_pushback_nanos description: Total time spent throttled for messages quota @@ -469,7 +421,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.network.bytes_in exported_name: changefeed_network_bytes_in description: The number of bytes received from the network by changefeeds @@ -478,7 +429,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.network.bytes_out exported_name: changefeed_network_bytes_out description: The number of bytes sent over the network by changefeeds @@ -487,7 +437,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.nprocs_consume_event_nanos exported_name: changefeed_nprocs_consume_event_nanos description: Total time spent waiting to add an event to the parallel consumer @@ -496,7 +445,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.nprocs_flush_nanos exported_name: changefeed_nprocs_flush_nanos description: Total time spent idle waiting for the parallel consumer to flush @@ -505,7 +453,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.nprocs_in_flight_count exported_name: changefeed_nprocs_in_flight_count description: Number of buffered events in the parallel consumer @@ -514,7 +461,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.parallel_io_in_flight_keys exported_name: changefeed_parallel_io_in_flight_keys description: The number of keys currently in-flight which may contend with batches pending to be emitted @@ -523,7 +469,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.parallel_io_pending_rows exported_name: 
changefeed_parallel_io_pending_rows description: Number of rows which are blocked from being sent due to conflicting in-flight keys. @@ -532,7 +477,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.parallel_io_queue_nanos exported_name: changefeed_parallel_io_queue_nanos description: Time that outgoing requests to the sink spend waiting in a queue due to in-flight requests with conflicting keys. @@ -541,7 +485,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.parallel_io_result_queue_nanos exported_name: changefeed_parallel_io_result_queue_nanos description: Time that incoming results from the sink spend waiting in parallel io emitter before they are acknowledged by the changefeed @@ -550,7 +493,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.parallel_io_workers exported_name: changefeed_parallel_io_workers description: The number of workers in the ParallelIO @@ -559,7 +501,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.progress_skew.span exported_name: changefeed_progress_skew_span description: The time difference between the fastest and slowest span's resolved timestamp @@ -568,7 +509,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.progress_skew.table exported_name: changefeed_progress_skew_table description: The time difference between the fastest and slowest table's resolved timestamp @@ -577,7 +517,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.queue_time_nanos exported_name: changefeed_queue_time_nanos description: Time KV event spent waiting to be processed @@ -586,7 +525,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.running 
exported_name: changefeed_running description: Number of currently running changefeeds, including sinkless @@ -597,7 +535,6 @@ layers: derivative: NONE how_to_use: This metric tracks the total number of all running changefeeds. visibility: ESSENTIAL - owner: cockroachdb/cdc - name: changefeed.schema_registry.registrations exported_name: changefeed_schema_registry_registrations description: Number of registration attempts with the schema registry. @@ -606,7 +543,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.schema_registry.retry_count exported_name: changefeed_schema_registry_retry_count description: Number of retries encountered when sending requests to the schema registry. @@ -615,7 +551,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.schemafeed.table_history_scans exported_name: changefeed_schemafeed_table_history_scans description: The number of table history scans during polling @@ -624,7 +559,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.schemafeed.table_metadata_nanos exported_name: changefeed_schemafeed_table_metadata_nanos description: Time blocked while verifying table metadata histories @@ -633,7 +567,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.sink_backpressure_nanos exported_name: changefeed_sink_backpressure_nanos description: Time spent waiting for quota when emitting to the sink (back-pressure). Only populated for sinks using the batching_sink wrapper. As of writing, this includes Kafka (v2), Pub/Sub (v2), and Webhook (v2). 
@@ -642,7 +575,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.sink_batch_hist_nanos exported_name: changefeed_sink_batch_hist_nanos description: Time spent batched in the sink buffer before being flushed and acknowledged. @@ -651,7 +583,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.sink_errors exported_name: changefeed_sink_errors description: Number of changefeed errors caused by the sink @@ -660,7 +591,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.sink_io_inflight exported_name: changefeed_sink_io_inflight description: The number of keys currently inflight as IO requests being sent to the sink @@ -669,7 +599,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.size_based_flushes exported_name: changefeed_size_based_flushes description: Total size based flushes across all feeds. 
@@ -678,7 +607,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: changefeed.stage.checkpoint_job_progress.latency exported_name: changefeed_stage_checkpoint_job_progress_latency labeled_name: 'changefeed.stage.latency{name: checkpoint_job_progress}' @@ -688,7 +616,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.downstream_client_send.latency exported_name: changefeed_stage_downstream_client_send_latency labeled_name: 'changefeed.stage.latency{name: downstream_client_send}' @@ -698,7 +625,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.emit_row.latency exported_name: changefeed_stage_emit_row_latency labeled_name: 'changefeed.stage.latency{name: emit_row}' @@ -708,7 +634,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.encode.latency exported_name: changefeed_stage_encode_latency labeled_name: 'changefeed.stage.latency{name: encode}' @@ -718,7 +643,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.frontier_persistence.latency exported_name: changefeed_stage_frontier_persistence_latency labeled_name: 'changefeed.stage.latency{name: frontier_persistence}' @@ -728,7 +652,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.kv_feed_buffer.latency exported_name: changefeed_stage_kv_feed_buffer_latency labeled_name: 'changefeed.stage.latency{name: kv_feed_buffer}' @@ -738,7 +661,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.kv_feed_wait_for_table_event.latency exported_name: changefeed_stage_kv_feed_wait_for_table_event_latency labeled_name: 'changefeed.stage.latency{name: kv_feed_wait_for_table_event}' @@ -748,7 +670,6 @@ 
layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.pts.create.latency exported_name: changefeed_stage_pts_create_latency labeled_name: 'changefeed.stage.pts.latency{name: create}' @@ -758,7 +679,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.pts.manage.latency exported_name: changefeed_stage_pts_manage_latency labeled_name: 'changefeed.stage.pts.latency{name: manage}' @@ -768,7 +688,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.pts.manage_error.latency exported_name: changefeed_stage_pts_manage_error_latency labeled_name: 'changefeed.stage.pts.latency{name: manage_error}' @@ -778,7 +697,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.rangefeed_buffer_checkpoint.latency exported_name: changefeed_stage_rangefeed_buffer_checkpoint_latency labeled_name: 'changefeed.stage.latency{name: rangefeed_buffer_checkpoint}' @@ -788,7 +706,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.stage.rangefeed_buffer_value.latency exported_name: changefeed_stage_rangefeed_buffer_value_latency labeled_name: 'changefeed.stage.latency{name: rangefeed_buffer_value}' @@ -798,7 +715,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.total_ranges exported_name: changefeed_total_ranges description: The total number of ranges being watched by changefeed aggregators @@ -807,7 +723,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.usage.error_count exported_name: changefeed_usage_error_count description: Count of errors encountered while generating usage metrics for changefeeds @@ -816,7 +731,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - 
owner: cockroachdb/cdc - name: changefeed.usage.query_duration exported_name: changefeed_usage_query_duration description: Time taken by the queries used to generate usage metrics for changefeeds @@ -825,7 +739,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: changefeed.usage.table_bytes exported_name: changefeed_usage_table_bytes description: Aggregated number of bytes of data per table watched by changefeeds @@ -834,7 +747,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: jobs.changefeed.currently_paused exported_name: jobs_changefeed_currently_paused labeled_name: 'jobs{name: changefeed, status: currently_paused}' @@ -846,7 +758,6 @@ layers: derivative: NONE how_to_use: Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a changefeed job in a paused state for an extended period of time. Changefeed jobs should not be paused for a long time because the protected timestamp prevents garbage collection. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.changefeed.protected_age_sec exported_name: jobs_changefeed_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: changefeed}' @@ -858,7 +769,6 @@ layers: derivative: NONE how_to_use: Changefeeds use protected timestamps to protect the data from being garbage collected. Ensure the protected timestamp age does not significantly exceed the GC TTL zone configuration. Alert on this metric if the protected timestamp age is greater than 3 times the GC TTL. 
visibility: ESSENTIAL - owner: cockroachdb/jobs - name: CROSS_CLUSTER_REPLICATION metrics: - name: physical_replication.logical_bytes @@ -871,7 +781,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Track PCR throughput visibility: ESSENTIAL - owner: cockroachdb/disaster-recovery - name: physical_replication.replicated_time_seconds exported_name: physical_replication_replicated_time_seconds description: The replicated time of the physical replication stream in seconds since the unix epoch. @@ -882,7 +791,6 @@ layers: derivative: NONE how_to_use: Track replication lag via current time - physical_replication.replicated_time_seconds visibility: ESSENTIAL - owner: cockroachdb/disaster-recovery - name: DISTRIBUTED metrics: - name: distsender.errors.notleaseholder @@ -895,7 +803,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Errors of this type are normal during elastic cluster topology changes when leaseholders are actively rebalancing. They are automatically retried. However they may create occasional response time spikes. In that case, this metric may provide the explanation of the cause. visibility: ESSENTIAL - owner: cockroachdb/kv - name: distsender.rpc.sent.nextreplicaerror exported_name: distsender_rpc_sent_nextreplicaerror description: Number of replica-addressed RPCs sent due to per-replica errors @@ -906,7 +813,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: RPC errors do not necessarily indicate a problem. This metric tracks remote procedure calls that return a status value other than "success". A non-success status of an RPC should not be misconstrued as a network transport issue. It is database code logic executed on another cluster node. The non-success status is a result of an orderly execution of an RPC that reports a specific logical condition. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: LOGICAL_DATA_REPLICATION metrics: - name: logical_replication.commit_latency @@ -919,7 +825,6 @@ layers: derivative: NONE how_to_use: track the latency of of applying events from source to destination visibility: ESSENTIAL - owner: cockroachdb/cdc - name: logical_replication.events_dlqed exported_name: logical_replication_events_dlqed description: Row update events sent to DLQ @@ -930,7 +835,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: track events sent to the dead letter queue visibility: ESSENTIAL - owner: cockroachdb/cdc - name: logical_replication.events_ingested exported_name: logical_replication_events_ingested description: Events ingested by all replication jobs @@ -941,7 +845,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: track events (e.g. updates, deletes, inserts) ingested visibility: ESSENTIAL - owner: cockroachdb/cdc - name: logical_replication.logical_bytes exported_name: logical_replication_logical_bytes description: Logical bytes (sum of keys + values) received by all replication jobs @@ -952,7 +855,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: track logical data replication throughput visibility: ESSENTIAL - owner: cockroachdb/cdc - name: logical_replication.replicated_time_seconds exported_name: logical_replication_replicated_time_seconds description: The replicated time of the logical replication stream in seconds since the unix epoch. @@ -963,7 +865,6 @@ layers: derivative: NONE how_to_use: Track replication lag via current time - logical_replication.replicated_time_seconds visibility: ESSENTIAL - owner: cockroachdb/cdc - name: NETWORKING metrics: - name: clock-offset.meannanos @@ -976,7 +877,6 @@ layers: derivative: NONE how_to_use: This metric gives the node's clock skew. In a well-configured environment, the actual clock skew would be in the sub-millisecond range. A skew exceeding 5 ms is likely due to a NTP service mis-configuration. 
Reducing the actual clock skew reduces the probability of uncertainty related conflicts and corresponding retires which has a positive impact on workload performance. Conversely, a larger actual clock skew increases the probability of retries due to uncertainty conflicts, with potentially measurable adverse effects on workload performance. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.avg_round_trip_latency exported_name: rpc_connection_avg_round_trip_latency description: |- @@ -1001,7 +901,6 @@ layers: derivative: NONE how_to_use: This metric is helpful in understanding general network issues outside of CockroachDB that could be impacting the user’s workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.failures exported_name: rpc_connection_failures description: |- @@ -1019,7 +918,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.healthy exported_name: rpc_connection_healthy description: Gauge of current connections in a healthy state (i.e. bidirectionally connected and heartbeating) @@ -1030,7 +928,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.healthy_nanos exported_name: rpc_connection_healthy_nanos description: |- @@ -1046,7 +943,6 @@ layers: derivative: NONE how_to_use: This can be useful for monitoring the stability and health of connections within your CockroachDB cluster. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.heartbeats exported_name: rpc_connection_heartbeats description: Counter of successful heartbeats. @@ -1057,7 +953,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: See Description. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.tcp_rtt exported_name: rpc_connection_tcp_rtt description: |- @@ -1075,7 +970,6 @@ layers: derivative: NONE how_to_use: High TCP RTT values indicate network issues outside of CockroachDB that could be impacting the user's workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.tcp_rtt_var exported_name: rpc_connection_tcp_rtt_var description: |- @@ -1093,7 +987,6 @@ layers: derivative: NONE how_to_use: High TCP RTT variance values indicate network stability issues outside of CockroachDB that could be impacting the user's workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.unhealthy exported_name: rpc_connection_unhealthy description: Gauge of current connections in an unhealthy state (not bidirectionally connected or heartbeating) @@ -1104,7 +997,6 @@ layers: derivative: NONE how_to_use: If the value of this metric is greater than 0, this could indicate a network partition. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rpc.connection.unhealthy_nanos exported_name: rpc_connection_unhealthy_nanos description: |- @@ -1120,7 +1012,6 @@ layers: derivative: NONE how_to_use: If this duration is greater than 0, this could indicate how long a network partition has been occurring. visibility: ESSENTIAL - owner: cockroachdb/kv - name: SQL metrics: - name: auth.cert.conn.latency @@ -1133,7 +1024,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.cert.san.conn.success exported_name: auth_cert_san_conn_success description: Number of successful SQL connections using SAN-based certificate authentication @@ -1144,7 +1034,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric tracks successful authentications when SAN-based certificate validation is enabled. Use this to monitor adoption and success rate of SAN authentication. 
Failure rate = auth.cert.san.conn.total - auth.cert.san.conn.success. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.cert.san.conn.total exported_name: auth_cert_san_conn_total description: Total number of SQL connection attempts using SAN-based certificate authentication @@ -1155,7 +1044,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric tracks all authentication attempts when SAN-based certificate validation is enabled. Compare with auth.cert.san.conn.success to calculate failure rate. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.gss.conn.latency exported_name: auth_gss_conn_latency description: Latency to establish and authenticate a SQL connection using GSS @@ -1166,7 +1054,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.jwt.conn.latency exported_name: auth_jwt_conn_latency description: Latency to establish and authenticate a SQL connection using JWT Token @@ -1177,7 +1064,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.ldap.conn.latency exported_name: auth_ldap_conn_latency description: Latency to establish and authenticate a SQL connection using LDAP @@ -1188,7 +1074,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.ldap.conn.latency.internal exported_name: auth_ldap_conn_latency_internal description: Internal Auth Latency to establish and authenticate a SQL connection using LDAP(excludes external LDAP calls) @@ -1199,7 +1084,6 @@ layers: derivative: NONE how_to_use: See Description. 
visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.password.conn.latency exported_name: auth_password_conn_latency description: Latency to establish and authenticate a SQL connection using password @@ -1210,7 +1094,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: auth.scram.conn.latency exported_name: auth_scram_conn_latency description: Latency to establish and authenticate a SQL connection using SCRAM @@ -1221,7 +1104,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: jobs.auto_create_partial_stats.currently_paused exported_name: jobs_auto_create_partial_stats_currently_paused labeled_name: 'jobs{name: auto_create_partial_stats, status: currently_paused}' @@ -1233,7 +1115,6 @@ layers: derivative: NONE how_to_use: This metric is a high-level indicator that automatically generated partial statistics jobs are paused which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.currently_running exported_name: jobs_auto_create_partial_stats_currently_running labeled_name: 'jobs{type: auto_create_partial_stats, status: currently_running}' @@ -1245,7 +1126,6 @@ layers: derivative: NONE how_to_use: This metric tracks the number of active automatically generated partial statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. 
visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.resume_failed exported_name: jobs_auto_create_partial_stats_resume_failed labeled_name: 'jobs.resume{name: auto_create_partial_stats, status: failed}' @@ -1257,7 +1137,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is a high-level indicator that automatically generated partial table statistics is failing. Failed statistic creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.auto_create_stats.currently_paused exported_name: jobs_auto_create_stats_currently_paused labeled_name: 'jobs{name: auto_create_stats, status: currently_paused}' @@ -1269,7 +1148,6 @@ layers: derivative: NONE how_to_use: This metric is a high-level indicator that automatically generated statistics jobs are paused which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.auto_create_stats.currently_running exported_name: jobs_auto_create_stats_currently_running labeled_name: 'jobs{type: auto_create_stats, status: currently_running}' @@ -1281,7 +1159,6 @@ layers: derivative: NONE how_to_use: This metric tracks the number of active automatically generated statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. 
visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.auto_create_stats.resume_failed exported_name: jobs_auto_create_stats_resume_failed labeled_name: 'jobs.resume{name: auto_create_stats, status: failed}' @@ -1293,7 +1170,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is a high-level indicator that automatically generated table statistics is failing. Failed statistic creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.backup.currently_paused exported_name: jobs_backup_currently_paused labeled_name: 'jobs{name: backup, status: currently_paused}' @@ -1305,7 +1181,6 @@ layers: derivative: NONE how_to_use: Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. In functional areas, a paused job can hold resources or have concurrency impact or some other negative consequence. Paused backup may break the recovery point objective (RPO). visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.backup.currently_running exported_name: jobs_backup_currently_running labeled_name: 'jobs{type: backup, status: currently_running}' @@ -1317,7 +1192,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.create_stats.currently_running exported_name: jobs_create_stats_currently_running labeled_name: 'jobs{type: create_stats, status: currently_running}' @@ -1329,7 +1203,6 @@ layers: derivative: NONE how_to_use: This metric tracks the number of active create statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. 
visibility: ESSENTIAL - owner: cockroachdb/jobs - name: schedules.BACKUP.failed exported_name: schedules_BACKUP_failed labeled_name: 'schedules{name: BACKUP, status: failed}' @@ -1341,7 +1214,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Monitor this metric and investigate backup job failures. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: schedules.BACKUP.last-completed-time exported_name: schedules_BACKUP_last_completed_time description: The unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric @@ -1355,7 +1227,6 @@ layers: To make use of this metric, first, from each node, take the maximum over a rolling window equal to or greater than the backup frequency, and then take the maximum of those values across nodes. For example with a backup frequency of 60 minutes, monitor time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min)). visibility: ESSENTIAL - owner: cockroachdb/disaster-recovery - name: sql.conn.failures exported_name: sql_conn_failures description: Number of SQL connection failures @@ -1366,7 +1237,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is incremented whenever a connection attempt fails for any reason, including timeouts. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: sql.conn.latency exported_name: sql_conn_latency description: Latency to establish and authenticate a SQL connection @@ -1377,7 +1247,6 @@ layers: derivative: NONE how_to_use: These metrics characterize the database connection latency which can affect the application performance, for example, by having slow startup times. Connection failures are not recorded in these metrics. 
visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: sql.conns exported_name: sql_conns description: Number of open SQL connections @@ -1388,7 +1257,6 @@ layers: derivative: NONE how_to_use: This metric shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. Review Connection Pooling. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: sql.ddl.count exported_name: sql_ddl_count labeled_name: 'sql.count{query_type: ddl}' @@ -1400,7 +1268,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.ddl.count.internal exported_name: sql_ddl_count_internal labeled_name: 'sql.count{query_type: ddl, query_internal: true}' @@ -1410,7 +1277,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.delete.count exported_name: sql_delete_count labeled_name: 'sql.count{query_type: delete}' @@ -1422,7 +1288,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.delete.count.internal exported_name: sql_delete_count_internal labeled_name: 'sql.count{query_type: delete, query_internal: true}' @@ -1432,7 +1297,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.delete.started.count exported_name: sql_delete_started_count labeled_name: 'sql.started.count{query_type: delete}' @@ -1443,7 +1307,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. 
- owner: cockroachdb/sql-queries - name: sql.delete.started.count.internal exported_name: sql_delete_started_count_internal labeled_name: 'sql.started.count{query_type: delete, query_internal: true}' @@ -1453,7 +1316,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.contended_queries.count exported_name: sql_distsql_contended_queries_count description: Number of SQL queries that experienced contention @@ -1464,7 +1326,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is incremented whenever there is a non-trivial amount of contention experienced by a statement whether read-write or write-write conflicts. Monitor this metric to correlate possible workload performance issues to contention conflicts. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.failure.count exported_name: sql_failure_count description: Number of statements resulting in a planning or runtime error @@ -1475,7 +1336,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is a high-level indicator of workload and application degradation with query failures. Use the Insights page to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error. 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.failure.count.internal exported_name: sql_failure_count_internal description: Number of statements resulting in a planning or runtime error (internal queries) @@ -1484,7 +1344,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.full.scan.count exported_name: sql_full_scan_count description: Number of full table or index scans @@ -1495,7 +1354,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the statements with a full table scan, use SHOW FULL TABLE SCAN or the SQL Activity Statements page with the corresponding metric time frame. The Statements page also includes explain plans and index recommendations. Not all full scans are necessarily bad especially over smaller tables. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.full.scan.count.internal exported_name: sql_full_scan_count_internal description: Number of full table or index scans (internal queries) @@ -1504,7 +1362,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.insert.count exported_name: sql_insert_count labeled_name: 'sql.count{query_type: insert}' @@ -1516,7 +1373,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. 
Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.insert.count.internal exported_name: sql_insert_count_internal labeled_name: 'sql.count{query_type: insert, query_internal: true}' @@ -1526,7 +1382,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.insert.started.count exported_name: sql_insert_started_count labeled_name: 'sql.started.count{query_type: insert}' @@ -1537,7 +1392,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. - owner: cockroachdb/sql-queries - name: sql.insert.started.count.internal exported_name: sql_insert_started_count_internal labeled_name: 'sql.started.count{query_type: insert, query_internal: true}' @@ -1547,7 +1401,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.mem.root.current exported_name: sql_mem_root_current description: Current sql statement memory usage for root @@ -1558,7 +1411,6 @@ layers: derivative: NONE how_to_use: This metric shows how memory set aside for temporary materializations, such as hash tables and intermediary result sets, is utilized. Use this metric to optimize memory allocations based on long term observations. The maximum amount is set with --max_sql_memory. 
If the utilization of sql memory is persistently low, perhaps some portion of this memory allocation can be shifted to --cache. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.new_conns exported_name: sql_new_conns description: Number of SQL connections created @@ -1569,7 +1421,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application. visibility: ESSENTIAL - owner: cockroachdb/sql-foundations - name: sql.query.with_statement_hints.count exported_name: sql_query_with_statement_hints_count description: Number of SQL queries executed with external statement hints @@ -1578,7 +1429,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.query.with_statement_hints.count.internal exported_name: sql_query_with_statement_hints_count_internal description: Number of SQL queries executed with external statement hints (internal queries) @@ -1587,7 +1437,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.rls.policies_applied.count exported_name: sql_rls_policies_applied_count description: Number of SQL statements where row-level security policies were applied @@ -1596,7 +1445,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.rls.policies_applied.count.internal exported_name: sql_rls_policies_applied_count_internal description: Number of SQL statements where row-level security policies were applied (internal queries) @@ -1605,7 +1453,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.delete.count exported_name: sql_routine_delete_count 
labeled_name: 'sql.count{query_type: routine_delete}' @@ -1617,7 +1464,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.routine.delete.count.internal exported_name: sql_routine_delete_count_internal labeled_name: 'sql.count{query_type: routine_delete, query_internal: true}' @@ -1627,7 +1473,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.delete.started.count exported_name: sql_routine_delete_started_count labeled_name: 'sql.started.count{query_type: routine_started_delete}' @@ -1638,7 +1483,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. 
- owner: cockroachdb/sql-queries - name: sql.routine.delete.started.count.internal exported_name: sql_routine_delete_started_count_internal labeled_name: 'sql.started.count{query_type: routine_started_delete, query_internal: true}' @@ -1648,7 +1492,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.insert.count exported_name: sql_routine_insert_count labeled_name: 'sql.count{query_type: routine_insert}' @@ -1660,7 +1503,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.routine.insert.count.internal exported_name: sql_routine_insert_count_internal labeled_name: 'sql.count{query_type: routine_insert, query_internal: true}' @@ -1670,7 +1512,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.insert.started.count exported_name: sql_routine_insert_started_count labeled_name: 'sql.started.count{query_type: routine_started_insert}' @@ -1681,7 +1522,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. - owner: cockroachdb/sql-queries - name: sql.routine.insert.started.count.internal exported_name: sql_routine_insert_started_count_internal labeled_name: 'sql.started.count{query_type: routine_started_insert, query_internal: true}' @@ -1691,7 +1531,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.select.count exported_name: sql_routine_select_count labeled_name: 'sql.count{query_type: routine_select}' @@ -1703,7 +1542,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.routine.select.count.internal exported_name: sql_routine_select_count_internal labeled_name: 'sql.count{query_type: routine_select, query_internal: true}' @@ -1713,7 +1551,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.select.started.count exported_name: sql_routine_select_started_count labeled_name: 'sql.started.count{query_type: routine_started_select}' @@ -1724,7 +1561,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. - owner: cockroachdb/sql-queries - name: sql.routine.select.started.count.internal exported_name: sql_routine_select_started_count_internal labeled_name: 'sql.started.count{query_type: routine_started_select, query_internal: true}' @@ -1734,7 +1570,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.update.count exported_name: sql_routine_update_count labeled_name: 'sql.count{query_type: routine_update}' @@ -1746,7 +1581,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.routine.update.count.internal exported_name: sql_routine_update_count_internal labeled_name: 'sql.count{query_type: routine_update, query_internal: true}' @@ -1756,7 +1590,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.routine.update.started.count exported_name: sql_routine_update_started_count labeled_name: 'sql.started.count{query_type: routine_started_update}' @@ -1767,7 +1600,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. 
- owner: cockroachdb/sql-queries - name: sql.routine.update.started.count.internal exported_name: sql_routine_update_started_count_internal labeled_name: 'sql.started.count{query_type: routine_started_update, query_internal: true}' @@ -1777,7 +1609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.select.count exported_name: sql_select_count labeled_name: 'sql.count{query_type: select}' @@ -1789,7 +1620,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.select.count.internal exported_name: sql_select_count_internal labeled_name: 'sql.count{query_type: select, query_internal: true}' @@ -1799,7 +1629,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.select.started.count exported_name: sql_select_started_count labeled_name: 'sql.started.count{query_type: select}' @@ -1810,7 +1639,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. - owner: cockroachdb/sql-queries - name: sql.select.started.count.internal exported_name: sql_select_started_count_internal labeled_name: 'sql.started.count{query_type: select, query_internal: true}' @@ -1820,7 +1648,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.service.latency exported_name: sql_service_latency description: Latency of SQL request execution @@ -1831,7 +1658,6 @@ layers: derivative: NONE how_to_use: These high-level metrics reflect workload performance. Monitor these metrics to understand latency over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. The Statements page has P90 Latency and P99 latency columns to enable correlation with this metric. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.service.latency.internal exported_name: sql_service_latency_internal description: Latency of SQL request execution (internal queries) @@ -1840,7 +1666,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.statements.active exported_name: sql_statements_active description: Number of currently active user SQL statements @@ -1851,7 +1676,6 @@ layers: derivative: NONE how_to_use: This high-level metric reflects workload volume. 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.statements.active.internal exported_name: sql_statements_active_internal description: Number of currently active user SQL statements (internal queries) @@ -1860,7 +1684,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.txn.abort.count exported_name: sql_txn_abort_count description: Number of SQL transaction abort errors @@ -1871,7 +1694,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload performance. A persistently high number of SQL transaction abort errors may negatively impact the workload performance and needs to be investigated. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txn.abort.count.internal exported_name: sql_txn_abort_count_internal description: Number of SQL transaction abort errors (internal queries) @@ -1880,7 +1702,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.begin.count exported_name: sql_txn_begin_count labeled_name: 'sql.count{query_type: begin}' @@ -1892,7 +1713,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reflects workload volume by counting explicit transactions. Use this metric to determine whether explicit transactions can be refactored as implicit transactions (individual statements). 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txn.begin.count.internal exported_name: sql_txn_begin_count_internal labeled_name: 'sql.count{query_type: begin, query_internal: true}' @@ -1902,7 +1722,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit.count exported_name: sql_txn_commit_count labeled_name: 'sql.count{query_type: commit}' @@ -1914,7 +1733,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric shows the number of transactions that completed successfully. This metric can be used as a proxy to measure the number of successful explicit transactions. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txn.commit.count.internal exported_name: sql_txn_commit_count_internal labeled_name: 'sql.count{query_type: commit, query_internal: true}' @@ -1924,7 +1742,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.latency exported_name: sql_txn_latency description: Latency of SQL transactions @@ -1935,7 +1752,6 @@ layers: derivative: NONE how_to_use: These high-level metrics provide a latency histogram of all executed SQL transactions. These metrics provide an overview of the current SQL workload. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txn.latency.internal exported_name: sql_txn_latency_internal description: Latency of SQL transactions (internal queries) @@ -1944,7 +1760,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.txn.rollback.count exported_name: sql_txn_rollback_count labeled_name: 'sql.count{query_type: rollback}' @@ -1956,7 +1771,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric shows the number of orderly transaction rollbacks. 
A persistently high number of rollbacks may negatively impact the workload performance and needs to be investigated. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txn.rollback.count.internal exported_name: sql_txn_rollback_count_internal labeled_name: 'sql.count{query_type: rollback, query_internal: true}' @@ -1966,7 +1780,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txns.open exported_name: sql_txns_open description: Number of currently open user SQL transactions @@ -1977,7 +1790,6 @@ layers: derivative: NONE how_to_use: This metric should roughly correspond to the number of cores * 4. If this metric is consistently larger, scale out the cluster. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.txns.open.internal exported_name: sql_txns_open_internal description: Number of currently open user SQL transactions (internal queries) @@ -1986,7 +1798,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.update.count exported_name: sql_update_count labeled_name: 'sql.count{query_type: update}' @@ -1998,7 +1809,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: sql.update.count.internal exported_name: sql_update_count_internal labeled_name: 'sql.count{query_type: update, query_internal: true}' @@ -2008,7 +1818,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.update.started.count exported_name: sql_update_started_count labeled_name: 'sql.started.count{query_type: update}' @@ -2019,7 +1828,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. - owner: cockroachdb/sql-queries - name: sql.update.started.count.internal exported_name: sql_update_started_count_internal labeled_name: 'sql.started.count{query_type: update, query_internal: true}' @@ -2029,7 +1837,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: txn.restarts.serializable exported_name: txn_restarts_serializable description: Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE @@ -2040,7 +1847,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review transaction contention best practices and performance tuning recipes. 
Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. visibility: ESSENTIAL - owner: cockroachdb/kv - name: txn.restarts.txnaborted exported_name: txn_restarts_txnaborted description: Number of restarts due to an abort by a concurrent transaction (usually due to deadlock) @@ -2051,7 +1857,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: The errors tracked by this metric are generally due to deadlocks. Deadlocks can often be prevented with a considered transaction design. Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic implementation prone to deadlocks. visibility: ESSENTIAL - owner: cockroachdb/kv - name: txn.restarts.txnpush exported_name: txn_restarts_txnpush description: Number of restarts due to a transaction push failure @@ -2062,7 +1867,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review transaction contention best practices and performance tuning recipes. Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. visibility: ESSENTIAL - owner: cockroachdb/kv - name: txn.restarts.unknown exported_name: txn_restarts_unknown description: Number of restarts due to a unknown reasons @@ -2073,7 +1877,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review transaction contention best practices and performance tuning recipes. Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: txn.restarts.writetooold exported_name: txn_restarts_writetooold description: Number of restarts due to a concurrent writer committing first @@ -2084,7 +1887,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review transaction contention best practices and performance tuning recipes. Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. visibility: ESSENTIAL - owner: cockroachdb/kv - name: TTL metrics: - name: jobs.row_level_ttl.currently_paused @@ -2098,7 +1900,6 @@ layers: derivative: NONE how_to_use: Monitor this metric to ensure the Row Level TTL job does not remain paused inadvertently for an extended period. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.row_level_ttl.currently_running exported_name: jobs_row_level_ttl_currently_running labeled_name: 'jobs{type: row_level_ttl, status: currently_running}' @@ -2110,7 +1911,6 @@ layers: derivative: NONE how_to_use: Monitor this metric to ensure there are not too many Row Level TTL jobs running at the same time. Generally, this metric should be in the low single digits. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.row_level_ttl.delete_duration exported_name: jobs_row_level_ttl_delete_duration description: Duration for delete requests during row level TTL. @@ -2121,7 +1921,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.num_active_spans exported_name: jobs_row_level_ttl_num_active_spans description: Number of active spans the TTL job is deleting from. @@ -2132,7 +1931,6 @@ layers: derivative: NONE how_to_use: See Description. 
visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.resume_completed exported_name: jobs_row_level_ttl_resume_completed labeled_name: 'jobs.resume{name: row_level_ttl, status: completed}' @@ -2144,7 +1942,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: If Row Level TTL is enabled, this metric should be nonzero and correspond to the ttl_cron setting that was chosen. If this metric is zero, it means the job is not running visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.row_level_ttl.resume_failed exported_name: jobs_row_level_ttl_resume_failed labeled_name: 'jobs.resume{name: row_level_ttl, status: failed}' @@ -2156,7 +1953,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric should remain at zero. Repeated errors means the Row Level TTL job is not deleting data. visibility: ESSENTIAL - owner: cockroachdb/jobs - name: jobs.row_level_ttl.rows_deleted exported_name: jobs_row_level_ttl_rows_deleted description: Number of rows deleted by the row level TTL job. @@ -2167,7 +1963,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Correlate this metric with the metric jobs.row_level_ttl.rows_selected to ensure all the rows that should be deleted are actually getting deleted. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.rows_selected exported_name: jobs_row_level_ttl_rows_selected description: Number of rows selected for deletion by the row level TTL job. @@ -2178,7 +1973,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Correlate this metric with the metric jobs.row_level_ttl.rows_deleted to ensure all the rows that should be deleted are actually getting deleted. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.select_duration exported_name: jobs_row_level_ttl_select_duration description: Duration for select requests during row level TTL. 
@@ -2189,7 +1983,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.span_total_duration exported_name: jobs_row_level_ttl_span_total_duration description: Duration for processing a span during row level TTL. @@ -2200,7 +1993,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.total_expired_rows exported_name: jobs_row_level_ttl_total_expired_rows description: Approximate number of rows that have expired the TTL on the TTL table. @@ -2211,7 +2003,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.total_rows exported_name: jobs_row_level_ttl_total_rows description: Approximate number of rows on the TTL table. @@ -2222,7 +2013,6 @@ layers: derivative: NONE how_to_use: See Description. visibility: ESSENTIAL - owner: cockroachdb/sql-queries - name: schedules.scheduled-row-level-ttl-executor.failed exported_name: schedules_scheduled_row_level_ttl_executor_failed labeled_name: 'schedules{name: scheduled-row-level-ttl-executor, status: failed}' @@ -2234,7 +2024,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Monitor this metric to ensure the Row Level TTL job is running. If it is non-zero, it means the job could not be created. 
visibility: ESSENTIAL - owner: cockroachdb/jobs - name: UNSET metrics: - name: backup.last-failed-time.kms-inaccessible @@ -2245,7 +2034,6 @@ layers: unit: TIMESTAMP_SEC aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: clock-offset.medianabsdevnanos exported_name: clock_offset_medianabsdevnanos description: Median Absolute Deviation (MAD) with other nodes @@ -2254,7 +2042,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: clock-offset.mediannanos exported_name: clock_offset_mediannanos description: Median clock offset with other nodes @@ -2263,7 +2050,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: clock-offset.stddevnanos exported_name: clock_offset_stddevnanos description: Stddev clock offset with other nodes @@ -2272,7 +2058,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: cloud.conns_opened exported_name: cloud_conns_opened description: HTTP connections opened by cloud operations @@ -2281,7 +2066,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.conns_reused exported_name: cloud_conns_reused description: HTTP connections reused by cloud operations @@ -2290,7 +2074,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.listing_results exported_name: cloud_listing_results description: Listing results by all cloud operations @@ -2299,7 +2082,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.listings exported_name: cloud_listings description: Listing operations by all cloud operations @@ -2308,7 +2090,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.open_readers exported_name: 
cloud_open_readers description: Currently open readers for cloud IO @@ -2317,7 +2098,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: cloud.open_writers exported_name: cloud_open_writers description: Currently open writers for cloud IO @@ -2326,7 +2106,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: cloud.read_bytes exported_name: cloud_read_bytes description: Bytes read from all cloud operations @@ -2335,7 +2114,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.readers_opened exported_name: cloud_readers_opened description: Readers opened by all cloud operations @@ -2344,7 +2122,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.tls_handshakes exported_name: cloud_tls_handshakes description: TLS handshakes done by cloud operations @@ -2353,7 +2130,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.write_bytes exported_name: cloud_write_bytes description: Bytes written by all cloud operations @@ -2362,7 +2138,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cloud.writers_opened exported_name: cloud_writers_opened description: Writers opened by all cloud operations @@ -2371,7 +2146,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: cluster.preserve-downgrade-option.last-updated exported_name: cluster_preserve_downgrade_option_last_updated description: Unix timestamp of last updated time for cluster.preserve_downgrade_option @@ -2380,7 +2154,6 @@ layers: unit: TIMESTAMP_SEC aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: 
distsender.batch_requests.cross_region.bytes exported_name: distsender_batch_requests_cross_region_bytes description: Total byte count of replica-addressed batch requests processed cross region when region tiers are configured @@ -2389,7 +2162,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batch_requests.cross_zone.bytes exported_name: distsender_batch_requests_cross_zone_bytes description: Total byte count of replica-addressed batch requests processed cross zone within the same region when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. @@ -2398,7 +2170,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batch_requests.replica_addressed.bytes exported_name: distsender_batch_requests_replica_addressed_bytes description: Total byte count of replica-addressed batch requests processed @@ -2407,7 +2178,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batch_responses.cross_region.bytes exported_name: distsender_batch_responses_cross_region_bytes description: Total byte count of replica-addressed batch responses received cross region when region tiers are configured @@ -2416,7 +2186,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batch_responses.cross_zone.bytes exported_name: distsender_batch_responses_cross_zone_bytes description: Total byte count of replica-addressed batch responses received cross zone within the same region when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. 
To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. @@ -2425,7 +2194,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batch_responses.replica_addressed.bytes exported_name: distsender_batch_responses_replica_addressed_bytes description: Total byte count of replica-addressed batch responses received @@ -2434,7 +2202,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batches exported_name: distsender_batches description: Number of batches processed @@ -2443,7 +2210,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batches.async.in_progress exported_name: distsender_batches_async_in_progress description: Number of partial batches currently being executed asynchronously @@ -2452,7 +2218,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.batches.async.sent exported_name: distsender_batches_async_sent description: Number of partial batches sent asynchronously @@ -2461,7 +2226,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batches.async.throttled exported_name: distsender_batches_async_throttled description: Number of partial batches not sent asynchronously due to throttling @@ -2470,7 +2234,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.batches.async.throttled_cumulative_duration_nanos exported_name: distsender_batches_async_throttled_cumulative_duration_nanos description: Cumulative duration of partial batches being throttled (in nanoseconds) @@ -2479,7 +2242,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
distsender.batches.partial exported_name: distsender_batches_partial description: Number of partial batches processed after being divided on range boundaries @@ -2488,7 +2250,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.count exported_name: distsender_circuit_breaker_replicas_count description: Number of replicas currently tracked by DistSender circuit breakers @@ -2497,7 +2258,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.probes.failure exported_name: distsender_circuit_breaker_replicas_probes_failure description: Cumulative number of failed DistSender replica circuit breaker probes @@ -2506,7 +2266,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.probes.running exported_name: distsender_circuit_breaker_replicas_probes_running description: Number of currently running DistSender replica circuit breaker probes @@ -2515,7 +2274,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.probes.success exported_name: distsender_circuit_breaker_replicas_probes_success description: Cumulative number of successful DistSender replica circuit breaker probes @@ -2524,7 +2282,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.requests.cancelled exported_name: distsender_circuit_breaker_replicas_requests_cancelled description: Cumulative number of requests cancelled when DistSender replica circuit breakers trip @@ -2533,7 +2290,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.requests.rejected exported_name: 
distsender_circuit_breaker_replicas_requests_rejected description: Cumulative number of requests rejected by tripped DistSender replica circuit breakers @@ -2542,7 +2298,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.tripped exported_name: distsender_circuit_breaker_replicas_tripped description: Number of DistSender replica circuit breakers currently tripped @@ -2551,7 +2306,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.circuit_breaker.replicas.tripped_events exported_name: distsender_circuit_breaker_replicas_tripped_events description: Cumulative number of DistSender replica circuit breakers tripped over time @@ -2560,7 +2314,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.errors.inleasetransferbackoffs exported_name: distsender_errors_inleasetransferbackoffs description: Number of times backed off due to NotLeaseHolderErrors during lease transfer @@ -2569,7 +2322,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.range_cache.leaseholder_randomized exported_name: distsender_range_cache_leaseholder_randomized description: Number of times the DistSender randomized the leaseholder for range cache entries upon seeing a context error @@ -2578,7 +2330,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.catchup_ranges exported_name: distsender_rangefeed_catchup_ranges description: Number of ranges with an active rangefeed that are performing a catchup scan. 
@@ -2587,7 +2338,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.rangefeed.catchup_ranges_waiting_client_side exported_name: distsender_rangefeed_catchup_ranges_waiting_client_side description: Number of ranges waiting on the client-side limiter to perform catchup scans @@ -2596,7 +2346,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.rangefeed.error_catchup_ranges exported_name: distsender_rangefeed_error_catchup_ranges description: Number of ranges in catchup mode which experienced an error @@ -2605,7 +2354,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.local_ranges exported_name: distsender_rangefeed_local_ranges description: Number of ranges connected to local node. @@ -2614,7 +2362,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.rangefeed.restart_ranges exported_name: distsender_rangefeed_restart_ranges description: Number of ranges that were restarted due to transient errors @@ -2623,7 +2370,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.logical_ops_missing exported_name: distsender_rangefeed_retry_logical_ops_missing description: Number of ranges that encountered retryable LOGICAL_OPS_MISSING error @@ -2632,7 +2378,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.manual_range_split exported_name: distsender_rangefeed_retry_manual_range_split description: Number of ranges that encountered retryable MANUAL_RANGE_SPLIT error @@ -2641,7 +2386,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.no_leaseholder exported_name: distsender_rangefeed_retry_no_leaseholder 
description: Number of ranges that encountered retryable NO_LEASEHOLDER error @@ -2650,7 +2394,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.node_not_found exported_name: distsender_rangefeed_retry_node_not_found description: Number of ranges that encountered retryable node not found error @@ -2659,7 +2402,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.raft_snapshot exported_name: distsender_rangefeed_retry_raft_snapshot description: Number of ranges that encountered retryable RAFT_SNAPSHOT error @@ -2668,7 +2410,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.range_key_mismatch exported_name: distsender_rangefeed_retry_range_key_mismatch description: Number of ranges that encountered retryable range key mismatch error @@ -2677,7 +2418,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.range_merged exported_name: distsender_rangefeed_retry_range_merged description: Number of ranges that encountered retryable RANGE_MERGED error @@ -2686,7 +2426,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.range_not_found exported_name: distsender_rangefeed_retry_range_not_found description: Number of ranges that encountered retryable range not found error @@ -2695,7 +2434,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.range_split exported_name: distsender_rangefeed_retry_range_split description: Number of ranges that encountered retryable RANGE_SPLIT error @@ -2704,7 +2442,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - 
owner: cockroachdb/kv - name: distsender.rangefeed.retry.rangefeed_closed exported_name: distsender_rangefeed_retry_rangefeed_closed description: Number of ranges that encountered retryable RANGEFEED_CLOSED error @@ -2713,7 +2450,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.replica_removed exported_name: distsender_rangefeed_retry_replica_removed description: Number of ranges that encountered retryable REPLICA_REMOVED error @@ -2722,7 +2458,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.send exported_name: distsender_rangefeed_retry_send description: Number of ranges that encountered retryable send error @@ -2731,7 +2466,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.slow_consumer exported_name: distsender_rangefeed_retry_slow_consumer description: Number of ranges that encountered retryable SLOW_CONSUMER error @@ -2740,7 +2474,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.store_not_found exported_name: distsender_rangefeed_retry_store_not_found description: Number of ranges that encountered retryable store not found error @@ -2749,7 +2482,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.retry.unknown exported_name: distsender_rangefeed_retry_unknown description: Number of ranges that encountered retryable unknown error @@ -2758,7 +2490,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rangefeed.total_ranges exported_name: distsender_rangefeed_total_ranges description: Number of ranges with an active rangefeed. 
@@ -2767,7 +2498,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: distsender.rangelookups exported_name: distsender_rangelookups description: Number of range lookups @@ -2776,7 +2506,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.addsstable.sent exported_name: distsender_rpc_addsstable_sent description: |- @@ -2788,7 +2517,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminchangereplicas.sent exported_name: distsender_rpc_adminchangereplicas_sent description: |- @@ -2800,7 +2528,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminmerge.sent exported_name: distsender_rpc_adminmerge_sent description: |- @@ -2812,7 +2539,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminrelocaterange.sent exported_name: distsender_rpc_adminrelocaterange_sent description: |- @@ -2824,7 +2550,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminscatter.sent exported_name: distsender_rpc_adminscatter_sent description: |- @@ -2836,7 +2561,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminsplit.sent exported_name: distsender_rpc_adminsplit_sent description: |- @@ -2848,7 +2572,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.admintransferlease.sent exported_name: distsender_rpc_admintransferlease_sent description: |- @@ -2860,7 +2583,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.adminunsplit.sent exported_name: 
distsender_rpc_adminunsplit_sent description: |- @@ -2872,7 +2594,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.barrier.sent exported_name: distsender_rpc_barrier_sent description: |- @@ -2884,7 +2605,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.checkconsistency.sent exported_name: distsender_rpc_checkconsistency_sent description: |- @@ -2896,7 +2616,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.clearrange.sent exported_name: distsender_rpc_clearrange_sent description: |- @@ -2908,7 +2627,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.computechecksum.sent exported_name: distsender_rpc_computechecksum_sent description: |- @@ -2920,7 +2638,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.conditionalput.sent exported_name: distsender_rpc_conditionalput_sent description: |- @@ -2932,7 +2649,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.delete.sent exported_name: distsender_rpc_delete_sent description: |- @@ -2944,7 +2660,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.deleterange.sent exported_name: distsender_rpc_deleterange_sent description: |- @@ -2956,7 +2671,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.endtxn.sent exported_name: distsender_rpc_endtxn_sent description: |- @@ -2968,7 +2682,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.err.ambiguousresulterrtype 
exported_name: distsender_rpc_err_ambiguousresulterrtype description: Number of AmbiguousResultErrType errors received replica-bound RPCs. @@ -2978,7 +2691,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.batchtimestampbeforegcerrtype exported_name: distsender_rpc_err_batchtimestampbeforegcerrtype description: Number of BatchTimestampBeforeGCErrType errors received replica-bound RPCs. @@ -2988,7 +2700,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.communicationerrtype exported_name: distsender_rpc_err_communicationerrtype description: Number of CommunicationErrType errors received replica-bound RPCs. @@ -2998,7 +2709,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.conditionfailederrtype exported_name: distsender_rpc_err_conditionfailederrtype description: Number of ConditionFailedErrType errors received replica-bound RPCs. 
@@ -3008,7 +2718,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.exclusionviolationerrtype exported_name: distsender_rpc_err_exclusionviolationerrtype description: Number of ExclusionViolationErrType errors received replica-bound RPCs. @@ -3018,7 +2727,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.indeterminatecommiterrtype exported_name: distsender_rpc_err_indeterminatecommiterrtype description: Number of IndeterminateCommitErrType errors received replica-bound RPCs. @@ -3028,7 +2736,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.integeroverflowerrtype exported_name: distsender_rpc_err_integeroverflowerrtype description: Number of IntegerOverflowErrType errors received replica-bound RPCs. 
@@ -3038,7 +2745,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.intentmissingerrtype exported_name: distsender_rpc_err_intentmissingerrtype description: Number of IntentMissingErrType errors received replica-bound RPCs. @@ -3048,7 +2754,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.internalerrtype exported_name: distsender_rpc_err_internalerrtype description: Number of InternalErrType errors received replica-bound RPCs. @@ -3058,7 +2763,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.invalidleaseerrtype exported_name: distsender_rpc_err_invalidleaseerrtype description: Number of InvalidLeaseErrType errors received replica-bound RPCs. @@ -3068,7 +2772,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. 
Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.leaserejectederrtype exported_name: distsender_rpc_err_leaserejectederrtype description: Number of LeaseRejectedErrType errors received replica-bound RPCs. @@ -3078,7 +2781,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.lockconflicterrtype exported_name: distsender_rpc_err_lockconflicterrtype description: Number of LockConflictErrType errors received replica-bound RPCs. @@ -3088,7 +2790,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.mergeinprogresserrtype exported_name: distsender_rpc_err_mergeinprogresserrtype description: Number of MergeInProgressErrType errors received replica-bound RPCs. @@ -3098,7 +2799,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. 
- owner: cockroachdb/kv - name: distsender.rpc.err.mintimestampboundunsatisfiableerrtype exported_name: distsender_rpc_err_mintimestampboundunsatisfiableerrtype description: Number of MinTimestampBoundUnsatisfiableErrType errors received replica-bound RPCs. @@ -3108,7 +2808,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.mvcchistorymutationerrtype exported_name: distsender_rpc_err_mvcchistorymutationerrtype description: Number of MVCCHistoryMutationErrType errors received replica-bound RPCs. @@ -3118,7 +2817,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.nodeunavailableerrtype exported_name: distsender_rpc_err_nodeunavailableerrtype description: Number of NodeUnavailableErrType errors received replica-bound RPCs. @@ -3128,7 +2826,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. 
- owner: cockroachdb/kv - name: distsender.rpc.err.notleaseholdererrtype exported_name: distsender_rpc_err_notleaseholdererrtype description: Number of NotLeaseHolderErrType errors received replica-bound RPCs. @@ -3138,7 +2835,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.oprequirestxnerrtype exported_name: distsender_rpc_err_oprequirestxnerrtype description: Number of OpRequiresTxnErrType errors received replica-bound RPCs. @@ -3148,7 +2844,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.optimisticevalconflictserrtype exported_name: distsender_rpc_err_optimisticevalconflictserrtype description: Number of OptimisticEvalConflictsErrType errors received replica-bound RPCs. @@ -3158,7 +2853,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. 
- owner: cockroachdb/kv - name: distsender.rpc.err.proxyfailederrtype exported_name: distsender_rpc_err_proxyfailederrtype description: Number of ProxyFailedErrType errors received replica-bound RPCs. @@ -3168,7 +2862,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.raftgroupdeletederrtype exported_name: distsender_rpc_err_raftgroupdeletederrtype description: Number of RaftGroupDeletedErrType errors received replica-bound RPCs. @@ -3178,7 +2871,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.rangefeedretryerrtype exported_name: distsender_rpc_err_rangefeedretryerrtype description: Number of RangeFeedRetryErrType errors received replica-bound RPCs. @@ -3188,7 +2880,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.rangekeymismatcherrtype exported_name: distsender_rpc_err_rangekeymismatcherrtype description: Number of RangeKeyMismatchErrType errors received replica-bound RPCs. 
@@ -3198,7 +2889,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.rangenotfounderrtype exported_name: distsender_rpc_err_rangenotfounderrtype description: Number of RangeNotFoundErrType errors received replica-bound RPCs. @@ -3208,7 +2898,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.readwithinuncertaintyintervalerrtype exported_name: distsender_rpc_err_readwithinuncertaintyintervalerrtype description: Number of ReadWithinUncertaintyIntervalErrType errors received replica-bound RPCs. @@ -3218,7 +2907,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.refreshfailederrtype exported_name: distsender_rpc_err_refreshfailederrtype description: Number of RefreshFailedErrType errors received replica-bound RPCs. 
@@ -3228,7 +2916,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.replicatooolderrtype exported_name: distsender_rpc_err_replicatooolderrtype description: Number of ReplicaTooOldErrType errors received replica-bound RPCs. @@ -3238,7 +2925,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.replicaunavailableerrtype exported_name: distsender_rpc_err_replicaunavailableerrtype description: Number of ReplicaUnavailableErrType errors received replica-bound RPCs. @@ -3248,7 +2934,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.storenotfounderrtype exported_name: distsender_rpc_err_storenotfounderrtype description: Number of StoreNotFoundErrType errors received replica-bound RPCs. 
@@ -3258,7 +2943,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.transactionabortederrtype exported_name: distsender_rpc_err_transactionabortederrtype description: Number of TransactionAbortedErrType errors received replica-bound RPCs. @@ -3268,7 +2952,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.transactionpusherrtype exported_name: distsender_rpc_err_transactionpusherrtype description: Number of TransactionPushErrType errors received replica-bound RPCs. @@ -3278,7 +2961,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.transactionretryerrtype exported_name: distsender_rpc_err_transactionretryerrtype description: Number of TransactionRetryErrType errors received replica-bound RPCs. 
@@ -3288,7 +2970,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.transactionretrywithprotorefresherrtype exported_name: distsender_rpc_err_transactionretrywithprotorefresherrtype description: Number of TransactionRetryWithProtoRefreshErrType errors received replica-bound RPCs. @@ -3298,7 +2979,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.transactionstatuserrtype exported_name: distsender_rpc_err_transactionstatuserrtype description: Number of TransactionStatusErrType errors received replica-bound RPCs. @@ -3308,7 +2988,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.txnalreadyencounterederrtype exported_name: distsender_rpc_err_txnalreadyencounterederrtype description: Number of TxnAlreadyEncounteredErrType errors received replica-bound RPCs. 
@@ -3318,7 +2997,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.unsupportedrequesterrtype exported_name: distsender_rpc_err_unsupportedrequesterrtype description: Number of UnsupportedRequestErrType errors received replica-bound RPCs. @@ -3328,7 +3006,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.writeintenterrtype exported_name: distsender_rpc_err_writeintenterrtype description: Number of WriteIntentErrType errors received replica-bound RPCs. @@ -3338,7 +3015,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.err.writetooolderrtype exported_name: distsender_rpc_err_writetooolderrtype description: Number of WriteTooOldErrType errors received replica-bound RPCs. 
@@ -3348,7 +3024,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This counts how often an error of the specified type was received back from replicas as part of executing possibly range-spanning requests. Failures to reach the target replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. - owner: cockroachdb/kv - name: distsender.rpc.excise.sent exported_name: distsender_rpc_excise_sent description: |- @@ -3360,7 +3035,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.export.sent exported_name: distsender_rpc_export_sent description: |- @@ -3372,7 +3046,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.flushlocktable.sent exported_name: distsender_rpc_flushlocktable_sent description: |- @@ -3384,7 +3057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.gc.sent exported_name: distsender_rpc_gc_sent description: |- @@ -3396,7 +3068,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.get.sent exported_name: distsender_rpc_get_sent description: |- @@ -3408,7 +3079,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.heartbeattxn.sent exported_name: distsender_rpc_heartbeattxn_sent description: |- @@ -3420,7 +3090,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.increment.sent exported_name: distsender_rpc_increment_sent description: |- @@ -3432,7 +3101,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.isspanempty.sent exported_name: distsender_rpc_isspanempty_sent 
description: |- @@ -3444,7 +3112,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.leaseinfo.sent exported_name: distsender_rpc_leaseinfo_sent description: |- @@ -3456,7 +3123,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.linkexternalsstable.sent exported_name: distsender_rpc_linkexternalsstable_sent description: |- @@ -3468,7 +3134,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.merge.sent exported_name: distsender_rpc_merge_sent description: |- @@ -3480,7 +3145,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.migrate.sent exported_name: distsender_rpc_migrate_sent description: |- @@ -3492,7 +3156,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.probe.sent exported_name: distsender_rpc_probe_sent description: |- @@ -3504,7 +3167,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.proxy.err exported_name: distsender_rpc_proxy_err description: Number of attempts by a gateway to proxy a request which resulted in a failure. @@ -3513,7 +3175,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.proxy.forward.err exported_name: distsender_rpc_proxy_forward_err description: Number of attempts on a follower replica to proxy a request which resulted in a failure. 
@@ -3522,7 +3183,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.proxy.forward.sent exported_name: distsender_rpc_proxy_forward_sent description: Number of attempts on a follower replica to proxy a request to an unreachable leaseholder. @@ -3531,7 +3191,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.proxy.sent exported_name: distsender_rpc_proxy_sent description: Number of attempts by a gateway to proxy a request to an unreachable leaseholder via a follower replica. @@ -3540,7 +3199,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.pushtxn.sent exported_name: distsender_rpc_pushtxn_sent description: |- @@ -3552,7 +3210,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.put.sent exported_name: distsender_rpc_put_sent description: |- @@ -3564,7 +3221,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.queryintent.sent exported_name: distsender_rpc_queryintent_sent description: |- @@ -3576,7 +3232,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.querylocks.sent exported_name: distsender_rpc_querylocks_sent description: |- @@ -3588,7 +3243,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.queryresolvedtimestamp.sent exported_name: distsender_rpc_queryresolvedtimestamp_sent description: |- @@ -3600,7 +3254,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.querytxn.sent exported_name: distsender_rpc_querytxn_sent description: |- @@ -3612,7 +3265,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.rangestats.sent exported_name: distsender_rpc_rangestats_sent description: |- @@ -3624,7 +3276,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.recomputestats.sent exported_name: distsender_rpc_recomputestats_sent description: |- @@ -3636,7 +3287,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.recovertxn.sent exported_name: distsender_rpc_recovertxn_sent description: |- @@ -3648,7 +3298,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.refresh.sent exported_name: distsender_rpc_refresh_sent description: |- @@ -3660,7 +3309,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.refreshrange.sent exported_name: distsender_rpc_refreshrange_sent description: |- @@ -3672,7 +3320,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.requestlease.sent exported_name: distsender_rpc_requestlease_sent description: |- @@ -3684,7 +3331,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.resolveintent.sent exported_name: distsender_rpc_resolveintent_sent description: |- @@ -3696,7 +3342,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.resolveintentrange.sent exported_name: distsender_rpc_resolveintentrange_sent description: |- @@ -3708,7 +3353,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.reversescan.sent exported_name: distsender_rpc_reversescan_sent description: |- @@ -3720,7 +3364,6 @@ 
layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.revertrange.sent exported_name: distsender_rpc_revertrange_sent description: |- @@ -3732,7 +3375,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.scan.sent exported_name: distsender_rpc_scan_sent description: |- @@ -3744,7 +3386,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.sent exported_name: distsender_rpc_sent description: Number of replica-addressed RPCs sent @@ -3753,7 +3394,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.sent.local exported_name: distsender_rpc_sent_local description: Number of replica-addressed RPCs sent through the local-server optimization @@ -3762,7 +3402,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.subsume.sent exported_name: distsender_rpc_subsume_sent description: |- @@ -3774,7 +3413,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.transferlease.sent exported_name: distsender_rpc_transferlease_sent description: |- @@ -3786,7 +3424,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.truncatelog.sent exported_name: distsender_rpc_truncatelog_sent description: |- @@ -3798,7 +3435,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.rpc.writebatch.sent exported_name: distsender_rpc_writebatch_sent description: |- @@ -3810,7 +3446,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: distsender.slow.replicarpcs exported_name: 
distsender_slow_replicarpcs description: |- @@ -3822,7 +3457,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: jobs.adopt_iterations exported_name: jobs_adopt_iterations description: number of job-adopt iterations performed by the registry @@ -3831,7 +3465,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.currently_idle exported_name: jobs_auto_config_env_runner_currently_idle labeled_name: 'jobs{type: auto_config_env_runner, status: currently_idle}' @@ -3841,7 +3474,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.currently_paused exported_name: jobs_auto_config_env_runner_currently_paused labeled_name: 'jobs{name: auto_config_env_runner, status: currently_paused}' @@ -3851,7 +3483,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.currently_running exported_name: jobs_auto_config_env_runner_currently_running labeled_name: 'jobs{type: auto_config_env_runner, status: currently_running}' @@ -3861,7 +3492,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.expired_pts_records exported_name: jobs_auto_config_env_runner_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_config_env_runner}' @@ -3871,7 +3501,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.fail_or_cancel_completed exported_name: jobs_auto_config_env_runner_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_config_env_runner, status: completed}' @@ -3881,7 +3510,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
jobs.auto_config_env_runner.fail_or_cancel_retry_error exported_name: jobs_auto_config_env_runner_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_config_env_runner, status: retry_error}' @@ -3891,7 +3519,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.protected_age_sec exported_name: jobs_auto_config_env_runner_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_config_env_runner}' @@ -3901,7 +3528,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.protected_record_count exported_name: jobs_auto_config_env_runner_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_config_env_runner}' @@ -3911,7 +3537,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.resume_completed exported_name: jobs_auto_config_env_runner_resume_completed labeled_name: 'jobs.resume{name: auto_config_env_runner, status: completed}' @@ -3921,7 +3546,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.resume_failed exported_name: jobs_auto_config_env_runner_resume_failed labeled_name: 'jobs.resume{name: auto_config_env_runner, status: failed}' @@ -3931,7 +3555,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_env_runner.resume_retry_error exported_name: jobs_auto_config_env_runner_resume_retry_error labeled_name: 'jobs.resume{name: auto_config_env_runner, status: retry_error}' @@ -3941,7 +3564,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.currently_idle exported_name: jobs_auto_config_runner_currently_idle labeled_name: 'jobs{type: auto_config_runner, 
status: currently_idle}' @@ -3951,7 +3573,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.currently_paused exported_name: jobs_auto_config_runner_currently_paused labeled_name: 'jobs{name: auto_config_runner, status: currently_paused}' @@ -3961,7 +3582,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.currently_running exported_name: jobs_auto_config_runner_currently_running labeled_name: 'jobs{type: auto_config_runner, status: currently_running}' @@ -3971,7 +3591,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.expired_pts_records exported_name: jobs_auto_config_runner_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_config_runner}' @@ -3981,7 +3600,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.fail_or_cancel_completed exported_name: jobs_auto_config_runner_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_config_runner, status: completed}' @@ -3991,7 +3609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.fail_or_cancel_retry_error exported_name: jobs_auto_config_runner_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_config_runner, status: retry_error}' @@ -4001,7 +3618,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.protected_age_sec exported_name: jobs_auto_config_runner_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_config_runner}' @@ -4011,7 +3627,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.protected_record_count exported_name: 
jobs_auto_config_runner_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_config_runner}' @@ -4021,7 +3636,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.resume_completed exported_name: jobs_auto_config_runner_resume_completed labeled_name: 'jobs.resume{name: auto_config_runner, status: completed}' @@ -4031,7 +3645,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.resume_failed exported_name: jobs_auto_config_runner_resume_failed labeled_name: 'jobs.resume{name: auto_config_runner, status: failed}' @@ -4041,7 +3654,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_runner.resume_retry_error exported_name: jobs_auto_config_runner_resume_retry_error labeled_name: 'jobs.resume{name: auto_config_runner, status: retry_error}' @@ -4051,7 +3663,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_task.currently_idle exported_name: jobs_auto_config_task_currently_idle labeled_name: 'jobs{type: auto_config_task, status: currently_idle}' @@ -4061,7 +3672,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_task.currently_paused exported_name: jobs_auto_config_task_currently_paused labeled_name: 'jobs{name: auto_config_task, status: currently_paused}' @@ -4071,7 +3681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_task.currently_running exported_name: jobs_auto_config_task_currently_running labeled_name: 'jobs{type: auto_config_task, status: currently_running}' @@ -4081,7 +3690,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_task.expired_pts_records exported_name: 
jobs_auto_config_task_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_config_task}' @@ -4091,7 +3699,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_task.fail_or_cancel_completed exported_name: jobs_auto_config_task_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_config_task, status: completed}' @@ -4101,7 +3708,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_task.fail_or_cancel_retry_error exported_name: jobs_auto_config_task_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_config_task, status: retry_error}' @@ -4111,7 +3717,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_task.protected_age_sec exported_name: jobs_auto_config_task_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_config_task}' @@ -4121,7 +3726,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_task.protected_record_count exported_name: jobs_auto_config_task_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_config_task}' @@ -4131,7 +3735,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_config_task.resume_completed exported_name: jobs_auto_config_task_resume_completed labeled_name: 'jobs.resume{name: auto_config_task, status: completed}' @@ -4141,7 +3744,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_config_task.resume_failed exported_name: jobs_auto_config_task_resume_failed labeled_name: 'jobs.resume{name: auto_config_task, status: failed}' @@ -4151,7 +3753,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - 
name: jobs.auto_config_task.resume_retry_error exported_name: jobs_auto_config_task_resume_retry_error labeled_name: 'jobs.resume{name: auto_config_task, status: retry_error}' @@ -4161,7 +3762,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.currently_idle exported_name: jobs_auto_create_partial_stats_currently_idle labeled_name: 'jobs{type: auto_create_partial_stats, status: currently_idle}' @@ -4171,7 +3771,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.fail_or_cancel_completed exported_name: jobs_auto_create_partial_stats_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_create_partial_stats, status: completed}' @@ -4181,7 +3780,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.fail_or_cancel_retry_error exported_name: jobs_auto_create_partial_stats_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_create_partial_stats, status: retry_error}' @@ -4191,7 +3789,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.resume_completed exported_name: jobs_auto_create_partial_stats_resume_completed labeled_name: 'jobs.resume{name: auto_create_partial_stats, status: completed}' @@ -4201,7 +3798,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_partial_stats.resume_retry_error exported_name: jobs_auto_create_partial_stats_resume_retry_error labeled_name: 'jobs.resume{name: auto_create_partial_stats, status: retry_error}' @@ -4211,7 +3807,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_stats.currently_idle exported_name: 
jobs_auto_create_stats_currently_idle labeled_name: 'jobs{type: auto_create_stats, status: currently_idle}' @@ -4221,7 +3816,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_create_stats.fail_or_cancel_completed exported_name: jobs_auto_create_stats_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_create_stats, status: completed}' @@ -4231,7 +3825,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_stats.fail_or_cancel_retry_error exported_name: jobs_auto_create_stats_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_create_stats, status: retry_error}' @@ -4241,7 +3834,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_stats.resume_completed exported_name: jobs_auto_create_stats_resume_completed labeled_name: 'jobs.resume{name: auto_create_stats, status: completed}' @@ -4251,7 +3843,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_create_stats.resume_retry_error exported_name: jobs_auto_create_stats_resume_retry_error labeled_name: 'jobs.resume{name: auto_create_stats, status: retry_error}' @@ -4261,7 +3852,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.currently_idle exported_name: jobs_auto_schema_telemetry_currently_idle labeled_name: 'jobs{type: auto_schema_telemetry, status: currently_idle}' @@ -4271,7 +3861,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.currently_paused exported_name: jobs_auto_schema_telemetry_currently_paused labeled_name: 'jobs{name: auto_schema_telemetry, status: currently_paused}' @@ -4281,7 +3870,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE 
- owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.currently_running exported_name: jobs_auto_schema_telemetry_currently_running labeled_name: 'jobs{type: auto_schema_telemetry, status: currently_running}' @@ -4291,7 +3879,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.expired_pts_records exported_name: jobs_auto_schema_telemetry_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_schema_telemetry}' @@ -4301,7 +3888,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.fail_or_cancel_completed exported_name: jobs_auto_schema_telemetry_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_schema_telemetry, status: completed}' @@ -4311,7 +3897,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.fail_or_cancel_retry_error exported_name: jobs_auto_schema_telemetry_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_schema_telemetry, status: retry_error}' @@ -4321,7 +3906,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.protected_age_sec exported_name: jobs_auto_schema_telemetry_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_schema_telemetry}' @@ -4331,7 +3915,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.protected_record_count exported_name: jobs_auto_schema_telemetry_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_schema_telemetry}' @@ -4341,7 +3924,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.resume_completed exported_name: jobs_auto_schema_telemetry_resume_completed 
labeled_name: 'jobs.resume{name: auto_schema_telemetry, status: completed}' @@ -4351,7 +3933,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.resume_failed exported_name: jobs_auto_schema_telemetry_resume_failed labeled_name: 'jobs.resume{name: auto_schema_telemetry, status: failed}' @@ -4361,7 +3942,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_schema_telemetry.resume_retry_error exported_name: jobs_auto_schema_telemetry_resume_retry_error labeled_name: 'jobs.resume{name: auto_schema_telemetry, status: retry_error}' @@ -4371,7 +3951,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.currently_idle exported_name: jobs_auto_span_config_reconciliation_currently_idle labeled_name: 'jobs{type: auto_span_config_reconciliation, status: currently_idle}' @@ -4381,7 +3960,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.currently_paused exported_name: jobs_auto_span_config_reconciliation_currently_paused labeled_name: 'jobs{name: auto_span_config_reconciliation, status: currently_paused}' @@ -4391,7 +3969,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.currently_running exported_name: jobs_auto_span_config_reconciliation_currently_running labeled_name: 'jobs{type: auto_span_config_reconciliation, status: currently_running}' @@ -4401,7 +3978,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.expired_pts_records exported_name: jobs_auto_span_config_reconciliation_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_span_config_reconciliation}' @@ -4411,7 +3987,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.fail_or_cancel_completed exported_name: jobs_auto_span_config_reconciliation_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_span_config_reconciliation, status: completed}' @@ -4421,7 +3996,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.fail_or_cancel_retry_error exported_name: jobs_auto_span_config_reconciliation_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_span_config_reconciliation, status: retry_error}' @@ -4431,7 +4005,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.protected_age_sec exported_name: jobs_auto_span_config_reconciliation_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_span_config_reconciliation}' @@ -4441,7 +4014,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.protected_record_count exported_name: jobs_auto_span_config_reconciliation_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_span_config_reconciliation}' @@ -4451,7 +4023,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.resume_completed exported_name: jobs_auto_span_config_reconciliation_resume_completed labeled_name: 'jobs.resume{name: auto_span_config_reconciliation, status: completed}' @@ -4461,7 +4032,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.resume_failed exported_name: jobs_auto_span_config_reconciliation_resume_failed labeled_name: 'jobs.resume{name: 
auto_span_config_reconciliation, status: failed}' @@ -4471,7 +4041,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_span_config_reconciliation.resume_retry_error exported_name: jobs_auto_span_config_reconciliation_resume_retry_error labeled_name: 'jobs.resume{name: auto_span_config_reconciliation, status: retry_error}' @@ -4481,7 +4050,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.currently_idle exported_name: jobs_auto_sql_stats_compaction_currently_idle labeled_name: 'jobs{type: auto_sql_stats_compaction, status: currently_idle}' @@ -4491,7 +4059,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.currently_paused exported_name: jobs_auto_sql_stats_compaction_currently_paused labeled_name: 'jobs{name: auto_sql_stats_compaction, status: currently_paused}' @@ -4501,7 +4068,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.currently_running exported_name: jobs_auto_sql_stats_compaction_currently_running labeled_name: 'jobs{type: auto_sql_stats_compaction, status: currently_running}' @@ -4511,7 +4077,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.expired_pts_records exported_name: jobs_auto_sql_stats_compaction_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_sql_stats_compaction}' @@ -4521,7 +4086,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.fail_or_cancel_completed exported_name: jobs_auto_sql_stats_compaction_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_sql_stats_compaction, status: completed}' @@ -4531,7 +4095,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.fail_or_cancel_retry_error exported_name: jobs_auto_sql_stats_compaction_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_sql_stats_compaction, status: retry_error}' @@ -4541,7 +4104,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.protected_age_sec exported_name: jobs_auto_sql_stats_compaction_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_sql_stats_compaction}' @@ -4551,7 +4113,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.protected_record_count exported_name: jobs_auto_sql_stats_compaction_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_sql_stats_compaction}' @@ -4561,7 +4122,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.resume_completed exported_name: jobs_auto_sql_stats_compaction_resume_completed labeled_name: 'jobs.resume{name: auto_sql_stats_compaction, status: completed}' @@ -4571,7 +4131,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.resume_failed exported_name: jobs_auto_sql_stats_compaction_resume_failed labeled_name: 'jobs.resume{name: auto_sql_stats_compaction, status: failed}' @@ -4581,7 +4140,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_sql_stats_compaction.resume_retry_error exported_name: jobs_auto_sql_stats_compaction_resume_retry_error labeled_name: 'jobs.resume{name: auto_sql_stats_compaction, status: retry_error}' @@ -4591,7 +4149,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - 
name: jobs.auto_update_sql_activity.currently_idle exported_name: jobs_auto_update_sql_activity_currently_idle labeled_name: 'jobs{type: auto_update_sql_activity, status: currently_idle}' @@ -4601,7 +4158,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.currently_paused exported_name: jobs_auto_update_sql_activity_currently_paused labeled_name: 'jobs{name: auto_update_sql_activity, status: currently_paused}' @@ -4611,7 +4167,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.currently_running exported_name: jobs_auto_update_sql_activity_currently_running labeled_name: 'jobs{type: auto_update_sql_activity, status: currently_running}' @@ -4621,7 +4176,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.expired_pts_records exported_name: jobs_auto_update_sql_activity_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: auto_update_sql_activity}' @@ -4631,7 +4185,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.fail_or_cancel_completed exported_name: jobs_auto_update_sql_activity_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: auto_update_sql_activity, status: completed}' @@ -4641,7 +4194,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.fail_or_cancel_retry_error exported_name: jobs_auto_update_sql_activity_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: auto_update_sql_activity, status: retry_error}' @@ -4651,7 +4203,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.protected_age_sec exported_name: 
jobs_auto_update_sql_activity_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: auto_update_sql_activity}' @@ -4661,7 +4212,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.protected_record_count exported_name: jobs_auto_update_sql_activity_protected_record_count labeled_name: 'jobs.protected_record_count{type: auto_update_sql_activity}' @@ -4671,7 +4221,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.resume_completed exported_name: jobs_auto_update_sql_activity_resume_completed labeled_name: 'jobs.resume{name: auto_update_sql_activity, status: completed}' @@ -4681,7 +4230,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.resume_failed exported_name: jobs_auto_update_sql_activity_resume_failed labeled_name: 'jobs.resume{name: auto_update_sql_activity, status: failed}' @@ -4691,7 +4239,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.auto_update_sql_activity.resume_retry_error exported_name: jobs_auto_update_sql_activity_resume_retry_error labeled_name: 'jobs.resume{name: auto_update_sql_activity, status: retry_error}' @@ -4701,7 +4248,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.backup.currently_idle exported_name: jobs_backup_currently_idle labeled_name: 'jobs{type: backup, status: currently_idle}' @@ -4711,7 +4257,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.backup.expired_pts_records exported_name: jobs_backup_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: backup}' @@ -4721,7 +4266,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
jobs.backup.fail_or_cancel_completed exported_name: jobs_backup_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: backup, status: completed}' @@ -4731,7 +4275,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.backup.fail_or_cancel_retry_error exported_name: jobs_backup_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: backup, status: retry_error}' @@ -4741,7 +4284,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.backup.protected_age_sec exported_name: jobs_backup_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: backup}' @@ -4751,7 +4293,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.backup.protected_record_count exported_name: jobs_backup_protected_record_count labeled_name: 'jobs.protected_record_count{type: backup}' @@ -4761,7 +4302,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.backup.resume_completed exported_name: jobs_backup_resume_completed labeled_name: 'jobs.resume{name: backup, status: completed}' @@ -4771,7 +4311,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.backup.resume_failed exported_name: jobs_backup_resume_failed labeled_name: 'jobs.resume{name: backup, status: failed}' @@ -4781,7 +4320,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.backup.resume_retry_error exported_name: jobs_backup_resume_retry_error labeled_name: 'jobs.resume{name: backup, status: retry_error}' @@ -4791,7 +4329,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.currently_idle exported_name: jobs_changefeed_currently_idle labeled_name: 'jobs{type: changefeed, status: currently_idle}' 
@@ -4801,7 +4338,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.changefeed.currently_running exported_name: jobs_changefeed_currently_running labeled_name: 'jobs{type: changefeed, status: currently_running}' @@ -4811,7 +4347,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.changefeed.expired_pts_records exported_name: jobs_changefeed_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: changefeed}' @@ -4821,7 +4356,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.fail_or_cancel_completed exported_name: jobs_changefeed_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: changefeed, status: completed}' @@ -4831,7 +4365,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.fail_or_cancel_retry_error exported_name: jobs_changefeed_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: changefeed, status: retry_error}' @@ -4841,7 +4374,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.protected_record_count exported_name: jobs_changefeed_protected_record_count labeled_name: 'jobs.protected_record_count{type: changefeed}' @@ -4851,7 +4383,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.changefeed.resume_completed exported_name: jobs_changefeed_resume_completed labeled_name: 'jobs.resume{name: changefeed, status: completed}' @@ -4861,7 +4392,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.resume_failed exported_name: jobs_changefeed_resume_failed labeled_name: 'jobs.resume{name: changefeed, status: failed}' @@ -4871,7 +4401,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.changefeed.resume_retry_error exported_name: jobs_changefeed_resume_retry_error labeled_name: 'jobs.resume{name: changefeed, status: retry_error}' @@ -4881,7 +4410,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.claimed_jobs exported_name: jobs_claimed_jobs description: number of jobs claimed in job-adopt iterations @@ -4890,7 +4418,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.create_stats.currently_idle exported_name: jobs_create_stats_currently_idle labeled_name: 'jobs{type: create_stats, status: currently_idle}' @@ -4900,7 +4427,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.create_stats.currently_paused exported_name: jobs_create_stats_currently_paused labeled_name: 'jobs{name: create_stats, status: currently_paused}' @@ -4910,7 +4436,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.create_stats.fail_or_cancel_completed exported_name: jobs_create_stats_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: create_stats, status: completed}' @@ -4920,7 +4445,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.create_stats.fail_or_cancel_retry_error exported_name: jobs_create_stats_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: create_stats, status: retry_error}' @@ -4930,7 +4454,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.create_stats.resume_completed exported_name: jobs_create_stats_resume_completed labeled_name: 'jobs.resume{name: create_stats, status: completed}' @@ -4940,7 +4463,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
jobs.create_stats.resume_failed exported_name: jobs_create_stats_resume_failed labeled_name: 'jobs.resume{name: create_stats, status: failed}' @@ -4950,7 +4472,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.create_stats.resume_retry_error exported_name: jobs_create_stats_resume_retry_error labeled_name: 'jobs.resume{name: create_stats, status: retry_error}' @@ -4960,7 +4481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.currently_idle exported_name: jobs_fingerprint_currently_idle labeled_name: 'jobs{type: fingerprint, status: currently_idle}' @@ -4970,7 +4490,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.fingerprint.currently_paused exported_name: jobs_fingerprint_currently_paused labeled_name: 'jobs{name: fingerprint, status: currently_paused}' @@ -4980,7 +4499,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.fingerprint.currently_running exported_name: jobs_fingerprint_currently_running labeled_name: 'jobs{type: fingerprint, status: currently_running}' @@ -4990,7 +4508,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.fingerprint.expired_pts_records exported_name: jobs_fingerprint_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: fingerprint}' @@ -5000,7 +4517,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.fail_or_cancel_completed exported_name: jobs_fingerprint_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: fingerprint, status: completed}' @@ -5010,7 +4526,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.fail_or_cancel_retry_error exported_name: 
jobs_fingerprint_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: fingerprint, status: retry_error}' @@ -5020,7 +4535,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.protected_age_sec exported_name: jobs_fingerprint_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: fingerprint}' @@ -5030,7 +4544,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.fingerprint.protected_record_count exported_name: jobs_fingerprint_protected_record_count labeled_name: 'jobs.protected_record_count{type: fingerprint}' @@ -5040,7 +4553,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.fingerprint.resume_completed exported_name: jobs_fingerprint_resume_completed labeled_name: 'jobs.resume{name: fingerprint, status: completed}' @@ -5050,7 +4562,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.resume_failed exported_name: jobs_fingerprint_resume_failed labeled_name: 'jobs.resume{name: fingerprint, status: failed}' @@ -5060,7 +4571,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.fingerprint.resume_retry_error exported_name: jobs_fingerprint_resume_retry_error labeled_name: 'jobs.resume{name: fingerprint, status: retry_error}' @@ -5070,7 +4580,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.currently_idle exported_name: jobs_history_retention_currently_idle labeled_name: 'jobs{type: history_retention, status: currently_idle}' @@ -5080,7 +4589,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.history_retention.currently_paused exported_name: jobs_history_retention_currently_paused labeled_name: 'jobs{name: 
history_retention, status: currently_paused}' @@ -5090,7 +4598,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.history_retention.currently_running exported_name: jobs_history_retention_currently_running labeled_name: 'jobs{type: history_retention, status: currently_running}' @@ -5100,7 +4607,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.history_retention.expired_pts_records exported_name: jobs_history_retention_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: history_retention}' @@ -5110,7 +4616,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.fail_or_cancel_completed exported_name: jobs_history_retention_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: history_retention, status: completed}' @@ -5120,7 +4625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.fail_or_cancel_retry_error exported_name: jobs_history_retention_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: history_retention, status: retry_error}' @@ -5130,7 +4634,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.protected_age_sec exported_name: jobs_history_retention_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: history_retention}' @@ -5140,7 +4643,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.history_retention.protected_record_count exported_name: jobs_history_retention_protected_record_count labeled_name: 'jobs.protected_record_count{type: history_retention}' @@ -5150,7 +4652,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.history_retention.resume_completed exported_name: 
jobs_history_retention_resume_completed labeled_name: 'jobs.resume{name: history_retention, status: completed}' @@ -5160,7 +4661,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.resume_failed exported_name: jobs_history_retention_resume_failed labeled_name: 'jobs.resume{name: history_retention, status: failed}' @@ -5170,7 +4670,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.history_retention.resume_retry_error exported_name: jobs_history_retention_resume_retry_error labeled_name: 'jobs.resume{name: history_retention, status: retry_error}' @@ -5180,7 +4679,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.currently_idle exported_name: jobs_hot_ranges_logger_currently_idle labeled_name: 'jobs{type: hot_ranges_logger, status: currently_idle}' @@ -5190,7 +4688,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.currently_paused exported_name: jobs_hot_ranges_logger_currently_paused labeled_name: 'jobs{name: hot_ranges_logger, status: currently_paused}' @@ -5200,7 +4697,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.currently_running exported_name: jobs_hot_ranges_logger_currently_running labeled_name: 'jobs{type: hot_ranges_logger, status: currently_running}' @@ -5210,7 +4706,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.expired_pts_records exported_name: jobs_hot_ranges_logger_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: hot_ranges_logger}' @@ -5220,7 +4715,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
jobs.hot_ranges_logger.fail_or_cancel_completed exported_name: jobs_hot_ranges_logger_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: hot_ranges_logger, status: completed}' @@ -5230,7 +4724,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.fail_or_cancel_retry_error exported_name: jobs_hot_ranges_logger_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: hot_ranges_logger, status: retry_error}' @@ -5240,7 +4733,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.protected_age_sec exported_name: jobs_hot_ranges_logger_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: hot_ranges_logger}' @@ -5250,7 +4742,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.protected_record_count exported_name: jobs_hot_ranges_logger_protected_record_count labeled_name: 'jobs.protected_record_count{type: hot_ranges_logger}' @@ -5260,7 +4751,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.resume_completed exported_name: jobs_hot_ranges_logger_resume_completed labeled_name: 'jobs.resume{name: hot_ranges_logger, status: completed}' @@ -5270,7 +4760,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.resume_failed exported_name: jobs_hot_ranges_logger_resume_failed labeled_name: 'jobs.resume{name: hot_ranges_logger, status: failed}' @@ -5280,7 +4769,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.hot_ranges_logger.resume_retry_error exported_name: jobs_hot_ranges_logger_resume_retry_error labeled_name: 'jobs.resume{name: hot_ranges_logger, status: retry_error}' @@ -5290,7 +4778,6 @@ layers: unit: 
COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.currently_idle exported_name: jobs_import_currently_idle labeled_name: 'jobs{type: import, status: currently_idle}' @@ -5300,7 +4787,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import.currently_paused exported_name: jobs_import_currently_paused labeled_name: 'jobs{name: import, status: currently_paused}' @@ -5310,7 +4796,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import.currently_running exported_name: jobs_import_currently_running labeled_name: 'jobs{type: import, status: currently_running}' @@ -5320,7 +4805,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import.expired_pts_records exported_name: jobs_import_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: import}' @@ -5330,7 +4814,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.fail_or_cancel_completed exported_name: jobs_import_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: import, status: completed}' @@ -5340,7 +4823,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.fail_or_cancel_retry_error exported_name: jobs_import_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: import, status: retry_error}' @@ -5350,7 +4832,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.protected_age_sec exported_name: jobs_import_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: import}' @@ -5360,7 +4841,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import.protected_record_count exported_name: jobs_import_protected_record_count 
labeled_name: 'jobs.protected_record_count{type: import}' @@ -5370,7 +4850,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import.resume_completed exported_name: jobs_import_resume_completed labeled_name: 'jobs.resume{name: import, status: completed}' @@ -5380,7 +4859,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.resume_failed exported_name: jobs_import_resume_failed labeled_name: 'jobs.resume{name: import, status: failed}' @@ -5390,7 +4868,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import.resume_retry_error exported_name: jobs_import_resume_retry_error labeled_name: 'jobs.resume{name: import, status: retry_error}' @@ -5400,7 +4877,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import_rollback.currently_idle exported_name: jobs_import_rollback_currently_idle labeled_name: 'jobs{type: import_rollback, status: currently_idle}' @@ -5410,7 +4886,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import_rollback.currently_paused exported_name: jobs_import_rollback_currently_paused labeled_name: 'jobs{name: import_rollback, status: currently_paused}' @@ -5420,7 +4895,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import_rollback.currently_running exported_name: jobs_import_rollback_currently_running labeled_name: 'jobs{type: import_rollback, status: currently_running}' @@ -5430,7 +4904,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.import_rollback.fail_or_cancel_completed exported_name: jobs_import_rollback_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: import_rollback, status: completed}' @@ -5440,7 +4913,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import_rollback.fail_or_cancel_retry_error exported_name: jobs_import_rollback_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: import_rollback, status: retry_error}' @@ -5450,7 +4922,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import_rollback.resume_completed exported_name: jobs_import_rollback_resume_completed labeled_name: 'jobs.resume{name: import_rollback, status: completed}' @@ -5460,7 +4931,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import_rollback.resume_failed exported_name: jobs_import_rollback_resume_failed labeled_name: 'jobs.resume{name: import_rollback, status: failed}' @@ -5470,7 +4940,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.import_rollback.resume_retry_error exported_name: jobs_import_rollback_resume_retry_error labeled_name: 'jobs.resume{name: import_rollback, status: retry_error}' @@ -5480,7 +4949,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.currently_idle exported_name: jobs_inspect_currently_idle labeled_name: 'jobs{type: inspect, status: currently_idle}' @@ -5490,7 +4958,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.inspect.currently_paused exported_name: jobs_inspect_currently_paused labeled_name: 'jobs{name: inspect, status: currently_paused}' @@ -5500,7 +4967,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.inspect.currently_running exported_name: jobs_inspect_currently_running labeled_name: 'jobs{type: inspect, status: currently_running}' @@ -5510,7 +4976,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: 
cockroachdb/jobs - name: jobs.inspect.expired_pts_records exported_name: jobs_inspect_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: inspect}' @@ -5520,7 +4985,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.fail_or_cancel_completed exported_name: jobs_inspect_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: inspect, status: completed}' @@ -5530,7 +4994,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.fail_or_cancel_retry_error exported_name: jobs_inspect_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: inspect, status: retry_error}' @@ -5540,7 +5003,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.issues_found exported_name: jobs_inspect_issues_found description: Total count of issues found by INSPECT jobs @@ -5549,7 +5011,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: jobs.inspect.num_active_spans exported_name: jobs_inspect_num_active_spans description: Number of spans currently being processed by INSPECT jobs @@ -5558,7 +5019,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: jobs.inspect.protected_age_sec exported_name: jobs_inspect_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: inspect}' @@ -5568,7 +5028,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.inspect.protected_record_count exported_name: jobs_inspect_protected_record_count labeled_name: 'jobs.protected_record_count{type: inspect}' @@ -5578,7 +5037,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.inspect.resume_completed exported_name: jobs_inspect_resume_completed labeled_name: 
'jobs.resume{name: inspect, status: completed}' @@ -5588,7 +5046,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.resume_failed exported_name: jobs_inspect_resume_failed labeled_name: 'jobs.resume{name: inspect, status: failed}' @@ -5598,7 +5055,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.resume_retry_error exported_name: jobs_inspect_resume_retry_error labeled_name: 'jobs.resume{name: inspect, status: retry_error}' @@ -5608,7 +5064,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.inspect.runs exported_name: jobs_inspect_runs description: Number of INSPECT jobs executed @@ -5617,7 +5072,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: jobs.inspect.runs_with_issues exported_name: jobs_inspect_runs_with_issues description: Number of INSPECT jobs that found at least one issue @@ -5626,7 +5080,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: jobs.inspect.spans_processed exported_name: jobs_inspect_spans_processed description: Number of spans processed by INSPECT jobs @@ -5635,7 +5088,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: jobs.key_visualizer.currently_idle exported_name: jobs_key_visualizer_currently_idle labeled_name: 'jobs{type: key_visualizer, status: currently_idle}' @@ -5645,7 +5097,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.key_visualizer.currently_paused exported_name: jobs_key_visualizer_currently_paused labeled_name: 'jobs{name: key_visualizer, status: currently_paused}' @@ -5655,7 +5106,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: 
cockroachdb/jobs - name: jobs.key_visualizer.currently_running exported_name: jobs_key_visualizer_currently_running labeled_name: 'jobs{type: key_visualizer, status: currently_running}' @@ -5665,7 +5115,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.key_visualizer.expired_pts_records exported_name: jobs_key_visualizer_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: key_visualizer}' @@ -5675,7 +5124,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.key_visualizer.fail_or_cancel_completed exported_name: jobs_key_visualizer_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: key_visualizer, status: completed}' @@ -5685,7 +5133,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.key_visualizer.fail_or_cancel_retry_error exported_name: jobs_key_visualizer_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: key_visualizer, status: retry_error}' @@ -5695,7 +5142,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.key_visualizer.protected_age_sec exported_name: jobs_key_visualizer_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: key_visualizer}' @@ -5705,7 +5151,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.key_visualizer.protected_record_count exported_name: jobs_key_visualizer_protected_record_count labeled_name: 'jobs.protected_record_count{type: key_visualizer}' @@ -5715,7 +5160,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.key_visualizer.resume_completed exported_name: jobs_key_visualizer_resume_completed labeled_name: 'jobs.resume{name: key_visualizer, status: completed}' @@ -5725,7 +5169,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.key_visualizer.resume_failed exported_name: jobs_key_visualizer_resume_failed labeled_name: 'jobs.resume{name: key_visualizer, status: failed}' @@ -5735,7 +5178,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.key_visualizer.resume_retry_error exported_name: jobs_key_visualizer_resume_retry_error labeled_name: 'jobs.resume{name: key_visualizer, status: retry_error}' @@ -5745,7 +5187,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.currently_idle exported_name: jobs_logical_replication_currently_idle labeled_name: 'jobs{type: logical_replication, status: currently_idle}' @@ -5755,7 +5196,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.logical_replication.currently_paused exported_name: jobs_logical_replication_currently_paused labeled_name: 'jobs{name: logical_replication, status: currently_paused}' @@ -5765,7 +5205,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.logical_replication.currently_running exported_name: jobs_logical_replication_currently_running labeled_name: 'jobs{type: logical_replication, status: currently_running}' @@ -5775,7 +5214,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.logical_replication.expired_pts_records exported_name: jobs_logical_replication_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: logical_replication}' @@ -5785,7 +5223,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.fail_or_cancel_completed exported_name: jobs_logical_replication_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: logical_replication, status: completed}' @@ -5795,7 +5232,6 @@ 
layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.fail_or_cancel_retry_error exported_name: jobs_logical_replication_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: logical_replication, status: retry_error}' @@ -5805,7 +5241,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.protected_age_sec exported_name: jobs_logical_replication_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: logical_replication}' @@ -5815,7 +5250,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.logical_replication.protected_record_count exported_name: jobs_logical_replication_protected_record_count labeled_name: 'jobs.protected_record_count{type: logical_replication}' @@ -5825,7 +5259,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.logical_replication.resume_completed exported_name: jobs_logical_replication_resume_completed labeled_name: 'jobs.resume{name: logical_replication, status: completed}' @@ -5835,7 +5268,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.resume_failed exported_name: jobs_logical_replication_resume_failed labeled_name: 'jobs.resume{name: logical_replication, status: failed}' @@ -5845,7 +5277,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.logical_replication.resume_retry_error exported_name: jobs_logical_replication_resume_retry_error labeled_name: 'jobs.resume{name: logical_replication, status: retry_error}' @@ -5855,7 +5286,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.metrics.task_failed exported_name: jobs_metrics_task_failed description: Number 
of metrics poller tasks that failed @@ -5864,7 +5294,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.currently_idle exported_name: jobs_migration_currently_idle labeled_name: 'jobs{type: migration, status: currently_idle}' @@ -5874,7 +5303,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.migration.currently_paused exported_name: jobs_migration_currently_paused labeled_name: 'jobs{name: migration, status: currently_paused}' @@ -5884,7 +5312,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.migration.currently_running exported_name: jobs_migration_currently_running labeled_name: 'jobs{type: migration, status: currently_running}' @@ -5894,7 +5321,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.migration.expired_pts_records exported_name: jobs_migration_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: migration}' @@ -5904,7 +5330,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.fail_or_cancel_completed exported_name: jobs_migration_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: migration, status: completed}' @@ -5914,7 +5339,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.fail_or_cancel_retry_error exported_name: jobs_migration_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: migration, status: retry_error}' @@ -5924,7 +5348,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.protected_age_sec exported_name: jobs_migration_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: migration}' @@ -5934,7 +5357,6 @@ layers: unit: SECONDS aggregation: AVG 
derivative: NONE - owner: cockroachdb/jobs - name: jobs.migration.protected_record_count exported_name: jobs_migration_protected_record_count labeled_name: 'jobs.protected_record_count{type: migration}' @@ -5944,7 +5366,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.migration.resume_completed exported_name: jobs_migration_resume_completed labeled_name: 'jobs.resume{name: migration, status: completed}' @@ -5954,7 +5375,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.resume_failed exported_name: jobs_migration_resume_failed labeled_name: 'jobs.resume{name: migration, status: failed}' @@ -5964,7 +5384,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.migration.resume_retry_error exported_name: jobs_migration_resume_retry_error labeled_name: 'jobs.resume{name: migration, status: retry_error}' @@ -5974,7 +5393,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.currently_idle exported_name: jobs_mvcc_statistics_update_currently_idle labeled_name: 'jobs{type: mvcc_statistics_update, status: currently_idle}' @@ -5984,7 +5402,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.currently_paused exported_name: jobs_mvcc_statistics_update_currently_paused labeled_name: 'jobs{name: mvcc_statistics_update, status: currently_paused}' @@ -5994,7 +5411,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.currently_running exported_name: jobs_mvcc_statistics_update_currently_running labeled_name: 'jobs{type: mvcc_statistics_update, status: currently_running}' @@ -6004,7 +5420,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: 
jobs.mvcc_statistics_update.expired_pts_records exported_name: jobs_mvcc_statistics_update_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: mvcc_statistics_update}' @@ -6014,7 +5429,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.fail_or_cancel_completed exported_name: jobs_mvcc_statistics_update_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: mvcc_statistics_update, status: completed}' @@ -6024,7 +5438,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.fail_or_cancel_retry_error exported_name: jobs_mvcc_statistics_update_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: mvcc_statistics_update, status: retry_error}' @@ -6034,7 +5447,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.protected_age_sec exported_name: jobs_mvcc_statistics_update_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: mvcc_statistics_update}' @@ -6044,7 +5456,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.protected_record_count exported_name: jobs_mvcc_statistics_update_protected_record_count labeled_name: 'jobs.protected_record_count{type: mvcc_statistics_update}' @@ -6054,7 +5465,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.resume_completed exported_name: jobs_mvcc_statistics_update_resume_completed labeled_name: 'jobs.resume{name: mvcc_statistics_update, status: completed}' @@ -6064,7 +5474,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.resume_failed exported_name: jobs_mvcc_statistics_update_resume_failed labeled_name: 
'jobs.resume{name: mvcc_statistics_update, status: failed}' @@ -6074,7 +5483,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.mvcc_statistics_update.resume_retry_error exported_name: jobs_mvcc_statistics_update_resume_retry_error labeled_name: 'jobs.resume{name: mvcc_statistics_update, status: retry_error}' @@ -6084,7 +5492,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.currently_idle exported_name: jobs_new_schema_change_currently_idle labeled_name: 'jobs{type: new_schema_change, status: currently_idle}' @@ -6094,7 +5501,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.new_schema_change.currently_paused exported_name: jobs_new_schema_change_currently_paused labeled_name: 'jobs{name: new_schema_change, status: currently_paused}' @@ -6104,7 +5510,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.new_schema_change.currently_running exported_name: jobs_new_schema_change_currently_running labeled_name: 'jobs{type: new_schema_change, status: currently_running}' @@ -6114,7 +5519,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.new_schema_change.expired_pts_records exported_name: jobs_new_schema_change_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: new_schema_change}' @@ -6124,7 +5528,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.fail_or_cancel_completed exported_name: jobs_new_schema_change_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: new_schema_change, status: completed}' @@ -6134,7 +5537,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.fail_or_cancel_retry_error 
exported_name: jobs_new_schema_change_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: new_schema_change, status: retry_error}' @@ -6144,7 +5546,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.protected_age_sec exported_name: jobs_new_schema_change_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: new_schema_change}' @@ -6154,7 +5555,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.new_schema_change.protected_record_count exported_name: jobs_new_schema_change_protected_record_count labeled_name: 'jobs.protected_record_count{type: new_schema_change}' @@ -6164,7 +5564,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.new_schema_change.resume_completed exported_name: jobs_new_schema_change_resume_completed labeled_name: 'jobs.resume{name: new_schema_change, status: completed}' @@ -6174,7 +5573,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.resume_failed exported_name: jobs_new_schema_change_resume_failed labeled_name: 'jobs.resume{name: new_schema_change, status: failed}' @@ -6184,7 +5582,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.new_schema_change.resume_retry_error exported_name: jobs_new_schema_change_resume_retry_error labeled_name: 'jobs.resume{name: new_schema_change, status: retry_error}' @@ -6194,7 +5591,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.currently_idle exported_name: jobs_poll_jobs_stats_currently_idle labeled_name: 'jobs{type: poll_jobs_stats, status: currently_idle}' @@ -6204,7 +5600,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: 
jobs.poll_jobs_stats.currently_paused exported_name: jobs_poll_jobs_stats_currently_paused labeled_name: 'jobs{name: poll_jobs_stats, status: currently_paused}' @@ -6214,7 +5609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.currently_running exported_name: jobs_poll_jobs_stats_currently_running labeled_name: 'jobs{type: poll_jobs_stats, status: currently_running}' @@ -6224,7 +5618,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.expired_pts_records exported_name: jobs_poll_jobs_stats_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: poll_jobs_stats}' @@ -6234,7 +5627,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.fail_or_cancel_completed exported_name: jobs_poll_jobs_stats_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: poll_jobs_stats, status: completed}' @@ -6244,7 +5636,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.fail_or_cancel_retry_error exported_name: jobs_poll_jobs_stats_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: poll_jobs_stats, status: retry_error}' @@ -6254,7 +5645,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.protected_age_sec exported_name: jobs_poll_jobs_stats_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: poll_jobs_stats}' @@ -6264,7 +5654,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.protected_record_count exported_name: jobs_poll_jobs_stats_protected_record_count labeled_name: 'jobs.protected_record_count{type: poll_jobs_stats}' @@ -6274,7 +5663,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: 
cockroachdb/jobs - name: jobs.poll_jobs_stats.resume_completed exported_name: jobs_poll_jobs_stats_resume_completed labeled_name: 'jobs.resume{name: poll_jobs_stats, status: completed}' @@ -6284,7 +5672,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.resume_failed exported_name: jobs_poll_jobs_stats_resume_failed labeled_name: 'jobs.resume{name: poll_jobs_stats, status: failed}' @@ -6294,7 +5681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.poll_jobs_stats.resume_retry_error exported_name: jobs_poll_jobs_stats_resume_retry_error labeled_name: 'jobs.resume{name: poll_jobs_stats, status: retry_error}' @@ -6304,7 +5690,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.currently_idle exported_name: jobs_replication_stream_ingestion_currently_idle labeled_name: 'jobs{type: replication_stream_ingestion, status: currently_idle}' @@ -6314,7 +5699,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.currently_paused exported_name: jobs_replication_stream_ingestion_currently_paused labeled_name: 'jobs{name: replication_stream_ingestion, status: currently_paused}' @@ -6324,7 +5708,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.currently_running exported_name: jobs_replication_stream_ingestion_currently_running labeled_name: 'jobs{type: replication_stream_ingestion, status: currently_running}' @@ -6334,7 +5717,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.expired_pts_records exported_name: jobs_replication_stream_ingestion_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: 
replication_stream_ingestion}' @@ -6344,7 +5726,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.fail_or_cancel_completed exported_name: jobs_replication_stream_ingestion_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: replication_stream_ingestion, status: completed}' @@ -6354,7 +5735,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.fail_or_cancel_retry_error exported_name: jobs_replication_stream_ingestion_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: replication_stream_ingestion, status: retry_error}' @@ -6364,7 +5744,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.protected_age_sec exported_name: jobs_replication_stream_ingestion_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: replication_stream_ingestion}' @@ -6374,7 +5753,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.protected_record_count exported_name: jobs_replication_stream_ingestion_protected_record_count labeled_name: 'jobs.protected_record_count{type: replication_stream_ingestion}' @@ -6384,7 +5762,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.resume_completed exported_name: jobs_replication_stream_ingestion_resume_completed labeled_name: 'jobs.resume{name: replication_stream_ingestion, status: completed}' @@ -6394,7 +5771,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.resume_failed exported_name: jobs_replication_stream_ingestion_resume_failed labeled_name: 'jobs.resume{name: replication_stream_ingestion, 
status: failed}' @@ -6404,7 +5780,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_ingestion.resume_retry_error exported_name: jobs_replication_stream_ingestion_resume_retry_error labeled_name: 'jobs.resume{name: replication_stream_ingestion, status: retry_error}' @@ -6414,7 +5789,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.currently_idle exported_name: jobs_replication_stream_producer_currently_idle labeled_name: 'jobs{type: replication_stream_producer, status: currently_idle}' @@ -6424,7 +5798,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.currently_paused exported_name: jobs_replication_stream_producer_currently_paused labeled_name: 'jobs{name: replication_stream_producer, status: currently_paused}' @@ -6434,7 +5807,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.currently_running exported_name: jobs_replication_stream_producer_currently_running labeled_name: 'jobs{type: replication_stream_producer, status: currently_running}' @@ -6444,7 +5816,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.expired_pts_records exported_name: jobs_replication_stream_producer_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: replication_stream_producer}' @@ -6454,7 +5825,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.fail_or_cancel_completed exported_name: jobs_replication_stream_producer_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: replication_stream_producer, status: completed}' @@ -6464,7 +5834,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.fail_or_cancel_retry_error exported_name: jobs_replication_stream_producer_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: replication_stream_producer, status: retry_error}' @@ -6474,7 +5843,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.protected_age_sec exported_name: jobs_replication_stream_producer_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: replication_stream_producer}' @@ -6484,7 +5852,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.protected_record_count exported_name: jobs_replication_stream_producer_protected_record_count labeled_name: 'jobs.protected_record_count{type: replication_stream_producer}' @@ -6494,7 +5861,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.resume_completed exported_name: jobs_replication_stream_producer_resume_completed labeled_name: 'jobs.resume{name: replication_stream_producer, status: completed}' @@ -6504,7 +5870,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.resume_failed exported_name: jobs_replication_stream_producer_resume_failed labeled_name: 'jobs.resume{name: replication_stream_producer, status: failed}' @@ -6514,7 +5879,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.replication_stream_producer.resume_retry_error exported_name: jobs_replication_stream_producer_resume_retry_error labeled_name: 'jobs.resume{name: replication_stream_producer, status: retry_error}' @@ -6524,7 +5888,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.currently_idle exported_name: jobs_restore_currently_idle labeled_name: 'jobs{type: restore, status: currently_idle}' @@ -6534,7 +5897,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.restore.currently_paused exported_name: jobs_restore_currently_paused labeled_name: 'jobs{name: restore, status: currently_paused}' @@ -6544,7 +5906,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.restore.currently_running exported_name: jobs_restore_currently_running labeled_name: 'jobs{type: restore, status: currently_running}' @@ -6554,7 +5915,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.restore.expired_pts_records exported_name: jobs_restore_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: restore}' @@ -6564,7 +5924,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.fail_or_cancel_completed exported_name: jobs_restore_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: restore, status: completed}' @@ -6574,7 +5933,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.fail_or_cancel_retry_error exported_name: jobs_restore_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: restore, status: retry_error}' @@ -6584,7 +5942,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.protected_age_sec exported_name: jobs_restore_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: restore}' @@ -6594,7 +5951,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.restore.protected_record_count exported_name: jobs_restore_protected_record_count labeled_name: 
'jobs.protected_record_count{type: restore}' @@ -6604,7 +5960,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.restore.resume_completed exported_name: jobs_restore_resume_completed labeled_name: 'jobs.resume{name: restore, status: completed}' @@ -6614,7 +5969,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.resume_failed exported_name: jobs_restore_resume_failed labeled_name: 'jobs.resume{name: restore, status: failed}' @@ -6624,7 +5978,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.restore.resume_retry_error exported_name: jobs_restore_resume_retry_error labeled_name: 'jobs.resume{name: restore, status: retry_error}' @@ -6634,7 +5987,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.resumed_claimed_jobs exported_name: jobs_resumed_claimed_jobs description: number of claimed-jobs resumed in job-adopt iterations @@ -6643,7 +5995,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.currently_idle exported_name: jobs_row_level_ttl_currently_idle labeled_name: 'jobs{type: row_level_ttl, status: currently_idle}' @@ -6653,7 +6004,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.expired_pts_records exported_name: jobs_row_level_ttl_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: row_level_ttl}' @@ -6663,7 +6013,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.fail_or_cancel_completed exported_name: jobs_row_level_ttl_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: row_level_ttl, status: completed}' @@ -6673,7 +6022,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.fail_or_cancel_retry_error exported_name: jobs_row_level_ttl_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: row_level_ttl, status: retry_error}' @@ -6683,7 +6031,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.num_delete_batch_retries exported_name: jobs_row_level_ttl_num_delete_batch_retries description: Number of times the row level TTL job had to reduce the delete batch size and retry. @@ -6692,7 +6039,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: jobs.row_level_ttl.protected_age_sec exported_name: jobs_row_level_ttl_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: row_level_ttl}' @@ -6702,7 +6048,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.protected_record_count exported_name: jobs_row_level_ttl_protected_record_count labeled_name: 'jobs.protected_record_count{type: row_level_ttl}' @@ -6712,7 +6057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.row_level_ttl.resume_retry_error exported_name: jobs_row_level_ttl_resume_retry_error labeled_name: 'jobs.resume{name: row_level_ttl, status: retry_error}' @@ -6722,7 +6066,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.running_non_idle exported_name: jobs_running_non_idle description: number of running jobs that are not idle @@ -6731,7 +6074,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change.currently_idle exported_name: jobs_schema_change_currently_idle labeled_name: 'jobs{type: schema_change, status: currently_idle}' @@ -6741,7 +6083,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - 
owner: cockroachdb/jobs - name: jobs.schema_change.currently_paused exported_name: jobs_schema_change_currently_paused labeled_name: 'jobs{name: schema_change, status: currently_paused}' @@ -6751,7 +6092,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change.currently_running exported_name: jobs_schema_change_currently_running labeled_name: 'jobs{type: schema_change, status: currently_running}' @@ -6761,7 +6101,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change.expired_pts_records exported_name: jobs_schema_change_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: schema_change}' @@ -6771,7 +6110,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change.fail_or_cancel_completed exported_name: jobs_schema_change_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: schema_change, status: completed}' @@ -6781,7 +6119,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change.fail_or_cancel_retry_error exported_name: jobs_schema_change_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: schema_change, status: retry_error}' @@ -6791,7 +6128,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change.protected_age_sec exported_name: jobs_schema_change_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: schema_change}' @@ -6801,7 +6137,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change.protected_record_count exported_name: jobs_schema_change_protected_record_count labeled_name: 'jobs.protected_record_count{type: schema_change}' @@ -6811,7 +6146,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - 
name: jobs.schema_change.resume_completed exported_name: jobs_schema_change_resume_completed labeled_name: 'jobs.resume{name: schema_change, status: completed}' @@ -6821,7 +6155,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change.resume_failed exported_name: jobs_schema_change_resume_failed labeled_name: 'jobs.resume{name: schema_change, status: failed}' @@ -6831,7 +6164,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change.resume_retry_error exported_name: jobs_schema_change_resume_retry_error labeled_name: 'jobs.resume{name: schema_change, status: retry_error}' @@ -6841,7 +6173,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.currently_idle exported_name: jobs_schema_change_gc_currently_idle labeled_name: 'jobs{type: schema_change_gc, status: currently_idle}' @@ -6851,7 +6182,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.currently_paused exported_name: jobs_schema_change_gc_currently_paused labeled_name: 'jobs{name: schema_change_gc, status: currently_paused}' @@ -6861,7 +6191,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.currently_running exported_name: jobs_schema_change_gc_currently_running labeled_name: 'jobs{type: schema_change_gc, status: currently_running}' @@ -6871,7 +6200,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.expired_pts_records exported_name: jobs_schema_change_gc_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: schema_change_gc}' @@ -6881,7 +6209,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
jobs.schema_change_gc.fail_or_cancel_completed exported_name: jobs_schema_change_gc_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: schema_change_gc, status: completed}' @@ -6891,7 +6218,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.fail_or_cancel_retry_error exported_name: jobs_schema_change_gc_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: schema_change_gc, status: retry_error}' @@ -6901,7 +6227,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.protected_age_sec exported_name: jobs_schema_change_gc_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: schema_change_gc}' @@ -6911,7 +6236,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.protected_record_count exported_name: jobs_schema_change_gc_protected_record_count labeled_name: 'jobs.protected_record_count{type: schema_change_gc}' @@ -6921,7 +6245,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.resume_completed exported_name: jobs_schema_change_gc_resume_completed labeled_name: 'jobs.resume{name: schema_change_gc, status: completed}' @@ -6931,7 +6254,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.resume_failed exported_name: jobs_schema_change_gc_resume_failed labeled_name: 'jobs.resume{name: schema_change_gc, status: failed}' @@ -6941,7 +6263,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.schema_change_gc.resume_retry_error exported_name: jobs_schema_change_gc_resume_retry_error labeled_name: 'jobs.resume{name: schema_change_gc, status: retry_error}' @@ -6951,7 +6272,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.currently_idle exported_name: jobs_sql_activity_flush_currently_idle labeled_name: 'jobs{type: sql_activity_flush, status: currently_idle}' @@ -6961,7 +6281,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.currently_paused exported_name: jobs_sql_activity_flush_currently_paused labeled_name: 'jobs{name: sql_activity_flush, status: currently_paused}' @@ -6971,7 +6290,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.currently_running exported_name: jobs_sql_activity_flush_currently_running labeled_name: 'jobs{type: sql_activity_flush, status: currently_running}' @@ -6981,7 +6299,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.expired_pts_records exported_name: jobs_sql_activity_flush_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: sql_activity_flush}' @@ -6991,7 +6308,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.fail_or_cancel_completed exported_name: jobs_sql_activity_flush_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: sql_activity_flush, status: completed}' @@ -7001,7 +6317,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.fail_or_cancel_retry_error exported_name: jobs_sql_activity_flush_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: sql_activity_flush, status: retry_error}' @@ -7011,7 +6326,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.protected_age_sec exported_name: jobs_sql_activity_flush_protected_age_sec labeled_name: 
'jobs.protected_age_sec{type: sql_activity_flush}' @@ -7021,7 +6335,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.protected_record_count exported_name: jobs_sql_activity_flush_protected_record_count labeled_name: 'jobs.protected_record_count{type: sql_activity_flush}' @@ -7031,7 +6344,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.resume_completed exported_name: jobs_sql_activity_flush_resume_completed labeled_name: 'jobs.resume{name: sql_activity_flush, status: completed}' @@ -7041,7 +6353,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.resume_failed exported_name: jobs_sql_activity_flush_resume_failed labeled_name: 'jobs.resume{name: sql_activity_flush, status: failed}' @@ -7051,7 +6362,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.sql_activity_flush.resume_retry_error exported_name: jobs_sql_activity_flush_resume_retry_error labeled_name: 'jobs.resume{name: sql_activity_flush, status: retry_error}' @@ -7061,7 +6371,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.currently_idle exported_name: jobs_standby_read_ts_poller_currently_idle labeled_name: 'jobs{type: standby_read_ts_poller, status: currently_idle}' @@ -7071,7 +6380,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.currently_paused exported_name: jobs_standby_read_ts_poller_currently_paused labeled_name: 'jobs{name: standby_read_ts_poller, status: currently_paused}' @@ -7081,7 +6389,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.currently_running exported_name: 
jobs_standby_read_ts_poller_currently_running labeled_name: 'jobs{type: standby_read_ts_poller, status: currently_running}' @@ -7091,7 +6398,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.expired_pts_records exported_name: jobs_standby_read_ts_poller_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: standby_read_ts_poller}' @@ -7101,7 +6407,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.fail_or_cancel_completed exported_name: jobs_standby_read_ts_poller_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: standby_read_ts_poller, status: completed}' @@ -7111,7 +6416,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.fail_or_cancel_retry_error exported_name: jobs_standby_read_ts_poller_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: standby_read_ts_poller, status: retry_error}' @@ -7121,7 +6425,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.protected_age_sec exported_name: jobs_standby_read_ts_poller_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: standby_read_ts_poller}' @@ -7131,7 +6434,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.protected_record_count exported_name: jobs_standby_read_ts_poller_protected_record_count labeled_name: 'jobs.protected_record_count{type: standby_read_ts_poller}' @@ -7141,7 +6443,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.resume_completed exported_name: jobs_standby_read_ts_poller_resume_completed labeled_name: 'jobs.resume{name: standby_read_ts_poller, status: completed}' @@ 
-7151,7 +6452,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.resume_failed exported_name: jobs_standby_read_ts_poller_resume_failed labeled_name: 'jobs.resume{name: standby_read_ts_poller, status: failed}' @@ -7161,7 +6461,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.standby_read_ts_poller.resume_retry_error exported_name: jobs_standby_read_ts_poller_resume_retry_error labeled_name: 'jobs.resume{name: standby_read_ts_poller, status: retry_error}' @@ -7171,7 +6470,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.currently_idle exported_name: jobs_typedesc_schema_change_currently_idle labeled_name: 'jobs{type: typedesc_schema_change, status: currently_idle}' @@ -7181,7 +6479,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.currently_paused exported_name: jobs_typedesc_schema_change_currently_paused labeled_name: 'jobs{name: typedesc_schema_change, status: currently_paused}' @@ -7191,7 +6488,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.currently_running exported_name: jobs_typedesc_schema_change_currently_running labeled_name: 'jobs{type: typedesc_schema_change, status: currently_running}' @@ -7201,7 +6497,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.expired_pts_records exported_name: jobs_typedesc_schema_change_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: typedesc_schema_change}' @@ -7211,7 +6506,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.fail_or_cancel_completed exported_name: 
jobs_typedesc_schema_change_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: typedesc_schema_change, status: completed}' @@ -7221,7 +6515,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.fail_or_cancel_retry_error exported_name: jobs_typedesc_schema_change_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: typedesc_schema_change, status: retry_error}' @@ -7231,7 +6524,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.protected_age_sec exported_name: jobs_typedesc_schema_change_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: typedesc_schema_change}' @@ -7241,7 +6533,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.protected_record_count exported_name: jobs_typedesc_schema_change_protected_record_count labeled_name: 'jobs.protected_record_count{type: typedesc_schema_change}' @@ -7251,7 +6542,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.resume_completed exported_name: jobs_typedesc_schema_change_resume_completed labeled_name: 'jobs.resume{name: typedesc_schema_change, status: completed}' @@ -7261,7 +6551,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.resume_failed exported_name: jobs_typedesc_schema_change_resume_failed labeled_name: 'jobs.resume{name: typedesc_schema_change, status: failed}' @@ -7271,7 +6560,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.typedesc_schema_change.resume_retry_error exported_name: jobs_typedesc_schema_change_resume_retry_error labeled_name: 'jobs.resume{name: typedesc_schema_change, status: retry_error}' 
@@ -7281,7 +6569,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.currently_idle exported_name: jobs_update_table_metadata_cache_currently_idle labeled_name: 'jobs{type: update_table_metadata_cache, status: currently_idle}' @@ -7291,7 +6578,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.currently_paused exported_name: jobs_update_table_metadata_cache_currently_paused labeled_name: 'jobs{name: update_table_metadata_cache, status: currently_paused}' @@ -7301,7 +6587,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.currently_running exported_name: jobs_update_table_metadata_cache_currently_running labeled_name: 'jobs{type: update_table_metadata_cache, status: currently_running}' @@ -7311,7 +6596,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.expired_pts_records exported_name: jobs_update_table_metadata_cache_expired_pts_records labeled_name: 'jobs.expired_pts_records{type: update_table_metadata_cache}' @@ -7321,7 +6605,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.fail_or_cancel_completed exported_name: jobs_update_table_metadata_cache_fail_or_cancel_completed labeled_name: 'jobs.fail_or_cancel{name: update_table_metadata_cache, status: completed}' @@ -7331,7 +6614,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.fail_or_cancel_retry_error exported_name: jobs_update_table_metadata_cache_fail_or_cancel_retry_error labeled_name: 'jobs.fail_or_cancel{name: update_table_metadata_cache, status: retry_error}' @@ -7341,7 +6623,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.protected_age_sec exported_name: jobs_update_table_metadata_cache_protected_age_sec labeled_name: 'jobs.protected_age_sec{type: update_table_metadata_cache}' @@ -7351,7 +6632,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.protected_record_count exported_name: jobs_update_table_metadata_cache_protected_record_count labeled_name: 'jobs.protected_record_count{type: update_table_metadata_cache}' @@ -7361,7 +6641,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.resume_completed exported_name: jobs_update_table_metadata_cache_resume_completed labeled_name: 'jobs.resume{name: update_table_metadata_cache, status: completed}' @@ -7371,7 +6650,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.resume_failed exported_name: jobs_update_table_metadata_cache_resume_failed labeled_name: 'jobs.resume{name: update_table_metadata_cache, status: failed}' @@ -7381,7 +6659,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: jobs.update_table_metadata_cache.resume_retry_error exported_name: jobs_update_table_metadata_cache_resume_retry_error labeled_name: 'jobs.resume{name: update_table_metadata_cache, status: retry_error}' @@ -7391,7 +6668,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: kv.protectedts.get_record.failed exported_name: kv_protectedts_get_record_failed description: number of failed GetRecord operations (ErrNotExists, parsing errors, or execution errors) @@ -7400,7 +6676,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
kv.protectedts.get_record.success exported_name: kv_protectedts_get_record_success description: number of successful GetRecord operations @@ -7409,7 +6684,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.protect.failed exported_name: kv_protectedts_protect_failed description: number of failed Protect operations (ErrExists, validation errors, or execution errors) @@ -7418,7 +6692,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.protect.success exported_name: kv_protectedts_protect_success description: number of successful Protect operations creating protected timestamp records @@ -7427,7 +6700,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.reconciliation.errors exported_name: kv_protectedts_reconciliation_errors description: number of errors encountered during reconciliation runs on this node @@ -7436,7 +6708,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.reconciliation.num_runs exported_name: kv_protectedts_reconciliation_num_runs description: number of successful reconciliation runs on this node @@ -7445,7 +6716,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.reconciliation.records_processed exported_name: kv_protectedts_reconciliation_records_processed description: number of records processed without error during reconciliation on this node @@ -7454,7 +6724,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.reconciliation.records_removed exported_name: kv_protectedts_reconciliation_records_removed description: number of records removed during reconciliation runs on this node @@ -7463,7 +6732,6 @@ layers: unit: 
COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.release.failed exported_name: kv_protectedts_release_failed description: number of failed Release operations (ErrNotExists or execution errors) @@ -7472,7 +6740,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.release.success exported_name: kv_protectedts_release_success description: number of successful Release operations removing protected timestamp records @@ -7481,7 +6748,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.update_timestamp.failed exported_name: kv_protectedts_update_timestamp_failed description: number of failed UpdateTimestamp operations (ErrNotExists or execution errors) @@ -7490,7 +6756,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.protectedts.update_timestamp.success exported_name: kv_protectedts_update_timestamp_success description: number of successful UpdateTimestamp operations @@ -7499,7 +6764,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.streamer.batches.in_progress exported_name: kv_streamer_batches_in_progress description: Number of BatchRequests in progress across all KV Streamer operators @@ -7508,7 +6772,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: kv.streamer.batches.sent exported_name: kv_streamer_batches_sent description: Number of BatchRequests sent across all KV Streamer operators @@ -7517,7 +6780,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: kv.streamer.batches.throttled exported_name: kv_streamer_batches_throttled description: Number of BatchRequests currently being throttled due to reaching the concurrency limit, 
across all KV Streamer operators @@ -7526,7 +6788,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: kv.streamer.operators.active exported_name: kv_streamer_operators_active description: Number of KV Streamer operators currently in use @@ -7535,7 +6796,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: logical_replication.batch_hist_nanos exported_name: logical_replication_batch_hist_nanos description: Time spent per row flushing a batch @@ -7544,7 +6804,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.catchup_ranges exported_name: logical_replication_catchup_ranges description: Source side ranges undergoing catch up scans (inaccurate with multiple LDR jobs) @@ -7553,7 +6812,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.catchup_ranges_by_label exported_name: logical_replication_catchup_ranges_by_label description: Source side ranges undergoing catch up scans @@ -7562,7 +6820,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.checkpoint_events_ingested exported_name: logical_replication_checkpoint_events_ingested description: Checkpoint events ingested by all replication jobs @@ -7571,7 +6828,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_dlqed_age exported_name: logical_replication_events_dlqed_age labeled_name: 'logical_replication.events{type: dlqed_age}' @@ -7581,7 +6837,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_dlqed_by_label exported_name: logical_replication_events_dlqed_by_label description: Row update events sent to DLQ by label @@ -7590,7 +6845,6 @@ layers: unit: COUNT aggregation: 
AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_dlqed_errtype exported_name: logical_replication_events_dlqed_errtype labeled_name: 'logical_replication.events{type: dlqed_errtype}' @@ -7600,7 +6854,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_dlqed_space exported_name: logical_replication_events_dlqed_space labeled_name: 'logical_replication.events{type: dlqed_space}' @@ -7610,7 +6863,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_ingested_by_label exported_name: logical_replication_events_ingested_by_label description: Events ingested by all replication jobs by label @@ -7619,7 +6871,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_initial_failure exported_name: logical_replication_events_initial_failure labeled_name: 'logical_replication.events{type: initial_failure}' @@ -7629,7 +6880,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_initial_success exported_name: logical_replication_events_initial_success labeled_name: 'logical_replication.events{type: initial_success}' @@ -7639,7 +6889,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_retry_failure exported_name: logical_replication_events_retry_failure labeled_name: 'logical_replication.events{type: retry_failure}' @@ -7649,7 +6898,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.events_retry_success exported_name: logical_replication_events_retry_success labeled_name: 'logical_replication.events{type: retry_success}' @@ -7659,7 +6907,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.kv.update_too_old exported_name: logical_replication_kv_update_too_old description: Total number of updates that were not applied because they were too old @@ -7668,7 +6915,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.kv.value_refreshes exported_name: logical_replication_kv_value_refreshes description: Total number of batches that refreshed the previous value @@ -7677,7 +6923,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.replan_count exported_name: logical_replication_replan_count description: Total number of dist sql replanning events @@ -7686,7 +6931,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/cdc - name: logical_replication.replicated_time_by_label exported_name: logical_replication_replicated_time_by_label description: Replicated time of the logical replication stream by label @@ -7695,7 +6939,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.retry_queue_bytes exported_name: logical_replication_retry_queue_bytes description: Logical bytes (sum of keys+values) in the retry queue @@ -7704,7 +6947,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.retry_queue_events exported_name: logical_replication_retry_queue_events description: Row update events in the retry queue @@ -7713,7 +6955,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.scanning_ranges exported_name: logical_replication_scanning_ranges description: Source side ranges undergoing an initial scan (inaccurate with multiple LDR jobs) @@ -7722,7 +6963,6 @@ layers: unit: 
COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: logical_replication.scanning_ranges_by_label exported_name: logical_replication_scanning_ranges_by_label description: Source side ranges undergoing an initial scan @@ -7731,7 +6971,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/cdc - name: obs.clustermetrics.flush.count exported_name: obs_clustermetrics_flush_count description: Number of cluster metrics flush operations @@ -7740,7 +6979,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.clustermetrics.flush.errors exported_name: obs_clustermetrics_flush_errors description: Number of flush errors (write or delete failures) @@ -7749,7 +6987,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.clustermetrics.flush.latency exported_name: obs_clustermetrics_flush_latency description: Latency of cluster metrics flushes to storage @@ -7758,7 +6995,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.clustermetrics.flush.metrics_deleted exported_name: obs_clustermetrics_flush_metrics_deleted description: Number of individual metrics deleted per flush @@ -7767,7 +7003,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.clustermetrics.flush.metrics_written exported_name: obs_clustermetrics_flush_metrics_written description: Number of individual metrics written per flush @@ -7776,7 +7011,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.tablemetadata.update_job.duration exported_name: obs_tablemetadata_update_job_duration description: Time spent running the update table metadata job. 
@@ -7785,7 +7019,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.tablemetadata.update_job.errors exported_name: obs_tablemetadata_update_job_errors description: The total number of errors that have been emitted from the update table metadata job. @@ -7794,7 +7027,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.tablemetadata.update_job.runs exported_name: obs_tablemetadata_update_job_runs description: The total number of runs of the update table metadata job. @@ -7803,7 +7035,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: obs.tablemetadata.update_job.table_updates exported_name: obs_tablemetadata_update_job_table_updates description: The total number of rows that have been updated in system.table_metadata @@ -7812,7 +7043,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: physical_replication.admit_latency exported_name: physical_replication_admit_latency description: 'Event admission latency: a difference between event MVCC timestamp and the time it was admitted into ingestion processor' @@ -7821,7 +7051,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.catchup_ranges exported_name: physical_replication_catchup_ranges description: Source side ranges undergoing catch up scans @@ -7830,7 +7059,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.commit_latency exported_name: physical_replication_commit_latency description: 'Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. 
If we batch events, then the difference between the oldest event in the batch and flush is recorded' @@ -7839,7 +7067,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.distsql_replan_count exported_name: physical_replication_distsql_replan_count description: Total number of dist sql replanning events @@ -7848,7 +7075,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: physical_replication.events_ingested exported_name: physical_replication_events_ingested description: Events ingested by all replication jobs @@ -7857,7 +7083,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: physical_replication.failover_progress exported_name: physical_replication_failover_progress description: The number of ranges left to revert in order to complete an inflight cutover @@ -7866,7 +7091,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.flush_hist_nanos exported_name: physical_replication_flush_hist_nanos description: Time spent flushing messages across all replication streams @@ -7875,7 +7099,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.flush_wait_nanos exported_name: physical_replication_flush_wait_nanos description: Cumulative time spent waiting to send buffer to flush loop; use rate() to compare against receive_wait_nanos @@ -7884,7 +7107,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: physical_replication.flushes exported_name: physical_replication_flushes description: Total flushes across all replication jobs @@ -7893,7 +7115,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - 
owner: cockroachdb/disaster-recovery - name: physical_replication.receive_wait_nanos exported_name: physical_replication_receive_wait_nanos description: Cumulative time spent waiting to receive events from producer; use rate() to compare against flush_wait_nanos @@ -7902,7 +7123,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: physical_replication.resolved_events_ingested exported_name: physical_replication_resolved_events_ingested description: Resolved events ingested by all replication jobs @@ -7911,7 +7131,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/disaster-recovery - name: physical_replication.running exported_name: physical_replication_running description: Number of currently running replication streams @@ -7920,7 +7139,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: physical_replication.scanning_ranges exported_name: physical_replication_scanning_ranges description: Source side ranges undergoing an initial scan @@ -7929,7 +7147,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: requests.slow.distsender exported_name: requests_slow_distsender description: |- @@ -7941,7 +7158,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: round-trip-default-class-latency exported_name: round_trip_default_class_latency description: |- @@ -7953,7 +7169,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: round-trip-latency exported_name: round_trip_latency description: |- @@ -7972,7 +7187,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: round-trip-raft-class-latency exported_name: round_trip_raft_class_latency description: |- @@ -7984,7 +7198,6 @@ layers: unit: NANOSECONDS aggregation: AVG 
derivative: NONE - owner: cockroachdb/kv - name: round-trip-rangefeed-class-latency exported_name: round_trip_rangefeed_class_latency description: |- @@ -7996,7 +7209,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: round-trip-system-class-latency exported_name: round_trip_system_class_latency description: |- @@ -8008,7 +7220,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.client.bytes.egress exported_name: rpc_client_bytes_egress description: Counter of TCP bytes sent via gRPC on connections we initiated. @@ -8017,7 +7228,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.client.bytes.ingress exported_name: rpc_client_bytes_ingress description: Counter of TCP bytes received via gRPC on connections we initiated. @@ -8026,7 +7236,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.connection.connected exported_name: rpc_connection_connected description: |- @@ -8040,7 +7249,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.connection.inactive exported_name: rpc_connection_inactive description: Gauge of current connections in an inactive state and pending deletion; these are not healthy but are not tracked as unhealthy either because there is reason to believe that the connection is no longer relevant,for example if the node has since been seen under a new address @@ -8049,7 +7257,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.drpc.enabled exported_name: rpc_drpc_enabled description: 1 if this node is using DRPC for internode RPC, 0 otherwise. 
@@ -8058,7 +7265,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.drpc.tls.handshake.errors exported_name: rpc_drpc_tls_handshake_errors description: Number of TLS handshake errors for DRPC connections. @@ -8067,7 +7273,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.server.request.duration.nanos exported_name: rpc_server_request_duration_nanos description: Duration of an RPC request at the server in nanoseconds. @@ -8076,7 +7281,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.server.requests.total exported_name: rpc_server_requests_total description: Total number of RPCs requests received by the server. @@ -8085,7 +7289,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: schedules.BACKUP.last-completed-time-by-virtual_cluster exported_name: schedules_BACKUP_last_completed_time_by_virtual_cluster description: The unix timestamp of the most recently completed host scheduled backup by virtual cluster specified as maintaining this metric @@ -8094,7 +7297,6 @@ layers: unit: TIMESTAMP_SEC aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: schedules.BACKUP.protected_age_sec exported_name: schedules_BACKUP_protected_age_sec labeled_name: 'schedules.protected_age_sec{name: BACKUP}' @@ -8104,7 +7306,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.BACKUP.protected_record_count exported_name: schedules_BACKUP_protected_record_count labeled_name: 'schedules.protected_record_count{name: BACKUP}' @@ -8114,7 +7315,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.BACKUP.started exported_name: schedules_BACKUP_started labeled_name: 'schedules{name: BACKUP, status: started}' @@ -8124,7 +7324,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.BACKUP.succeeded exported_name: schedules_BACKUP_succeeded labeled_name: 'schedules{name: BACKUP, status: succeeded}' @@ -8134,7 +7333,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.CHANGEFEED.failed exported_name: schedules_CHANGEFEED_failed labeled_name: 'schedules{name: CHANGEFEED, status: failed}' @@ -8144,7 +7342,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.CHANGEFEED.started exported_name: schedules_CHANGEFEED_started labeled_name: 'schedules{name: CHANGEFEED, status: started}' @@ -8154,7 +7351,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.CHANGEFEED.succeeded exported_name: schedules_CHANGEFEED_succeeded labeled_name: 'schedules{name: CHANGEFEED, status: succeeded}' @@ -8164,7 +7360,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.error exported_name: schedules_error description: Number of schedules which did not execute successfully @@ -8173,7 +7368,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.malformed exported_name: schedules_malformed description: Number of malformed schedules @@ -8182,7 +7376,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.round.jobs-started exported_name: schedules_round_jobs_started description: The number of jobs started @@ -8191,7 +7384,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.round.reschedule-skip exported_name: schedules_round_reschedule_skip description: The number of schedules rescheduled due to SKIP policy @@ -8200,7 +7392,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NONE - owner: cockroachdb/jobs - name: schedules.round.reschedule-wait exported_name: schedules_round_reschedule_wait description: The number of schedules rescheduled due to WAIT policy @@ -8209,7 +7400,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/jobs - name: schedules.scheduled-row-level-ttl-executor.started exported_name: schedules_scheduled_row_level_ttl_executor_started labeled_name: 'schedules{name: scheduled-row-level-ttl-executor, status: started}' @@ -8219,7 +7409,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-row-level-ttl-executor.succeeded exported_name: schedules_scheduled_row_level_ttl_executor_succeeded labeled_name: 'schedules{name: scheduled-row-level-ttl-executor, status: succeeded}' @@ -8229,7 +7418,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-schema-telemetry-executor.failed exported_name: schedules_scheduled_schema_telemetry_executor_failed labeled_name: 'schedules{name: scheduled-schema-telemetry-executor, status: failed}' @@ -8239,7 +7427,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-schema-telemetry-executor.started exported_name: schedules_scheduled_schema_telemetry_executor_started labeled_name: 'schedules{name: scheduled-schema-telemetry-executor, status: started}' @@ -8249,7 +7436,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-schema-telemetry-executor.succeeded exported_name: schedules_scheduled_schema_telemetry_executor_succeeded labeled_name: 'schedules{name: scheduled-schema-telemetry-executor, status: succeeded}' @@ -8259,7 +7445,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: 
schedules.scheduled-sql-stats-compaction-executor.failed exported_name: schedules_scheduled_sql_stats_compaction_executor_failed labeled_name: 'schedules{name: scheduled-sql-stats-compaction-executor, status: failed}' @@ -8269,7 +7454,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-sql-stats-compaction-executor.started exported_name: schedules_scheduled_sql_stats_compaction_executor_started labeled_name: 'schedules{name: scheduled-sql-stats-compaction-executor, status: started}' @@ -8279,7 +7463,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: schedules.scheduled-sql-stats-compaction-executor.succeeded exported_name: schedules_scheduled_sql_stats_compaction_executor_succeeded labeled_name: 'schedules{name: scheduled-sql-stats-compaction-executor, status: succeeded}' @@ -8289,7 +7472,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/jobs - name: server.http.request.duration.nanos exported_name: server_http_request_duration_nanos description: Duration of an HTTP request in nanoseconds. 
@@ -8298,7 +7480,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/unowned - name: sql.bytesin exported_name: sql_bytesin description: Number of SQL bytes received @@ -8307,7 +7488,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.bytesout exported_name: sql_bytesout description: Number of SQL bytes sent @@ -8316,7 +7496,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.call_stored_proc.count exported_name: sql_call_stored_proc_count labeled_name: 'sql.count{query_type: call}' @@ -8326,7 +7505,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.call_stored_proc.count.internal exported_name: sql_call_stored_proc_count_internal labeled_name: 'sql.count{query_type: call, query_internal: true}' @@ -8336,7 +7514,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.call_stored_proc.started.count exported_name: sql_call_stored_proc_started_count labeled_name: 'sql.started.count{query_type: call}' @@ -8346,7 +7523,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.call_stored_proc.started.count.internal exported_name: sql_call_stored_proc_started_count_internal labeled_name: 'sql.started.count{query_type: call, query_internal: true}' @@ -8356,7 +7532,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.conns_waiting_to_hash exported_name: sql_conns_waiting_to_hash description: Number of SQL connection attempts that are being throttled in order to limit password hashing concurrency @@ -8365,7 +7540,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: 
sql.contention.resolver.failed_resolutions exported_name: sql_contention_resolver_failed_resolutions description: Number of failed transaction ID resolution attempts @@ -8374,7 +7548,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.contention.resolver.queue_size exported_name: sql_contention_resolver_queue_size description: Length of queued unresolved contention events @@ -8383,7 +7556,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sql.contention.resolver.retries exported_name: sql_contention_resolver_retries description: Number of times transaction id resolution has been retried @@ -8392,7 +7564,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.contention.txn_id_cache.miss exported_name: sql_contention_txn_id_cache_miss description: Number of cache misses @@ -8401,7 +7572,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.contention.txn_id_cache.read exported_name: sql_contention_txn_id_cache_read description: Number of cache read @@ -8410,7 +7580,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.copy.count exported_name: sql_copy_count labeled_name: 'sql.count{query_type: copy}' @@ -8420,7 +7589,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.count.internal exported_name: sql_copy_count_internal labeled_name: 'sql.count{query_type: copy, query_internal: true}' @@ -8430,7 +7598,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.nonatomic.count exported_name: sql_copy_nonatomic_count description: Number of non-atomic COPY SQL statements successfully executed @@ -8439,7 +7606,6 @@ layers: 
unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.nonatomic.count.internal exported_name: sql_copy_nonatomic_count_internal description: Number of non-atomic COPY SQL statements successfully executed (internal queries) @@ -8448,7 +7614,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.nonatomic.started.count exported_name: sql_copy_nonatomic_started_count description: Number of non-atomic COPY SQL statements started @@ -8457,7 +7622,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.nonatomic.started.count.internal exported_name: sql_copy_nonatomic_started_count_internal description: Number of non-atomic COPY SQL statements started (internal queries) @@ -8466,7 +7630,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.started.count exported_name: sql_copy_started_count labeled_name: 'sql.started.count{query_type: copy}' @@ -8476,7 +7639,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.copy.started.count.internal exported_name: sql_copy_started_count_internal labeled_name: 'sql.started.count{query_type: copy, query_internal: true}' @@ -8486,7 +7648,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.crud_query.count exported_name: sql_crud_query_count description: Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed @@ -8495,7 +7656,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.crud_query.count.internal exported_name: sql_crud_query_count_internal description: Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully 
executed (internal queries) @@ -8504,7 +7664,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.crud_query.started.count exported_name: sql_crud_query_started_count description: Number of SQL SELECT, INSERT, UPDATE, DELETE statements started @@ -8513,7 +7672,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.crud_query.started.count.internal exported_name: sql_crud_query_started_count_internal description: Number of SQL SELECT, INSERT, UPDATE, DELETE statements started (internal queries) @@ -8522,7 +7680,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.ddl.started.count exported_name: sql_ddl_started_count labeled_name: 'sql.started.count{query_type: ddl}' @@ -8532,7 +7689,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.ddl.started.count.internal exported_name: sql_ddl_started_count_internal labeled_name: 'sql.started.count{query_type: ddl, query_internal: true}' @@ -8542,7 +7698,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.disk.distsql.current exported_name: sql_disk_distsql_current description: Current sql statement disk usage for distsql @@ -8551,7 +7706,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.disk.distsql.max exported_name: sql_disk_distsql_max description: Disk usage per sql statement for distsql @@ -8560,7 +7714,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.disk.distsql.spilled.bytes.read exported_name: sql_disk_distsql_spilled_bytes_read description: Number of bytes read from temporary disk storage as a result of spilling @@ -8569,7 +7722,6 @@ layers: unit: BYTES 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.disk.distsql.spilled.bytes.written exported_name: sql_disk_distsql_spilled_bytes_written description: Number of bytes written to temporary disk storage as a result of spilling @@ -8578,7 +7730,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.dist_merge.first_iter.output_sst_count exported_name: sql_dist_merge_first_iter_output_sst_count description: Number of SSTs produced by the first (local) merge iteration @@ -8587,7 +7738,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.dist_merge.import.count exported_name: sql_dist_merge_import_count labeled_name: 'sql.dist_merge.count{type: import}' @@ -8597,7 +7747,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.dist_merge.index.count exported_name: sql_dist_merge_index_count labeled_name: 'sql.dist_merge.count{type: index}' @@ -8607,7 +7756,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.dist_merge.map_phase.sst_count exported_name: sql_dist_merge_map_phase_sst_count description: Number of SSTs from the map phase input to the first merge iteration @@ -8616,7 +7764,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.dist_merge.rpc_memory_reserved.bytes exported_name: sql_dist_merge_rpc_memory_reserved_bytes description: Memory reserved for RPC transport buffers in the final merge iteration @@ -8625,7 +7772,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.distsql.cumulative_contention_nanos exported_name: sql_distsql_cumulative_contention_nanos description: Cumulative contention across all queries (in nanoseconds) @@ -8634,7 
+7780,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.dist_query_rerun_locally.count exported_name: sql_distsql_dist_query_rerun_locally_count description: Total number of cases when distributed query error resulted in a local rerun @@ -8643,7 +7788,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.dist_query_rerun_locally.failure_count exported_name: sql_distsql_dist_query_rerun_locally_failure_count description: Total number of cases when the local rerun of a distributed query resulted in an error @@ -8652,7 +7796,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.distributed_exec.count exported_name: sql_distsql_distributed_exec_count description: Number of invocations of the execution engine executed with full or partial distribution (multiple of which may occur for a single SQL statement) @@ -8661,7 +7804,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.exec.latency exported_name: sql_distsql_exec_latency description: Latency of DistSQL statement execution @@ -8670,7 +7812,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.exec.latency.internal exported_name: sql_distsql_exec_latency_internal description: Latency of DistSQL statement execution (internal queries) @@ -8679,7 +7820,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.flows.active exported_name: sql_distsql_flows_active description: Number of distributed SQL flows currently active @@ -8688,7 +7828,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.flows.total exported_name: 
sql_distsql_flows_total description: Number of distributed SQL flows executed @@ -8697,7 +7836,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.parallel_runner.count exported_name: sql_distsql_parallel_runner_count description: Number of SetupFlowRequest RPCs executed concurrently via DistSQL runners @@ -8706,7 +7844,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.queries.active exported_name: sql_distsql_queries_active description: Number of invocations of the execution engine currently active (multiple of which may occur for a single SQL statement) @@ -8715,7 +7852,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.queries.spilled exported_name: sql_distsql_queries_spilled description: Number of queries that have spilled to disk @@ -8724,7 +7860,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.queries.total exported_name: sql_distsql_queries_total description: Number of invocations of the execution engine executed (multiple of which may occur for a single SQL statement) @@ -8733,7 +7868,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.select.count exported_name: sql_distsql_select_count description: Number of SELECT statements planned to be distributed @@ -8742,7 +7876,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.select.count.internal exported_name: sql_distsql_select_count_internal description: Number of SELECT statements planned to be distributed (internal queries) @@ -8751,7 +7884,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - 
name: sql.distsql.select.distributed_exec.count exported_name: sql_distsql_select_distributed_exec_count description: Number of SELECT statements that were distributed @@ -8760,7 +7892,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.select.distributed_exec.count.internal exported_name: sql_distsql_select_distributed_exec_count_internal description: Number of SELECT statements that were distributed (internal queries) @@ -8769,7 +7900,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.sequential_runner.count exported_name: sql_distsql_sequential_runner_count description: Number of SetupFlowRequest RPCs executed sequentially via the main gateway goroutine @@ -8778,7 +7908,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.distsql.service.latency exported_name: sql_distsql_service_latency description: Latency of DistSQL request execution @@ -8787,7 +7916,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.service.latency.internal exported_name: sql_distsql_service_latency_internal description: Latency of DistSQL request execution (internal queries) @@ -8796,7 +7924,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.distsql.vec.openfds exported_name: sql_distsql_vec_openfds description: Current number of open file descriptors used by vectorized external storage @@ -8805,7 +7932,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency exported_name: sql_exec_latency description: Latency of SQL statement execution @@ -8815,7 +7941,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/sql-queries - name: 
sql.exec.latency.consistent exported_name: sql_exec_latency_consistent description: Latency of SQL statement execution of non-historical queries @@ -8824,7 +7949,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.consistent.internal exported_name: sql_exec_latency_consistent_internal description: Latency of SQL statement execution of non-historical queries (internal queries) @@ -8833,7 +7957,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.detail exported_name: sql_exec_latency_detail description: Latency of SQL statement execution, by statement fingerprint @@ -8842,7 +7965,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.detail.internal exported_name: sql_exec_latency_detail_internal description: Latency of SQL statement execution, by statement fingerprint (internal queries) @@ -8851,7 +7973,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.historical exported_name: sql_exec_latency_historical description: Latency of SQL statement execution of historical queries @@ -8860,7 +7981,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.historical.internal exported_name: sql_exec_latency_historical_internal description: Latency of SQL statement execution of historical queries (internal queries) @@ -8869,7 +7989,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.exec.latency.internal exported_name: sql_exec_latency_internal description: Latency of SQL statement execution (internal queries) @@ -8878,7 +7997,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.feature_flag_denial exported_name: 
sql_feature_flag_denial description: Counter of the number of statements denied by a feature flag @@ -8887,7 +8005,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/unowned - name: sql.guardrails.full_scan_rejected.count exported_name: sql_guardrails_full_scan_rejected_count description: Number of full table or index scans that have been rejected because of `disallow_full_table_scans` guardrail @@ -8896,7 +8013,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.full_scan_rejected.count.internal exported_name: sql_guardrails_full_scan_rejected_count_internal description: Number of full table or index scans that have been rejected because of `disallow_full_table_scans` guardrail (internal queries) @@ -8905,7 +8021,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.max_row_size_err.count exported_name: sql_guardrails_max_row_size_err_count description: Number of rows observed violating sql.guardrails.max_row_size_err @@ -8914,7 +8029,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.max_row_size_err.count.internal exported_name: sql_guardrails_max_row_size_err_count_internal description: Number of rows observed violating sql.guardrails.max_row_size_err (internal queries) @@ -8923,7 +8037,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.max_row_size_log.count exported_name: sql_guardrails_max_row_size_log_count description: Number of rows observed violating sql.guardrails.max_row_size_log @@ -8932,7 +8045,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.max_row_size_log.count.internal exported_name: 
sql_guardrails_max_row_size_log_count_internal description: Number of rows observed violating sql.guardrails.max_row_size_log (internal queries) @@ -8941,7 +8053,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_read_err.count exported_name: sql_guardrails_transaction_rows_read_err_count description: Number of transactions errored because of transaction_rows_read_err guardrail @@ -8950,7 +8061,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_read_err.count.internal exported_name: sql_guardrails_transaction_rows_read_err_count_internal description: Number of transactions errored because of transaction_rows_read_err guardrail (internal queries) @@ -8959,7 +8069,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_read_log.count exported_name: sql_guardrails_transaction_rows_read_log_count description: Number of transactions logged because of transaction_rows_read_log guardrail @@ -8968,7 +8077,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_read_log.count.internal exported_name: sql_guardrails_transaction_rows_read_log_count_internal description: Number of transactions logged because of transaction_rows_read_log guardrail (internal queries) @@ -8977,7 +8085,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_written_err.count exported_name: sql_guardrails_transaction_rows_written_err_count description: Number of transactions errored because of transaction_rows_written_err guardrail @@ -8986,7 +8093,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_written_err.count.internal exported_name: sql_guardrails_transaction_rows_written_err_count_internal description: Number of transactions errored because of transaction_rows_written_err guardrail (internal queries) @@ -8995,7 +8101,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_written_log.count exported_name: sql_guardrails_transaction_rows_written_log_count description: Number of transactions logged because of transaction_rows_written_log guardrail @@ -9004,7 +8109,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.guardrails.transaction_rows_written_log.count.internal exported_name: sql_guardrails_transaction_rows_written_log_count_internal description: Number of transactions logged because of transaction_rows_written_log guardrail (internal queries) @@ -9013,7 +8117,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.hydrated_schema_cache.hits exported_name: sql_hydrated_schema_cache_hits description: counter on the number of cache hits @@ -9022,7 +8125,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_schema_cache.misses exported_name: sql_hydrated_schema_cache_misses description: counter on the number of cache misses @@ -9031,7 +8133,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_table_cache.hits exported_name: sql_hydrated_table_cache_hits description: counter on the number of cache hits @@ -9040,7 +8141,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: 
sql.hydrated_table_cache.misses exported_name: sql_hydrated_table_cache_misses description: counter on the number of cache misses @@ -9049,7 +8149,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_type_cache.hits exported_name: sql_hydrated_type_cache_hits description: counter on the number of cache hits @@ -9058,7 +8157,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_type_cache.misses exported_name: sql_hydrated_type_cache_misses description: counter on the number of cache misses @@ -9067,7 +8165,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_udf_cache.hits exported_name: sql_hydrated_udf_cache_hits description: counter on the number of cache hits @@ -9076,7 +8173,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.hydrated_udf_cache.misses exported_name: sql_hydrated_udf_cache_misses description: counter on the number of cache misses @@ -9085,7 +8181,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.insights.anomaly_detection.evictions exported_name: sql_insights_anomaly_detection_evictions description: Evictions of fingerprint latency summaries due to memory pressure @@ -9094,7 +8189,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.insights.anomaly_detection.fingerprints exported_name: sql_insights_anomaly_detection_fingerprints description: Current number of statement fingerprints being monitored for anomaly detection @@ -9103,7 +8197,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sql.insights.anomaly_detection.memory exported_name: 
sql_insights_anomaly_detection_memory description: Current memory used to support anomaly detection @@ -9112,7 +8205,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sql.leases.active exported_name: sql_leases_active description: The number of outstanding SQL schema leases. @@ -9121,7 +8213,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.expired exported_name: sql_leases_expired description: The number of outstanding session based SQL schema leases expired. @@ -9130,7 +8221,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.long_wait_for_initial_version exported_name: sql_leases_long_wait_for_initial_version description: The number of wait for initial version routines taking more than the lease duration. @@ -9139,7 +8229,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.long_wait_for_no_version exported_name: sql_leases_long_wait_for_no_version description: The number of wait for no versions that are taking more than the lease duration. @@ -9148,7 +8237,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.long_wait_for_one_version exported_name: sql_leases_long_wait_for_one_version description: The number of wait for one versions that are taking more than the lease duration. @@ -9157,7 +8245,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.long_wait_for_two_version_invariant exported_name: sql_leases_long_wait_for_two_version_invariant description: The number of two version invariant waits that are taking more than the lease duration. 
@@ -9166,7 +8253,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.leases.waiting_to_expire exported_name: sql_leases_waiting_to_expire description: The number of outstanding session based SQL schema leases with expiry. @@ -9175,7 +8261,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.mem.bulk.current exported_name: sql_mem_bulk_current description: Current sql statement memory usage for bulk operations @@ -9184,7 +8269,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: sql.mem.bulk.max exported_name: sql_mem_bulk_max description: Memory usage per sql statement for bulk operations @@ -9193,7 +8277,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/disaster-recovery - name: sql.mem.conns.current exported_name: sql_mem_conns_current description: Current sql statement memory usage for conns @@ -9202,7 +8285,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.conns.max exported_name: sql_mem_conns_max description: Memory usage per sql statement for conns @@ -9211,7 +8293,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.distsql.current exported_name: sql_mem_distsql_current description: Current sql statement memory usage for distsql @@ -9220,7 +8301,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.distsql.max exported_name: sql_mem_distsql_max description: Memory usage per sql statement for distsql @@ -9229,7 +8309,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.current exported_name: sql_mem_internal_current description: Current sql statement memory usage for internal @@ -9238,7 +8317,6 @@ layers: unit: BYTES aggregation: AVG 
derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.max exported_name: sql_mem_internal_max description: Memory usage per sql statement for internal @@ -9247,7 +8325,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.session.current exported_name: sql_mem_internal_session_current description: Current sql session memory usage for internal @@ -9256,7 +8333,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.session.max exported_name: sql_mem_internal_session_max description: Memory usage per sql session for internal @@ -9265,7 +8341,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.session.prepared.current exported_name: sql_mem_internal_session_prepared_current description: Current sql session memory usage by prepared statements for internal @@ -9274,7 +8349,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.session.prepared.max exported_name: sql_mem_internal_session_prepared_max description: Memory usage by prepared statements per sql session for internal @@ -9283,7 +8357,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.txn.current exported_name: sql_mem_internal_txn_current description: Current sql transaction memory usage for internal @@ -9292,7 +8365,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.internal.txn.max exported_name: sql_mem_internal_txn_max description: Memory usage per sql transaction for internal @@ -9301,7 +8373,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.root.max exported_name: sql_mem_root_max description: Memory usage per sql statement for root @@ -9310,7 +8381,6 @@ 
layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.current exported_name: sql_mem_sql_current description: Current sql statement memory usage for sql @@ -9319,7 +8389,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.max exported_name: sql_mem_sql_max description: Memory usage per sql statement for sql @@ -9328,7 +8397,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.session.current exported_name: sql_mem_sql_session_current description: Current sql session memory usage for sql @@ -9337,7 +8405,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.session.max exported_name: sql_mem_sql_session_max description: Memory usage per sql session for sql @@ -9346,7 +8413,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.session.prepared.current exported_name: sql_mem_sql_session_prepared_current description: Current sql session memory usage by prepared statements for sql @@ -9355,7 +8421,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.session.prepared.max exported_name: sql_mem_sql_session_prepared_max description: Memory usage by prepared statements per sql session for sql @@ -9364,7 +8429,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.txn.current exported_name: sql_mem_sql_txn_current description: Current sql transaction memory usage for sql @@ -9373,7 +8437,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.mem.sql.txn.max exported_name: sql_mem_sql_txn_max description: Memory usage per sql transaction for sql @@ -9382,7 +8445,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: 
cockroachdb/sql-queries - name: sql.misc.count exported_name: sql_misc_count labeled_name: 'sql.count{query_type: misc}' @@ -9392,7 +8454,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.misc.count.internal exported_name: sql_misc_count_internal labeled_name: 'sql.count{query_type: misc, query_internal: true}' @@ -9402,7 +8463,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.misc.started.count exported_name: sql_misc_started_count labeled_name: 'sql.started.count{query_type: misc}' @@ -9412,7 +8472,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.misc.started.count.internal exported_name: sql_misc_started_count_internal labeled_name: 'sql.started.count{query_type: misc, query_internal: true}' @@ -9422,7 +8481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.optimizer.plan_cache.hits exported_name: sql_optimizer_plan_cache_hits description: Number of non-prepared statements for which a cached plan was used @@ -9431,7 +8489,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.optimizer.plan_cache.hits.internal exported_name: sql_optimizer_plan_cache_hits_internal description: Number of non-prepared statements for which a cached plan was used (internal queries) @@ -9440,7 +8497,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.optimizer.plan_cache.misses exported_name: sql_optimizer_plan_cache_misses description: Number of non-prepared statements for which a cached plan was not used @@ -9449,7 +8505,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: 
sql.optimizer.plan_cache.misses.internal exported_name: sql_optimizer_plan_cache_misses_internal description: Number of non-prepared statements for which a cached plan was not used (internal queries) @@ -9458,7 +8513,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.pgwire.pipeline.count exported_name: sql_pgwire_pipeline_count description: Number of pgwire commands received by the server that have not yet begun processing @@ -9467,7 +8521,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.pgwire_cancel.ignored exported_name: sql_pgwire_cancel_ignored description: Number of pgwire query cancel requests that were ignored due to rate limiting @@ -9476,7 +8529,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pgwire_cancel.successful exported_name: sql_pgwire_cancel_successful description: Number of pgwire query cancel requests that were successful @@ -9485,7 +8537,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pgwire_cancel.total exported_name: sql_pgwire_cancel_total description: Number of pgwire query cancel requests @@ -9494,7 +8545,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.bytesin exported_name: sql_pre_serve_bytesin description: Number of SQL bytes received prior to routing the connection to the target SQL server @@ -9503,7 +8553,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.bytesout exported_name: sql_pre_serve_bytesout description: Number of SQL bytes sent prior to routing the connection to the target SQL server @@ -9512,7 +8561,6 @@ layers: unit: BYTES aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.conn.failures exported_name: sql_pre_serve_conn_failures description: Number of SQL connection failures prior to routing the connection to the target SQL server @@ -9521,7 +8569,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.mem.cur exported_name: sql_pre_serve_mem_cur description: Current memory usage for SQL connections prior to routing the connection to the target SQL server @@ -9530,7 +8577,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.mem.max exported_name: sql_pre_serve_mem_max description: Memory usage for SQL connections prior to routing the connection to the target SQL server @@ -9539,7 +8585,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.pre_serve.new_conns exported_name: sql_pre_serve_new_conns description: Number of SQL connections created prior to routing the connection to the target SQL server @@ -9548,7 +8593,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-foundations - name: sql.query.count exported_name: sql_query_count description: Number of SQL operations started including queries, and transaction control statements @@ -9558,7 +8602,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/sql-queries - name: sql.query.count.internal exported_name: sql_query_count_internal description: Number of SQL operations started including queries, and transaction control statements (internal queries) @@ -9567,7 +8610,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.query.started.count exported_name: sql_query_started_count description: Number of SQL operations started including 
queries, and transaction control statements @@ -9576,7 +8618,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.query.started.count.internal exported_name: sql_query_started_count_internal description: Number of SQL operations started including queries, and transaction control statements (internal queries) @@ -9585,7 +8626,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.query.unique.count exported_name: sql_query_unique_count description: Cardinality estimate of the set of statement fingerprints @@ -9594,7 +8634,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.query.unique.count.internal exported_name: sql_query_unique_count_internal description: Cardinality estimate of the set of statement fingerprints (internal queries) @@ -9603,7 +8642,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.count exported_name: sql_restart_savepoint_count labeled_name: 'sql.count{query_type: restart_savepoint}' @@ -9613,7 +8651,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.count.internal exported_name: sql_restart_savepoint_count_internal labeled_name: 'sql.count{query_type: restart_savepoint, query_internal: true}' @@ -9623,7 +8660,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.release.count exported_name: sql_restart_savepoint_release_count labeled_name: 'sql.count{query_type: release_restart_savepoint}' @@ -9633,7 +8669,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.release.count.internal 
exported_name: sql_restart_savepoint_release_count_internal labeled_name: 'sql.count{query_type: release_restart_savepoint, query_internal: true}' @@ -9643,7 +8678,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.release.started.count exported_name: sql_restart_savepoint_release_started_count labeled_name: 'sql.started.count{query_type: release_restart_savepoint}' @@ -9653,7 +8687,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.release.started.count.internal exported_name: sql_restart_savepoint_release_started_count_internal labeled_name: 'sql.started.count{query_type: release_restart_savepoint, query_internal: true}' @@ -9663,7 +8696,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.rollback.count exported_name: sql_restart_savepoint_rollback_count labeled_name: 'sql.count{query_type: rollback_to_restart_savepoint}' @@ -9673,7 +8705,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.rollback.count.internal exported_name: sql_restart_savepoint_rollback_count_internal labeled_name: 'sql.count{query_type: rollback_to_restart_savepoint, query_internal: true}' @@ -9683,7 +8714,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.rollback.started.count exported_name: sql_restart_savepoint_rollback_started_count labeled_name: 'sql.started.count{query_type: rollback_to_restart_savepoint}' @@ -9693,7 +8723,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.rollback.started.count.internal exported_name: 
sql_restart_savepoint_rollback_started_count_internal labeled_name: 'sql.started.count{query_type: rollback_to_restart_savepoint, query_internal: true}' @@ -9703,7 +8732,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.started.count exported_name: sql_restart_savepoint_started_count labeled_name: 'sql.started.count{query_type: restart_savepoint}' @@ -9713,7 +8741,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.restart_savepoint.started.count.internal exported_name: sql_restart_savepoint_started_count_internal labeled_name: 'sql.started.count{query_type: restart_savepoint, query_internal: true}' @@ -9723,7 +8750,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.count exported_name: sql_savepoint_count labeled_name: 'sql.count{query_type: savepoint}' @@ -9733,7 +8759,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.count.internal exported_name: sql_savepoint_count_internal labeled_name: 'sql.count{query_type: savepoint, query_internal: true}' @@ -9743,7 +8768,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.release.count exported_name: sql_savepoint_release_count labeled_name: 'sql.count{query_type: release_savepoint}' @@ -9753,7 +8777,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.release.count.internal exported_name: sql_savepoint_release_count_internal labeled_name: 'sql.count{query_type: release_savepoint, query_internal: true}' @@ -9763,7 +8786,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: 
sql.savepoint.release.started.count exported_name: sql_savepoint_release_started_count labeled_name: 'sql.started.count{query_type: release_savepoint}' @@ -9773,7 +8795,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.release.started.count.internal exported_name: sql_savepoint_release_started_count_internal labeled_name: 'sql.started.count{query_type: release_savepoint, query_internal: true}' @@ -9783,7 +8804,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.rollback.count exported_name: sql_savepoint_rollback_count labeled_name: 'sql.count{query_type: rollback_to_savepoint}' @@ -9793,7 +8813,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.rollback.count.internal exported_name: sql_savepoint_rollback_count_internal labeled_name: 'sql.count{query_type: rollback_to_savepoint, query_internal: true}' @@ -9803,7 +8822,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.rollback.started.count exported_name: sql_savepoint_rollback_started_count labeled_name: 'sql.started.count{query_type: rollback_to_savepoint}' @@ -9813,7 +8831,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.rollback.started.count.internal exported_name: sql_savepoint_rollback_started_count_internal labeled_name: 'sql.started.count{query_type: rollback_to_savepoint, query_internal: true}' @@ -9823,7 +8840,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.started.count exported_name: sql_savepoint_started_count labeled_name: 'sql.started.count{query_type: savepoint}' @@ -9833,7 +8849,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.savepoint.started.count.internal exported_name: sql_savepoint_started_count_internal labeled_name: 'sql.started.count{query_type: savepoint, query_internal: true}' @@ -9843,7 +8858,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.schema.invalid_objects exported_name: sql_schema_invalid_objects description: Gauge of detected invalid objects within the system.descriptor table (measured by querying crdb_internal.invalid_objects) @@ -9852,7 +8866,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.schema_changer.object_count exported_name: sql_schema_changer_object_count description: Counter of the number of objects in the cluster @@ -9861,7 +8874,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-foundations - name: sql.service.latency.consistent exported_name: sql_service_latency_consistent description: Latency of SQL request execution of non-historical queries @@ -9870,7 +8882,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.service.latency.consistent.internal exported_name: sql_service_latency_consistent_internal description: Latency of SQL request execution of non-historical queries (internal queries) @@ -9879,7 +8890,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.service.latency.historical exported_name: sql_service_latency_historical description: Latency of SQL request execution of historical queries @@ -9888,7 +8898,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.service.latency.historical.internal exported_name: sql_service_latency_historical_internal description: Latency of SQL request execution of historical queries 
(internal queries) @@ -9897,7 +8906,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.statement_timeout.count exported_name: sql_statement_timeout_count description: Count of statements that failed because they exceeded the statement timeout @@ -9906,7 +8914,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statement_timeout.count.internal exported_name: sql_statement_timeout_count_internal description: Count of statements that failed because they exceeded the statement timeout (internal queries) @@ -9915,7 +8922,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.auto_retry.count exported_name: sql_statements_auto_retry_count description: Number of SQL statement automatic retries @@ -9924,7 +8930,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.auto_retry.count.internal exported_name: sql_statements_auto_retry_count_internal description: Number of SQL statement automatic retries (internal queries) @@ -9933,7 +8938,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.bytes_read.count exported_name: sql_statements_bytes_read_count description: Number of bytes read by SQL statements @@ -9942,7 +8946,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.bytes_read.count.internal exported_name: sql_statements_bytes_read_count_internal description: Number of bytes read by SQL statements (internal queries) @@ -9951,7 +8954,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.index_bytes_written.count exported_name: 
sql_statements_index_bytes_written_count description: Number of primary and secondary index bytes modified by SQL statements @@ -9960,7 +8962,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.index_bytes_written.count.internal exported_name: sql_statements_index_bytes_written_count_internal description: Number of primary and secondary index bytes modified by SQL statements (internal queries) @@ -9969,7 +8970,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.index_rows_written.count exported_name: sql_statements_index_rows_written_count description: Number of primary and secondary index rows modified by SQL statements @@ -9978,7 +8978,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.index_rows_written.count.internal exported_name: sql_statements_index_rows_written_count_internal description: Number of primary and secondary index rows modified by SQL statements (internal queries) @@ -9987,7 +8986,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.rows_read.count exported_name: sql_statements_rows_read_count description: Number of rows read by SQL statements @@ -9996,7 +8994,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.statements.rows_read.count.internal exported_name: sql_statements_rows_read_count_internal description: Number of rows read by SQL statements (internal queries) @@ -10005,7 +9002,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.activity.update.latency exported_name: sql_stats_activity_update_latency description: The latency of updates made by the SQL activity updater 
job. Includes failed update attempts @@ -10014,7 +9010,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.activity.updates.failed exported_name: sql_stats_activity_updates_failed description: Number of update attempts made by the SQL activity updater job that failed with errors @@ -10023,7 +9018,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.activity.updates.successful exported_name: sql_stats_activity_updates_successful description: Number of successful updates made by the SQL activity updater job @@ -10032,7 +9026,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.cleanup.rows_removed exported_name: sql_stats_cleanup_rows_removed description: Number of stale statistics rows that are removed @@ -10041,7 +9034,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.discarded.current exported_name: sql_stats_discarded_current description: Number of fingerprint statistics being discarded @@ -10050,7 +9042,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.flush.done_signals.ignored exported_name: sql_stats_flush_done_signals_ignored description: Number of times the SQL Stats activity update job ignored the signal sent to it indicating a flush has completed @@ -10059,7 +9050,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.flush.fingerprint.count exported_name: sql_stats_flush_fingerprint_count description: The number of unique statement and transaction fingerprints included in the SQL Stats flush @@ -10068,7 +9058,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/sql-queries - name: sql.stats.flush.latency exported_name: sql_stats_flush_latency description: The latency of SQL Stats flushes to persistent storage. Includes failed flush attempts @@ -10077,7 +9066,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.flushes.failed exported_name: sql_stats_flushes_failed description: Number of attempted SQL Stats flushes that failed with errors @@ -10086,7 +9074,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.flushes.successful exported_name: sql_stats_flushes_successful description: Number of times SQL Stats are flushed successfully to persistent storage @@ -10095,7 +9082,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.stats.ingester.num_processed exported_name: sql_stats_ingester_num_processed description: Number of items processed by the SQL stats ingester @@ -10104,7 +9090,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sql.stats.ingester.queue_size exported_name: sql_stats_ingester_queue_size description: Current number of items queued in the SQL stats ingester @@ -10113,7 +9098,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sql.stats.mem.current exported_name: sql_stats_mem_current description: Current memory usage for fingerprint storage @@ -10122,7 +9106,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.mem.max exported_name: sql_stats_mem_max description: Memory usage for fingerprint storage @@ -10131,7 +9114,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.reported.mem.current exported_name: sql_stats_reported_mem_current description: Current memory usage for 
reported fingerprint storage @@ -10140,7 +9122,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.reported.mem.max exported_name: sql_stats_reported_mem_max description: Memory usage for reported fingerprint storage @@ -10149,7 +9130,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.stats.txn_stats_collection.duration exported_name: sql_stats_txn_stats_collection_duration description: Time took in nanoseconds to collect transaction stats @@ -10158,7 +9138,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.temp_object_cleaner.active_cleaners exported_name: sql_temp_object_cleaner_active_cleaners description: number of cleaner tasks currently running on this node @@ -10167,7 +9146,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: sql.temp_object_cleaner.schemas_deletion_error exported_name: sql_temp_object_cleaner_schemas_deletion_error description: number of errored schema deletions by the temp object cleaner on this node @@ -10176,7 +9154,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.temp_object_cleaner.schemas_deletion_success exported_name: sql_temp_object_cleaner_schemas_deletion_success description: number of successful schema deletions by the temp object cleaner on this node @@ -10185,7 +9162,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.temp_object_cleaner.schemas_to_delete exported_name: sql_temp_object_cleaner_schemas_to_delete description: number of schemas to be deleted by the temp object cleaner on this node @@ -10194,7 +9170,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.transaction_timeout.count 
exported_name: sql_transaction_timeout_count description: Count of statements that failed because they exceeded the transaction timeout @@ -10203,7 +9178,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.transaction_timeout.count.internal exported_name: sql_transaction_timeout_count_internal description: Count of statements that failed because they exceeded the transaction timeout (internal queries) @@ -10212,7 +9186,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.auto_retry.count exported_name: sql_txn_auto_retry_count description: Number of SQL transaction automatic retries @@ -10221,7 +9194,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.auto_retry.count.internal exported_name: sql_txn_auto_retry_count_internal description: Number of SQL transaction automatic retries (internal queries) @@ -10230,7 +9202,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.begin.started.count exported_name: sql_txn_begin_started_count labeled_name: 'sql.started.count{query_type: begin}' @@ -10240,7 +9211,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.begin.started.count.internal exported_name: sql_txn_begin_started_count_internal labeled_name: 'sql.started.count{query_type: begin, query_internal: true}' @@ -10250,7 +9220,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit.started.count exported_name: sql_txn_commit_started_count labeled_name: 'sql.started.count{query_type: commit}' @@ -10260,7 +9229,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: 
sql.txn.commit.started.count.internal exported_name: sql_txn_commit_started_count_internal labeled_name: 'sql.started.count{query_type: commit, query_internal: true}' @@ -10270,7 +9238,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit_prepared.count exported_name: sql_txn_commit_prepared_count labeled_name: 'sql.count{query_type: commit_prepared}' @@ -10280,7 +9247,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit_prepared.count.internal exported_name: sql_txn_commit_prepared_count_internal labeled_name: 'sql.count{query_type: commit_prepared, query_internal: true}' @@ -10290,7 +9256,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit_prepared.started.count exported_name: sql_txn_commit_prepared_started_count labeled_name: 'sql.started.count{query_type: commit_prepared}' @@ -10300,7 +9265,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.commit_prepared.started.count.internal exported_name: sql_txn_commit_prepared_started_count_internal labeled_name: 'sql.started.count{query_type: commit_prepared, query_internal: true}' @@ -10310,7 +9274,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.contended.count exported_name: sql_txn_contended_count description: Number of SQL transactions experienced contention @@ -10319,7 +9282,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.contended.count.internal exported_name: sql_txn_contended_count_internal description: Number of SQL transactions experienced contention (internal queries) @@ -10328,7 +9290,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.prepare.count exported_name: sql_txn_prepare_count labeled_name: 'sql.count{query_type: prepare_transaction}' @@ -10338,7 +9299,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.prepare.count.internal exported_name: sql_txn_prepare_count_internal labeled_name: 'sql.count{query_type: prepare_transaction, query_internal: true}' @@ -10348,7 +9308,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.prepare.started.count exported_name: sql_txn_prepare_started_count labeled_name: 'sql.started.count{query_type: prepare_transaction}' @@ -10358,7 +9317,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.prepare.started.count.internal exported_name: sql_txn_prepare_started_count_internal labeled_name: 'sql.started.count{query_type: prepare_transaction, query_internal: true}' @@ -10368,7 +9326,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback.started.count exported_name: sql_txn_rollback_started_count labeled_name: 'sql.started.count{query_type: rollback}' @@ -10378,7 +9335,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback.started.count.internal exported_name: sql_txn_rollback_started_count_internal labeled_name: 'sql.started.count{query_type: rollback, query_internal: true}' @@ -10388,7 +9344,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback_prepared.count exported_name: sql_txn_rollback_prepared_count labeled_name: 'sql.count{query_type: rollback_prepared}' @@ -10398,7 +9353,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback_prepared.count.internal exported_name: sql_txn_rollback_prepared_count_internal labeled_name: 'sql.count{query_type: rollback_prepared, query_internal: true}' @@ -10408,7 +9362,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback_prepared.started.count exported_name: sql_txn_rollback_prepared_started_count labeled_name: 'sql.started.count{query_type: rollback_prepared}' @@ -10418,7 +9371,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.rollback_prepared.started.count.internal exported_name: sql_txn_rollback_prepared_started_count_internal labeled_name: 'sql.started.count{query_type: rollback_prepared, query_internal: true}' @@ -10428,7 +9380,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.upgraded_iso_level.count exported_name: sql_txn_upgraded_iso_level_count description: Number of times a weak isolation level was automatically upgraded to a stronger one @@ -10437,7 +9388,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.txn.upgraded_iso_level.count.internal exported_name: sql_txn_upgraded_iso_level_count_internal description: Number of times a weak isolation level was automatically upgraded to a stronger one (internal queries) @@ -10446,7 +9396,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sql.vecindex.pending_splits_merges exported_name: sql_vecindex_pending_splits_merges description: Total number of vector index splits and merges waiting to be processed @@ -10455,7 +9404,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/sql-queries - name: 
sql.vecindex.successful_splits exported_name: sql_vecindex_successful_splits description: Total number of vector index partitions split without error @@ -10464,7 +9412,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/sql-queries - name: sqlliveness.is_alive.cache_hits exported_name: sqlliveness_is_alive_cache_hits description: Number of calls to IsAlive that return from the cache @@ -10473,7 +9420,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: sqlliveness.is_alive.cache_misses exported_name: sqlliveness_is_alive_cache_misses description: Number of calls to IsAlive that do not return from the cache @@ -10482,7 +9428,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: sqlliveness.sessions_deleted exported_name: sqlliveness_sessions_deleted description: Number of expired sessions which have been deleted @@ -10491,7 +9436,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: sqlliveness.sessions_deletion_runs exported_name: sqlliveness_sessions_deletion_runs description: Number of calls to delete sessions which have been performed @@ -10500,7 +9444,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: sqlliveness.write_failures exported_name: sqlliveness_write_failures description: Number of update or insert calls which have failed @@ -10509,7 +9452,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: sqlliveness.write_successes exported_name: sqlliveness_write_successes description: Number of update or insert calls successfully performed @@ -10518,7 +9460,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.cost_client.blocked_requests 
exported_name: tenant_cost_client_blocked_requests description: Number of requests currently blocked by the rate limiter @@ -10527,7 +9468,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/server - name: tenant.sql_usage.cross_region_network_ru exported_name: tenant_sql_usage_cross_region_network_ru description: Total number of RUs charged for cross-region network traffic @@ -10536,7 +9476,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.estimated_cpu_seconds exported_name: tenant_sql_usage_estimated_cpu_seconds description: Estimated amount of CPU consumed by a virtual cluster @@ -10545,7 +9484,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.estimated_kv_cpu_seconds exported_name: tenant_sql_usage_estimated_kv_cpu_seconds description: Estimated amount of CPU consumed by a virtual cluster, in the KV layer @@ -10554,7 +9492,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.estimated_replication_bytes exported_name: tenant_sql_usage_estimated_replication_bytes description: Total number of estimated bytes for KV replication traffic @@ -10563,7 +9500,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.external_io_egress_bytes exported_name: tenant_sql_usage_external_io_egress_bytes description: Total number of bytes written to external services such as cloud storage providers @@ -10572,7 +9508,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.external_io_ingress_bytes exported_name: tenant_sql_usage_external_io_ingress_bytes description: Total number of bytes read from external services such as cloud storage providers @@ -10581,7 +9516,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.kv_request_units exported_name: tenant_sql_usage_kv_request_units description: RU consumption attributable to KV @@ -10590,7 +9524,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.pgwire_egress_bytes exported_name: tenant_sql_usage_pgwire_egress_bytes description: Total number of bytes transferred from a SQL pod to the client @@ -10599,7 +9532,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.provisioned_vcpus exported_name: tenant_sql_usage_provisioned_vcpus description: Number of vcpus available to the virtual cluster @@ -10608,7 +9540,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/server - name: tenant.sql_usage.read_batches exported_name: tenant_sql_usage_read_batches description: Total number of KV read batches @@ -10617,7 +9548,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.read_bytes exported_name: tenant_sql_usage_read_bytes description: Total number of bytes read from KV @@ -10626,7 +9556,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.read_requests exported_name: tenant_sql_usage_read_requests description: Total number of KV read requests @@ -10635,7 +9564,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.request_units exported_name: tenant_sql_usage_request_units description: RU consumption @@ -10644,7 +9572,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.sql_pods_cpu_seconds exported_name: 
tenant_sql_usage_sql_pods_cpu_seconds description: Total amount of CPU used by SQL pods @@ -10653,7 +9580,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.write_batches exported_name: tenant_sql_usage_write_batches description: Total number of KV write batches @@ -10662,7 +9588,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.write_bytes exported_name: tenant_sql_usage_write_bytes description: Total number of bytes written to KV @@ -10671,7 +9596,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.sql_usage.write_requests exported_name: tenant_sql_usage_write_requests description: Total number of KV write requests @@ -10680,7 +9604,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: txn.aborts exported_name: txn_aborts description: Number of aborted KV transactions @@ -10689,7 +9612,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.commit_waits exported_name: txn_commit_waits description: Number of KV transactions that had to commit-wait on commit in order to ensure linearizability. This generally happens to transactions writing to global ranges. 
@@ -10698,7 +9620,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.commits exported_name: txn_commits description: Number of committed KV transactions (including 1PC) @@ -10707,7 +9628,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.commits1PC exported_name: txn_commits1PC description: Number of KV transaction one-phase commits @@ -10716,7 +9636,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.commits_read_only exported_name: txn_commits_read_only description: Number of read only KV transaction commits @@ -10725,7 +9644,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.condensed_intent_spans exported_name: txn_condensed_intent_spans description: KV transactions that have exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). See also txn.condensed_intent_spans_gauge for a gauge of such transactions currently running. @@ -10734,7 +9652,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.condensed_intent_spans_gauge exported_name: txn_condensed_intent_spans_gauge description: KV transactions currently running that have exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). See also txn.condensed_intent_spans for a perpetual counter/rate. @@ -10743,7 +9660,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txn.condensed_intent_spans_rejected exported_name: txn_condensed_intent_spans_rejected description: KV transactions that have been aborted because they exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). Rejection is caused by kv.transaction.reject_over_max_intents_budget. 
@@ -10752,7 +9668,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.count_limit_on_response exported_name: txn_count_limit_on_response description: KV transactions that have exceeded the count limit on a response @@ -10761,7 +9676,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.count_limit_rejected exported_name: txn_count_limit_rejected description: KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed @@ -10770,7 +9684,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.durations exported_name: txn_durations description: KV transaction durations @@ -10779,7 +9692,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txn.inflight_locks_over_tracking_budget exported_name: txn_inflight_locks_over_tracking_budget description: KV transactions whose in-flight writes and locking reads have exceeded the intent tracking memory budget (kv.transaction.max_intents_bytes). 
@@ -10788,7 +9700,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.parallelcommits exported_name: txn_parallelcommits description: Number of KV transaction parallel commits @@ -10797,7 +9708,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.parallelcommits.auto_retries exported_name: txn_parallelcommits_auto_retries description: Number of commit tries after successful failed parallel commit attempts @@ -10806,7 +9716,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.prepares exported_name: txn_prepares description: Number of prepared KV transactions @@ -10815,7 +9724,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.auto_retries exported_name: txn_refresh_auto_retries description: Number of request retries after successful client-side refreshes @@ -10824,7 +9732,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.fail exported_name: txn_refresh_fail description: Number of failed client-side transaction refreshes @@ -10833,7 +9740,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.fail_with_condensed_spans exported_name: txn_refresh_fail_with_condensed_spans description: Number of failed client-side refreshes for transactions whose read tracking lost fidelity because of condensing. Such a failure could be a false conflict. Failures counted here are also counted in txn.refresh.fail, and the respective transactions are also counted in txn.refresh.memory_limit_exceeded. 
@@ -10842,7 +9748,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.memory_limit_exceeded exported_name: txn_refresh_memory_limit_exceeded description: Number of transaction which exceed the refresh span bytes limit, causing their read spans to be condensed @@ -10851,7 +9756,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.success exported_name: txn_refresh_success description: Number of successful client-side transaction refreshes. A refresh may be preemptive or reactive. A reactive refresh is performed after a request throws an error because a refresh is needed for it to succeed. In these cases, the request will be re-issued as an auto-retry (see txn.refresh.auto_retries) after the refresh succeeds. @@ -10860,7 +9764,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.refresh.success_server_side exported_name: txn_refresh_success_server_side description: Number of successful server-side transaction refreshes @@ -10869,7 +9772,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.restarts exported_name: txn_restarts description: Number of restarted KV transactions @@ -10879,7 +9781,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: txn.restarts.asyncwritefailure exported_name: txn_restarts_asyncwritefailure description: Number of restarts due to async consensus writes that failed to leave intents @@ -10888,7 +9789,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.restarts.commitdeadlineexceeded exported_name: txn_restarts_commitdeadlineexceeded description: Number of restarts due to a transaction exceeding its deadline @@ -10897,7 +9797,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.restarts.exclusionviolation exported_name: txn_restarts_exclusionviolation description: Number of restarts due to an exclusion violation @@ -10906,7 +9805,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.restarts.readwithinuncertainty exported_name: txn_restarts_readwithinuncertainty description: Number of restarts due to reading a new value within the uncertainty interval @@ -10915,7 +9813,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.rollbacks.async.failed exported_name: txn_rollbacks_async_failed description: Number of KV transactions that failed to send abort asynchronously which is not always retried @@ -10924,7 +9821,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.rollbacks.failed exported_name: txn_rollbacks_failed description: Number of KV transactions that failed to send final abort @@ -10933,7 +9829,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.write_buffering.batches.fully_handled exported_name: txn_write_buffering_batches_fully_handled description: Number of KV batches that were fully handled by the write buffer (not sent to KV) @@ -10942,7 +9837,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.write_buffering.disabled_after_buffering exported_name: txn_write_buffering_disabled_after_buffering description: Number of KV transactions that disabled write buffering after buffering some writes but before an EndTxn request @@ -10951,7 +9845,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.write_buffering.memory_limit_exceeded exported_name: txn_write_buffering_memory_limit_exceeded description: Number of KV 
transactions that exceeded the write buffering memory limit @@ -10960,7 +9853,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.write_buffering.num_enabled exported_name: txn_write_buffering_num_enabled description: Number of KV transactions that enabled buffered writes @@ -10969,7 +9861,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: SERVER categories: - name: HARDWARE @@ -10984,7 +9875,6 @@ layers: derivative: NONE how_to_use: "This metric gives the CPU utilization percentage by the CockroachDB process. \n\t\tIf it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should \n\t\tnot be running with over 80% utilization for extended periods of time (hours). This metric \n\t\tis used in the DB Console CPU Percent graph." visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.cpu.host.combined.percent-normalized exported_name: sys_cpu_host_combined_percent_normalized description: Current user+system cpu percentage across the whole machine, normalized 0-1 by number of cores @@ -10995,7 +9885,6 @@ layers: derivative: NONE how_to_use: "This metric gives the CPU utilization percentage of the\n\t\tunderlying server, virtual machine, or container hosting the\n\t\tCockroachDB process. It includes CPU usage from both CockroachDB\n\t\tand non-CockroachDB processes. It also accounts for time spent\n\t\tprocessing hardware (irq) and software (softirq) interrupts, as\n\t\twell as nice time, which represents low-priority user-mode\n\t\tactivity.\n\n A value of 1 (or 100%) indicates that the CPU is overloaded. Avoid\n running the CockroachDB process in an environment where the CPU\n remains overloaded for extended periods (e.g. multiple hours). This\n metric appears in the DB Console on the Host CPU Percent graph." 
visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.cpu.sys.percent exported_name: sys_cpu_sys_percent description: Current system cpu percentage consumed by the CRDB process @@ -11006,7 +9895,6 @@ layers: derivative: NONE how_to_use: "This metric gives the CPU usage percentage at the system\n\t\t(Linux kernel) level by the CockroachDB process only. This is\n\t\tsimilar to the Linux top command output. The metric value can be\n\t\tmore than 1 (or 100%) on multi-core systems. It is best to combine\n\t\tuser and system metrics." visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.cpu.user.percent exported_name: sys_cpu_user_percent description: Current user cpu percentage consumed by the CRDB process @@ -11017,7 +9905,6 @@ layers: derivative: NONE how_to_use: "This metric gives the CPU usage percentage at the user\n\t\tlevel by the CockroachDB process only. This is similar to the Linux\n\t\ttop command output. The metric value can be more than 1 (or 100%)\n\t\ton multi-core systems. It is best to combine user and system\n\t\tmetrics." visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.disk.iopsinprogress exported_name: sys_host_disk_iopsinprogress description: IO operations currently in progress on this host (as reported by the OS) @@ -11028,7 +9915,6 @@ layers: derivative: NONE how_to_use: This metric gives the average queue length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the avgqu-sz in the Linux iostat command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) NVMe devices, the queue values are typically 0. 
For network connected devices, such as AWS EBS volumes, the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster with nodes with only 2 vcpus which is not supported sizing for production deployments. There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. Review storage and disk I/O. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.disk.read.bytes exported_name: sys_host_disk_read_bytes description: Bytes read from all disks since this process started (as reported by the OS) @@ -11039,7 +9925,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.disk.read.count exported_name: sys_host_disk_read_count description: Disk read operations across all disks since this process started (as reported by the OS) @@ -11050,7 +9935,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports the effective storage device read IOPS rate. 
To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.disk.write.bytes exported_name: sys_host_disk_write_bytes description: Bytes written to all disks since this process started (as reported by the OS) @@ -11061,7 +9945,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.disk.write.count exported_name: sys_host_disk_write_count description: Disk write operations across all disks since this process started (as reported by the OS) @@ -11072,7 +9955,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports the effective storage device write IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.net.recv.bytes exported_name: sys_host_net_recv_bytes description: Bytes received on all network interfaces since this process started (as reported by the OS) @@ -11083,7 +9965,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. 
visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.net.send.bytes exported_name: sys_host_net_send_bytes description: Bytes sent on all network interfaces since this process started (as reported by the OS) @@ -11094,7 +9975,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.rss exported_name: sys_rss description: Current process RSS @@ -11105,7 +9985,6 @@ layers: derivative: NONE how_to_use: "This metric gives the amount of RAM used by the\n\t\tCockroachDB process. Persistently low values over an extended\n\t\tperiod of time suggest there is underutilized memory that can be\n\t\tput to work with adjusted settings for --cache or --max_sql_memory\n\t\tor both. Conversely, a high utilization, even if a temporary spike,\n\t\tindicates an increased risk of Out-of-memory (OOM) crash\n\t\t(particularly since the swap is generally disabled)." visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.runnable.goroutines.per.cpu exported_name: sys_runnable_goroutines_per_cpu description: Average number of goroutines that are waiting to run, normalized by number of cores @@ -11116,7 +9995,6 @@ layers: derivative: NONE how_to_use: If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review CPU planning. 
visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.uptime exported_name: sys_uptime description: Process uptime @@ -11127,7 +10005,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric measures the length of time, in seconds, that the CockroachDB process has been running. Monitor this metric to detect events such as node restarts, which may require investigation or intervention. visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: NETWORKING metrics: - name: sys.host.net.send.tcp.fast_retrans_segs @@ -11140,7 +10017,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.tcp.loss_probes exported_name: sys_host_net_send_tcp_loss_probes description: |- @@ -11153,7 +10029,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.tcp.retrans_segs exported_name: sys_host_net_send_tcp_retrans_segs description: |- @@ -11181,7 +10056,6 @@ layers: Additionally, the 'sys.host.net.send.tcp.*' may provide more insight into the specific type of retransmission. 
visibility: ESSENTIAL - owner: cockroachdb/obs-prs - name: sys.host.net.send.tcp.slow_start_retrans exported_name: sys_host_net_send_tcp_slow_start_retrans description: |- @@ -11193,7 +10067,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.tcp_timeouts exported_name: sys_host_net_send_tcp_timeouts description: |- @@ -11207,7 +10080,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: UNSET metrics: - name: ash.sampler.take_sample.latency @@ -11218,7 +10090,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: ash.samples.collected exported_name: ash_samples_collected description: Total number of ASH samples collected @@ -11227,7 +10098,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: ash.work_states.active exported_name: ash_work_states_active description: Number of goroutines with an active ASH work state @@ -11236,7 +10106,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: build.timestamp exported_name: build_timestamp description: Build information @@ -11245,7 +10114,6 @@ layers: unit: TIMESTAMP_SEC aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: go.scheduler_latency exported_name: go_scheduler_latency description: Go scheduling latency @@ -11255,7 +10123,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/admission-control - name: log.buffered.messages.dropped exported_name: log_buffered_messages_dropped description: Count of log messages that are dropped by buffered log sinks. 
When CRDB attempts to buffer a log message in a buffered log sink whose buffer is already full, it drops the oldest buffered messages to make space for the new message @@ -11264,7 +10131,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.fluent.sink.conn.attempts exported_name: log_fluent_sink_conn_attempts description: Number of connection attempts experienced by fluent-server logging sinks @@ -11273,7 +10139,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.fluent.sink.conn.errors exported_name: log_fluent_sink_conn_errors description: Number of connection errors experienced by fluent-server logging sinks @@ -11282,7 +10147,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.fluent.sink.write.attempts exported_name: log_fluent_sink_write_attempts description: Number of write attempts experienced by fluent-server logging sinks @@ -11291,7 +10155,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.fluent.sink.write.errors exported_name: log_fluent_sink_write_errors description: Number of write errors experienced by fluent-server logging sinks @@ -11300,7 +10163,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.messages.count exported_name: log_messages_count description: Count of messages logged on the node since startup. Note that this does not measure the fan-out of single log messages to the various configured logging sinks. 
@@ -11309,7 +10171,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.otlp.sink.grpc.transparent_retries exported_name: log_otlp_sink_grpc_transparent_retries description: Number of transparent retries done by otlp-server logging sinks when using GRPC @@ -11318,7 +10179,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.otlp.sink.write.attempts exported_name: log_otlp_sink_write_attempts description: Number of write attempts experienced by otlp-server logging sinks @@ -11327,7 +10187,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: log.otlp.sink.write.errors exported_name: log_otlp_sink_write_errors description: Number of write errors experienced by otlp-server logging sinks @@ -11336,7 +10195,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.cgo.allocbytes exported_name: sys_cgo_allocbytes description: Current bytes of memory allocated by cgo @@ -11346,7 +10204,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.cgo.totalbytes exported_name: sys_cgo_totalbytes description: Total bytes of memory allocated by cgo, but not released @@ -11356,7 +10213,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.cgocalls exported_name: sys_cgocalls description: Total number of cgo calls @@ -11365,7 +10221,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.cpu.now.ns exported_name: sys_cpu_now_ns description: The time when CPU measurements were taken, as nanoseconds since epoch @@ -11374,7 +10229,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.cpu.sys.ns 
exported_name: sys_cpu_sys_ns description: Total system cpu time consumed by the CRDB process @@ -11384,7 +10238,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.cpu.user.ns exported_name: sys_cpu_user_ns description: Total user cpu time consumed by the CRDB process @@ -11394,7 +10247,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.fd.open exported_name: sys_fd_open description: Process open file descriptors @@ -11403,7 +10255,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.fd.softlimit exported_name: sys_fd_softlimit description: Process open FD soft limit @@ -11412,7 +10263,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.gc.assist.enabled exported_name: sys_gc_assist_enabled description: Indicates whether GC assist is currently enabled (1) or disabled (0) @@ -11421,7 +10271,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.gc.assist.ns exported_name: sys_gc_assist_ns description: Estimated total CPU time user goroutines spent to assist the GC process @@ -11430,7 +10279,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.gc.count exported_name: sys_gc_count description: Total number of GC runs @@ -11440,7 +10288,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.gc.pause.ns exported_name: sys_gc_pause_ns description: Total GC pause @@ -11450,7 +10297,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.gc.pause.percent exported_name: sys_gc_pause_percent description: Current GC pause percentage @@ -11460,7 +10306,6 @@ layers: aggregation: 
AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.gc.stop.ns exported_name: sys_gc_stop_ns description: Estimated GC stop-the-world stopping latencies @@ -11469,7 +10314,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.gc.total.ns exported_name: sys_gc_total_ns description: Estimated total CPU time spent performing GC tasks @@ -11478,7 +10322,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.go.allocbytes exported_name: sys_go_allocbytes description: Current bytes of memory allocated by go @@ -11488,7 +10331,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.go.heap.allocbytes exported_name: sys_go_heap_allocbytes description: Cumulative bytes allocated for heap objects. @@ -11498,7 +10340,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.go.heap.heapfragmentbytes exported_name: sys_go_heap_heapfragmentbytes description: Total heap fragmentation bytes, derived from bytes in in-use spans minus bytes allocated @@ -11508,7 +10349,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.go.heap.heapreleasedbytes exported_name: sys_go_heap_heapreleasedbytes description: Total bytes returned to the OS from heap. 
@@ -11517,7 +10357,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.heap.heapreservedbytes exported_name: sys_go_heap_heapreservedbytes description: Total bytes reserved by heap, derived from bytes in idle (unused) spans subtracts bytes returned to the OS @@ -11526,7 +10365,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.heap.livebytes exported_name: sys_go_heap_livebytes description: Bytes of live heap objects marked by the previous GC @@ -11535,7 +10373,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.heap.objects exported_name: sys_go_heap_objects description: Number of live objects on the heap (live + unswept) @@ -11544,7 +10381,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.limitbytes exported_name: sys_go_limitbytes description: Go soft memory limit @@ -11553,7 +10389,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.pause.other.ns exported_name: sys_go_pause_other_ns description: Estimated non-GC-related total pause time @@ -11562,7 +10397,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.stack.systembytes exported_name: sys_go_stack_systembytes description: Stack memory obtained from the OS. 
@@ -11571,7 +10405,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.stop.other.ns exported_name: sys_go_stop_other_ns description: Estimated non-GC-related stop-the-world stopping latencies @@ -11580,7 +10413,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: sys.go.totalbytes exported_name: sys_go_totalbytes description: Total bytes of memory allocated by go, but not released @@ -11590,7 +10422,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.goroutines exported_name: sys_goroutines description: Current number of goroutines @@ -11600,7 +10431,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: sys.host.disk.io.time exported_name: sys_host_disk_io_time description: Time spent reading from or writing to all disks since this process started (as reported by the OS) @@ -11609,7 +10439,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.disk.read.time exported_name: sys_host_disk_read_time description: Time spent reading from all disks since this process started (as reported by the OS) @@ -11618,7 +10447,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.disk.weightedio.time exported_name: sys_host_disk_weightedio_time description: Weighted time spent reading from or writing to all disks since this process started (as reported by the OS) @@ -11627,7 +10455,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.disk.write.time exported_name: sys_host_disk_write_time description: Time spent writing to all disks since this process started (as reported by the OS) @@ -11636,7 +10463,6 @@ layers: unit: NANOSECONDS aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.recv.drop exported_name: sys_host_net_recv_drop description: Receiving packets that got dropped on all network interfaces since this process started (as reported by the OS) @@ -11645,7 +10471,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.recv.err exported_name: sys_host_net_recv_err description: Error receiving packets on all network interfaces since this process started (as reported by the OS) @@ -11654,7 +10479,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.recv.packets exported_name: sys_host_net_recv_packets description: Packets received on all network interfaces since this process started (as reported by the OS) @@ -11663,7 +10487,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.drop exported_name: sys_host_net_send_drop description: Sending packets that got dropped on all network interfaces since this process started (as reported by the OS) @@ -11672,7 +10495,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.err exported_name: sys_host_net_send_err description: Error on sending packets on all network interfaces since this process started (as reported by the OS) @@ -11681,7 +10503,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.host.net.send.packets exported_name: sys_host_net_send_packets description: Packets sent on all network interfaces since this process started (as reported by the OS) @@ -11690,7 +10511,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/obs-prs - name: sys.totalmem exported_name: sys_totalmem description: Total 
memory (both free and used) @@ -11700,7 +10520,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/obs-prs - name: STORAGE categories: - name: OVERLOAD @@ -11715,7 +10534,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates when elastic CPU tokens are exhausted. Extended periods of elastic CPU token exhaustion may indicate high CPU utilization affecting elastic workloads. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.granter.elastic_io_tokens_exhausted_duration.kv exported_name: admission_granter_elastic_io_tokens_exhausted_duration_kv description: Total duration when Elastic IO tokens were exhausted, as observed by the token granter (not waiters). This is reported in nanoseconds from 26.1 onwards, and was microseconds before that. @@ -11726,7 +10544,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates when elastic I/O tokens are exhausted. Extended periods of elastic token exhaustion may indicate I/O bandwidth saturation affecting elastic workloads. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.granter.io_tokens_exhausted_duration.kv exported_name: admission_granter_io_tokens_exhausted_duration_kv description: Total duration when IO tokens were exhausted, as observed by the token granter (not waiters). This is reported in nanoseconds from 26.1 onwards, and was microseconds before that. @@ -11737,7 +10554,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates when I/O tokens are exhausted. Extended periods of token exhaustion may indicate I/O bandwidth saturation or high disk utilization requiring attention. 
visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.granter.slots_exhausted_duration.kv exported_name: admission_granter_slots_exhausted_duration_kv description: Total duration when KV slots were exhausted, as observed by the slot granter (not waiters). This is reported in nanoseconds from 26.1 onwards, and was microseconds before that. @@ -11748,7 +10564,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates when KV slots are exhausted. Extended periods of slot exhaustion may indicate insufficient slot allocation or high request concurrency requiring attention. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu exported_name: admission_wait_durations_cpu description: Wait time durations for requests that waited @@ -11759,7 +10574,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.bulk-low-pri exported_name: admission_wait_durations_cpu_bulk_low_pri description: Wait time durations for requests that waited @@ -11769,7 +10583,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.bulk-normal-pri exported_name: admission_wait_durations_cpu_bulk_normal_pri description: Wait time durations for requests that waited @@ -11779,7 +10592,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.high-pri exported_name: admission_wait_durations_cpu_high_pri description: Wait time durations for requests that waited @@ -11789,7 +10601,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.locking-normal-pri exported_name: admission_wait_durations_cpu_locking_normal_pri description: Wait time durations for requests that waited @@ -11799,7 +10610,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.locking-pri exported_name: admission_wait_durations_cpu_locking_pri description: Wait time durations for requests that waited @@ -11809,7 +10619,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.low-pri exported_name: admission_wait_durations_cpu_low_pri description: Wait time durations for requests that waited @@ -11819,7 +10628,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.normal-pri exported_name: admission_wait_durations_cpu_normal_pri description: Wait time durations for requests that waited @@ -11829,7 +10637,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.user-high-pri exported_name: admission_wait_durations_cpu_user_high_pri description: Wait time durations for requests that waited @@ -11839,7 +10646,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.cpu.user-low-pri exported_name: admission_wait_durations_cpu_user_low_pri description: Wait time durations for requests that waited @@ -11849,7 +10655,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu exported_name: admission_wait_durations_elastic_cpu description: Wait time durations for requests that waited @@ -11860,7 +10665,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.bulk-low-pri exported_name: admission_wait_durations_elastic_cpu_bulk_low_pri description: Wait time durations for requests that waited @@ -11870,7 +10674,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.bulk-normal-pri exported_name: admission_wait_durations_elastic_cpu_bulk_normal_pri description: Wait time durations for requests that waited @@ -11880,7 +10683,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.high-pri exported_name: admission_wait_durations_elastic_cpu_high_pri description: Wait time durations for requests that waited @@ -11890,7 +10692,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.locking-normal-pri exported_name: admission_wait_durations_elastic_cpu_locking_normal_pri description: Wait time durations for requests that waited @@ -11900,7 +10701,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.locking-pri exported_name: admission_wait_durations_elastic_cpu_locking_pri description: Wait time durations for requests that waited @@ -11910,7 +10710,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.low-pri exported_name: admission_wait_durations_elastic_cpu_low_pri description: Wait time durations for requests that waited @@ -11920,7 +10719,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.normal-pri exported_name: admission_wait_durations_elastic_cpu_normal_pri description: Wait time durations for requests that waited @@ -11930,7 +10728,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.user-high-pri exported_name: admission_wait_durations_elastic_cpu_user_high_pri description: Wait time durations for requests that waited @@ -11940,7 +10737,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-cpu.user-low-pri exported_name: admission_wait_durations_elastic_cpu_user_low_pri description: Wait time durations for requests that waited @@ -11950,7 +10746,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores exported_name: admission_wait_durations_elastic_stores description: Wait time durations for requests that waited @@ -11961,7 +10756,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.bulk-low-pri exported_name: admission_wait_durations_elastic_stores_bulk_low_pri description: Wait time durations for requests that waited @@ -11971,7 +10765,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.bulk-normal-pri exported_name: admission_wait_durations_elastic_stores_bulk_normal_pri description: Wait time durations for requests that waited @@ -11981,7 +10774,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.high-pri exported_name: admission_wait_durations_elastic_stores_high_pri description: Wait time durations for requests that waited @@ -11991,7 +10783,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.locking-normal-pri exported_name: admission_wait_durations_elastic_stores_locking_normal_pri description: Wait time durations for requests that waited @@ -12001,7 +10792,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.locking-pri exported_name: admission_wait_durations_elastic_stores_locking_pri description: Wait time durations for requests that waited @@ -12011,7 +10801,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.low-pri exported_name: admission_wait_durations_elastic_stores_low_pri description: Wait time durations for requests that waited @@ -12021,7 +10810,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.normal-pri exported_name: admission_wait_durations_elastic_stores_normal_pri description: Wait time durations for requests that waited @@ -12031,7 +10819,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.user-high-pri exported_name: admission_wait_durations_elastic_stores_user_high_pri description: Wait time durations for requests that waited @@ -12041,7 +10828,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.elastic-stores.user-low-pri exported_name: admission_wait_durations_elastic_stores_user_low_pri description: Wait time durations for requests that waited @@ -12051,7 +10837,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv exported_name: admission_wait_durations_kv description: Wait time durations for requests that waited @@ -12062,7 +10847,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the CPU utilization-based admission control queue. Non-zero wait times are expected when CPU is saturated. 
visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores exported_name: admission_wait_durations_kv_stores description: Wait time durations for requests that waited @@ -12073,7 +10857,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the I/O utilization-based admission control queue. Non-zero wait times are expected when I/O is saturated. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.bulk-low-pri exported_name: admission_wait_durations_kv_stores_bulk_low_pri description: Wait time durations for requests that waited @@ -12083,7 +10866,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.bulk-normal-pri exported_name: admission_wait_durations_kv_stores_bulk_normal_pri description: Wait time durations for requests that waited @@ -12093,7 +10875,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.high-pri exported_name: admission_wait_durations_kv_stores_high_pri description: Wait time durations for requests that waited @@ -12103,7 +10884,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.locking-normal-pri exported_name: admission_wait_durations_kv_stores_locking_normal_pri description: Wait time durations for requests that waited @@ -12113,7 +10893,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.locking-pri exported_name: admission_wait_durations_kv_stores_locking_pri description: Wait time durations for requests that waited @@ -12123,7 +10902,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.low-pri exported_name: admission_wait_durations_kv_stores_low_pri description: Wait time durations for requests that waited @@ -12133,7 +10911,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.normal-pri exported_name: admission_wait_durations_kv_stores_normal_pri description: Wait time durations for requests that waited @@ -12143,7 +10920,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.user-high-pri exported_name: admission_wait_durations_kv_stores_user_high_pri description: Wait time durations for requests that waited @@ -12153,7 +10929,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv-stores.user-low-pri exported_name: admission_wait_durations_kv_stores_user_low_pri description: Wait time durations for requests that waited @@ -12163,7 +10938,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.bulk-low-pri exported_name: admission_wait_durations_kv_bulk_low_pri description: Wait time durations for requests that waited @@ -12173,7 +10947,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.bulk-normal-pri exported_name: admission_wait_durations_kv_bulk_normal_pri description: Wait time durations for requests that waited @@ -12183,7 +10956,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.kv.high-pri exported_name: admission_wait_durations_kv_high_pri description: Wait time durations for requests that waited @@ -12193,7 +10965,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.locking-normal-pri exported_name: admission_wait_durations_kv_locking_normal_pri description: Wait time durations for requests that waited @@ -12203,7 +10974,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.locking-pri exported_name: admission_wait_durations_kv_locking_pri description: Wait time durations for requests that waited @@ -12213,7 +10983,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.low-pri exported_name: admission_wait_durations_kv_low_pri description: Wait time durations for requests that waited @@ -12223,7 +10992,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.kv.normal-pri exported_name: admission_wait_durations_kv_normal_pri description: Wait time durations for requests that waited @@ -12233,7 +11001,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.user-high-pri exported_name: admission_wait_durations_kv_user_high_pri description: Wait time durations for requests that waited @@ -12243,7 +11010,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.kv.user-low-pri exported_name: admission_wait_durations_kv_user_low_pri description: Wait time durations for requests that waited @@ -12253,7 +11019,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response exported_name: admission_wait_durations_sql_kv_response description: Wait time durations for requests that waited @@ -12264,7 +11029,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.bulk-low-pri exported_name: admission_wait_durations_sql_kv_response_bulk_low_pri description: Wait time durations for requests that waited @@ -12274,7 +11038,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.bulk-normal-pri exported_name: admission_wait_durations_sql_kv_response_bulk_normal_pri description: Wait time durations for requests that waited @@ -12284,7 +11047,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.high-pri exported_name: admission_wait_durations_sql_kv_response_high_pri description: Wait time durations for requests that waited @@ -12294,7 +11056,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.locking-normal-pri exported_name: admission_wait_durations_sql_kv_response_locking_normal_pri description: Wait time durations for requests that waited @@ -12304,7 +11065,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.locking-pri exported_name: admission_wait_durations_sql_kv_response_locking_pri description: Wait time durations for requests that waited @@ -12314,7 +11074,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.low-pri exported_name: admission_wait_durations_sql_kv_response_low_pri description: Wait time durations for requests that waited @@ -12324,7 +11083,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.normal-pri exported_name: admission_wait_durations_sql_kv_response_normal_pri description: Wait time durations for requests that waited @@ -12334,7 +11092,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.user-high-pri exported_name: admission_wait_durations_sql_kv_response_user_high_pri description: Wait time durations for requests that waited @@ -12344,7 +11101,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.sql-kv-response.user-low-pri exported_name: admission_wait_durations_sql_kv_response_user_low_pri description: Wait time durations for requests that waited @@ -12354,7 +11110,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response exported_name: admission_wait_durations_sql_sql_response description: Wait time durations for requests that waited @@ -12365,7 +11120,6 @@ layers: derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.bulk-low-pri exported_name: admission_wait_durations_sql_sql_response_bulk_low_pri description: Wait time durations for requests that waited @@ -12375,7 +11129,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.bulk-normal-pri exported_name: admission_wait_durations_sql_sql_response_bulk_normal_pri description: Wait time durations for requests that waited @@ -12385,7 +11138,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.high-pri exported_name: admission_wait_durations_sql_sql_response_high_pri description: Wait time durations for requests that waited @@ -12395,7 +11147,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.locking-normal-pri exported_name: admission_wait_durations_sql_sql_response_locking_normal_pri description: Wait time durations for requests that waited @@ -12405,7 +11156,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.locking-pri exported_name: admission_wait_durations_sql_sql_response_locking_pri description: Wait time durations for requests that waited @@ -12415,7 +11165,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.low-pri exported_name: admission_wait_durations_sql_sql_response_low_pri description: Wait time durations for requests that waited @@ -12425,7 +11174,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. 
- owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.normal-pri exported_name: admission_wait_durations_sql_sql_response_normal_pri description: Wait time durations for requests that waited @@ -12435,7 +11183,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.user-high-pri exported_name: admission_wait_durations_sql_sql_response_user_high_pri description: Wait time durations for requests that waited @@ -12445,7 +11192,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: admission.wait_durations.sql-sql-response.user-low-pri exported_name: admission_wait_durations_sql_sql_response_user_low_pri description: Wait time durations for requests that waited @@ -12455,7 +11201,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: This is a latency histogram of wait time in the admission control queue. Non-zero wait times are expected when the corresponding resource is saturated. - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.elastic.duration exported_name: kvflowcontrol_eval_wait_elastic_duration description: Latency histogram for time elastic requests spent waiting for flow tokens to evaluate @@ -12466,7 +11211,6 @@ layers: derivative: NONE how_to_use: This metric shows how long requests are waiting for flow tokens before evaluation. Extended wait times may indicate flow control token exhaustion or replication lag. 
visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.regular.duration exported_name: kvflowcontrol_eval_wait_regular_duration description: Latency histogram for time regular requests spent waiting for flow tokens to evaluate @@ -12477,7 +11221,6 @@ layers: derivative: NONE how_to_use: This metric shows how long requests are waiting for flow tokens before evaluation. Extended wait times may indicate flow control token exhaustion or replication lag. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: kvflowcontrol.send_queue.bytes exported_name: kvflowcontrol_send_queue_bytes description: Byte size of all raft entries queued for sending to followers, waiting on available elastic send tokens @@ -12488,7 +11231,6 @@ layers: derivative: NONE how_to_use: This metric indicates the size of queued raft entries waiting for elastic send tokens. Large or growing queue sizes may indicate replication backlog or follower lag. visibility: ESSENTIAL - owner: cockroachdb/admission-control - name: REPLICATION metrics: - name: leases.transfers.success @@ -12501,7 +11243,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: A high number of lease transfers is not a negative or positive signal, rather it is a reflection of the elastic cluster activities. For example, this metric is high during cluster topology changes. A high value is often the reason for NotLeaseHolderErrors which are normal and expected during rebalancing. Observing this metric may provide a confirmation of the cause of such errors. visibility: ESSENTIAL - owner: cockroachdb/kv - name: liveness.heartbeatlatency exported_name: liveness_heartbeatlatency description: Node liveness heartbeat latency @@ -12512,7 +11253,6 @@ layers: derivative: NONE how_to_use: If this metric exceeds 1 second, it is a sign of cluster instability. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: liveness.livenodes exported_name: liveness_livenodes description: Number of live nodes in the cluster (will be 0 if this node is not itself live) @@ -12523,7 +11263,6 @@ layers: derivative: NONE how_to_use: This is a critical metric that tracks the live nodes in the cluster. visibility: ESSENTIAL - owner: cockroachdb/kv - name: queue.replicate.replacedecommissioningreplica.error exported_name: queue_replicate_replacedecommissioningreplica_error description: Number of failed decommissioning replica replacements processed by the replicate queue @@ -12534,7 +11273,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Refer to Decommission the node. visibility: ESSENTIAL - owner: cockroachdb/kv - name: range.merges exported_name: range_merges description: Number of range merges @@ -12545,7 +11283,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates how fast a workload is scaling down. Merges are Cockroach's optimization for performance. This metric indicates that there have been deletes in the workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: range.splits exported_name: range_splits description: Number of range splits @@ -12556,7 +11293,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric indicates how fast a workload is scaling up. Spikes can indicate resource hotspots since the split heuristic is based on QPS. To understand whether hotspots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics such as CPU usage, such as sys.cpu.combined.percent-normalized, or use the Hot Ranges page. visibility: ESSENTIAL - owner: cockroachdb/kv - name: ranges exported_name: ranges description: Number of ranges @@ -12567,7 +11303,6 @@ layers: derivative: NONE how_to_use: This metric provides a measure of the scale of the data size. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: ranges.unavailable exported_name: ranges_unavailable description: Number of ranges with fewer live replicas than needed for quorum @@ -12578,7 +11313,6 @@ layers: derivative: NONE how_to_use: This metric is an indicator of replication issues. It shows whether the cluster is unhealthy and can impact workload. If an entire range is unavailable, then it will be unable to process queries. visibility: ESSENTIAL - owner: cockroachdb/kv - name: ranges.underreplicated exported_name: ranges_underreplicated description: Number of ranges with fewer live replicas than the replication target @@ -12589,7 +11323,6 @@ layers: derivative: NONE how_to_use: This metric is an indicator of replication issues. It shows whether the cluster has data that is not conforming to resilience goals. The next step is to determine the corresponding database object, such as the table or index, of these under-replicated ranges and whether the under-replication is temporarily expected. Use the statement SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = {id of under-replicated range}; visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.cpunanospersecond exported_name: rebalancing_cpunanospersecond description: Average CPU nanoseconds spent on processing replica operations in the last 30 minutes. @@ -12600,7 +11333,6 @@ layers: derivative: NONE how_to_use: A high value of this metric could indicate that one of the store's replicas is part of a hot range. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.lease.transfers exported_name: rebalancing_lease_transfers description: Number of lease transfers motivated by store-level load imbalances @@ -12611,7 +11343,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). 
If this is high (when the count is rated), it indicates that more rebalancing activity is taking place due to load imbalance between stores. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.queriespersecond exported_name: rebalancing_queriespersecond description: Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions. @@ -12622,7 +11353,6 @@ layers: derivative: NONE how_to_use: This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.range.rebalances exported_name: rebalancing_range_rebalances description: Number of range rebalance operations motivated by store-level load imbalances @@ -12633,7 +11363,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). If this is high (when the count is rated), it indicates that more rebalancing activity is taking place due to load imbalance between stores. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.replicas.cpunanospersecond exported_name: rebalancing_replicas_cpunanospersecond description: Histogram of average CPU nanoseconds spent on processing replica operations in the last 30 minutes. @@ -12644,7 +11373,6 @@ layers: derivative: NONE how_to_use: 'A high value of this metric could indicate that one of the store''s replicas is part of a hot range. See also the non-histogram variant: rebalancing.cpunanospersecond.' visibility: ESSENTIAL - owner: cockroachdb/kv - name: rebalancing.replicas.queriespersecond exported_name: rebalancing_replicas_queriespersecond description: Histogram of average kv-level requests received per second by replicas on the store in the last 30 minutes. 
@@ -12655,7 +11383,6 @@ layers: derivative: NONE how_to_use: 'A high value of this metric could indicate that one of the store''s replicas is part of a hot range. See also: rebalancing_replicas_cpunanospersecond.' visibility: ESSENTIAL - owner: cockroachdb/kv - name: replicas exported_name: replicas description: Number of replicas @@ -12666,7 +11393,6 @@ layers: derivative: NONE how_to_use: This metric provides an essential characterization of the data distribution across cluster nodes. visibility: ESSENTIAL - owner: cockroachdb/kv - name: replicas.leaseholders exported_name: replicas_leaseholders description: Number of lease holders @@ -12677,7 +11403,6 @@ layers: derivative: NONE how_to_use: This metric provides an essential characterization of the data processing points across cluster nodes. visibility: ESSENTIAL - owner: cockroachdb/kv - name: SQL metrics: - name: txnwaitqueue.deadlocks_total @@ -12690,7 +11415,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors. However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible. visibility: ESSENTIAL - owner: cockroachdb/kv - name: STORAGE metrics: - name: admission.io.overload @@ -12703,7 +11427,6 @@ layers: derivative: NONE how_to_use: If the value of this metric exceeds 1, then it indicates overload. You can also look at the metrics 'storage.l0-num-files', 'storage.l0-sublevels' or 'rocksdb.read-amplification' directly. A healthy LSM shape is defined as "read-amp < 20" and "L0-files < 1000", looking at cluster settings 'admission.l0_sub_level_count_overload_threshold' and 'admission.l0_file_count_overload_threshold' respectively. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: capacity exported_name: capacity description: Total storage capacity @@ -12714,7 +11437,6 @@ layers: derivative: NONE how_to_use: 'This metric gives total storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space).' visibility: ESSENTIAL - owner: cockroachdb/kv - name: capacity.available exported_name: capacity_available description: Available storage capacity @@ -12725,7 +11447,6 @@ layers: derivative: NONE how_to_use: 'This metric gives available storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space).' visibility: ESSENTIAL - owner: cockroachdb/kv - name: capacity.used exported_name: capacity_used description: Used storage capacity @@ -12736,7 +11457,6 @@ layers: derivative: NONE how_to_use: 'This metric gives used storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space).' visibility: ESSENTIAL - owner: cockroachdb/kv - name: rocksdb.block.cache.hits exported_name: rocksdb_block_cache_hits description: Count of block cache hits @@ -12747,7 +11467,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric gives hits to block cache which is reserved memory. It is allocated upon the start of a node process by the '--cache' flag and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rocksdb.block.cache.misses exported_name: rocksdb_block_cache_misses description: Count of block cache misses @@ -12758,7 +11477,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric gives misses to block cache which is reserved memory. 
It is allocated upon the start of a node process by the '--cache' flag and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. visibility: ESSENTIAL - owner: cockroachdb/kv - name: rocksdb.compactions exported_name: rocksdb_compactions description: Number of table compactions @@ -12769,7 +11487,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports the number of a node's LSM compactions. If the number of compactions remains elevated while the LSM health does not improve, compactions are not keeping up with the workload. If the condition persists for an extended period, the cluster will initially exhibit performance issues that will eventually escalate into stability issues. visibility: ESSENTIAL - owner: cockroachdb/kv - name: storage.wal.failover.secondary.disk.available exported_name: storage_wal_failover_secondary_disk_available description: Available disk space on the secondary WAL failover volume. @@ -12780,7 +11497,6 @@ layers: derivative: NONE how_to_use: Only populated when WAL failover is configured. visibility: ESSENTIAL - owner: cockroachdb/kv - name: storage.wal.failover.secondary.disk.capacity exported_name: storage_wal_failover_secondary_disk_capacity description: Total disk capacity of the secondary WAL failover volume. @@ -12791,7 +11507,6 @@ layers: derivative: NONE how_to_use: Only populated when WAL failover is configured. visibility: ESSENTIAL - owner: cockroachdb/kv - name: storage.wal.failover.write_and_sync.latency exported_name: storage_wal_failover_write_and_sync_latency description: The observed latency for writing and syncing to the logical Write-Ahead Log. @@ -12802,7 +11517,6 @@ layers: derivative: NONE how_to_use: Only populated when WAL failover is configured. Without WAL failover, the relevant metric is storage.wal.fsync.latency. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: storage.wal.fsync.latency exported_name: storage_wal_fsync_latency description: The fsync latency to the Write-Ahead Log device. @@ -12813,7 +11527,6 @@ layers: derivative: NONE how_to_use: If this value is greater than 100ms, it is an indication of a disk stall. To mitigate the effects of disk stalls, consider deploying your cluster with WAL failover configured. When WAL failover is configured, the more relevant metric is storage.wal.failover_write_and_sync.latency, as this metric reflects the fsync latency of the primary and/or the secondary WAL device. visibility: ESSENTIAL - owner: cockroachdb/kv - name: storage.wal.secondary.file_op.latency exported_name: storage_wal_secondary_file_op_latency description: The latency of file operations on the secondary Write-Ahead Log device. @@ -12823,7 +11536,6 @@ layers: aggregation: AVG derivative: NONE how_to_use: Only populated when WAL failover is configured. This metric tracks file operation latency specifically on the secondary WAL device. - owner: cockroachdb/kv - name: storage.write-stalls exported_name: storage_write_stalls description: Number of instances of intentional write stalls to backpressure incoming writes @@ -12834,7 +11546,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: This metric reports actual disk stall events. Ideally, investigate all reports of disk stalls. As a pratical guideline, one stall per minute is not likely to have a material impact on workload beyond an occasional increase in response time. However one stall per second should be viewed as problematic and investigated actively. It is particularly problematic if the rate persists over an extended period of time, and worse, if it is increasing. 
visibility: ESSENTIAL - owner: cockroachdb/kv - name: UNSET metrics: - name: abortspanbytes @@ -12845,7 +11556,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: addsstable.applications exported_name: addsstable_applications description: Number of SSTable ingestions applied (i.e. applied by Replicas) @@ -12854,7 +11564,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: addsstable.aswrites exported_name: addsstable_aswrites description: |- @@ -12866,7 +11575,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: addsstable.copies exported_name: addsstable_copies description: number of SSTable ingestions that required copying files during application @@ -12875,7 +11583,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: addsstable.delay.total exported_name: addsstable_delay_total description: Amount by which evaluation of AddSSTable requests was delayed @@ -12885,7 +11592,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: addsstable.proposals exported_name: addsstable_proposals description: Number of SSTable ingestions proposed (i.e. 
sent to Raft by lease holders) @@ -12895,7 +11601,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: admission.admitted.cpu exported_name: admission_admitted_cpu description: Number of requests admitted @@ -12904,7 +11609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.bulk-low-pri exported_name: admission_admitted_cpu_bulk_low_pri description: Number of requests admitted @@ -12913,7 +11617,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.bulk-normal-pri exported_name: admission_admitted_cpu_bulk_normal_pri description: Number of requests admitted @@ -12922,7 +11625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.high-pri exported_name: admission_admitted_cpu_high_pri description: Number of requests admitted @@ -12931,7 +11633,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.locking-normal-pri exported_name: admission_admitted_cpu_locking_normal_pri description: Number of requests admitted @@ -12940,7 +11641,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.locking-pri exported_name: admission_admitted_cpu_locking_pri description: Number of requests admitted @@ -12949,7 +11649,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.low-pri exported_name: admission_admitted_cpu_low_pri description: Number of requests admitted @@ -12958,7 +11657,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - 
owner: cockroachdb/admission-control - name: admission.admitted.cpu.normal-pri exported_name: admission_admitted_cpu_normal_pri description: Number of requests admitted @@ -12967,7 +11665,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.user-high-pri exported_name: admission_admitted_cpu_user_high_pri description: Number of requests admitted @@ -12976,7 +11673,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.cpu.user-low-pri exported_name: admission_admitted_cpu_user_low_pri description: Number of requests admitted @@ -12985,7 +11681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu exported_name: admission_admitted_elastic_cpu description: Number of requests admitted @@ -12994,7 +11689,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.bulk-low-pri exported_name: admission_admitted_elastic_cpu_bulk_low_pri description: Number of requests admitted @@ -13003,7 +11697,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.bulk-normal-pri exported_name: admission_admitted_elastic_cpu_bulk_normal_pri description: Number of requests admitted @@ -13012,7 +11705,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.high-pri exported_name: admission_admitted_elastic_cpu_high_pri description: Number of requests admitted @@ -13021,7 +11713,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
admission.admitted.elastic-cpu.locking-normal-pri exported_name: admission_admitted_elastic_cpu_locking_normal_pri description: Number of requests admitted @@ -13030,7 +11721,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.locking-pri exported_name: admission_admitted_elastic_cpu_locking_pri description: Number of requests admitted @@ -13039,7 +11729,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.low-pri exported_name: admission_admitted_elastic_cpu_low_pri description: Number of requests admitted @@ -13048,7 +11737,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.normal-pri exported_name: admission_admitted_elastic_cpu_normal_pri description: Number of requests admitted @@ -13057,7 +11745,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.user-high-pri exported_name: admission_admitted_elastic_cpu_user_high_pri description: Number of requests admitted @@ -13066,7 +11753,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-cpu.user-low-pri exported_name: admission_admitted_elastic_cpu_user_low_pri description: Number of requests admitted @@ -13075,7 +11761,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores exported_name: admission_admitted_elastic_stores description: Number of requests admitted @@ -13084,7 +11769,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
admission.admitted.elastic-stores.bulk-low-pri exported_name: admission_admitted_elastic_stores_bulk_low_pri description: Number of requests admitted @@ -13093,7 +11777,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.bulk-normal-pri exported_name: admission_admitted_elastic_stores_bulk_normal_pri description: Number of requests admitted @@ -13102,7 +11785,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.high-pri exported_name: admission_admitted_elastic_stores_high_pri description: Number of requests admitted @@ -13111,7 +11793,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.locking-normal-pri exported_name: admission_admitted_elastic_stores_locking_normal_pri description: Number of requests admitted @@ -13120,7 +11801,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.locking-pri exported_name: admission_admitted_elastic_stores_locking_pri description: Number of requests admitted @@ -13129,7 +11809,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.low-pri exported_name: admission_admitted_elastic_stores_low_pri description: Number of requests admitted @@ -13138,7 +11817,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.normal-pri exported_name: admission_admitted_elastic_stores_normal_pri description: Number of requests admitted @@ -13147,7 +11825,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.user-high-pri exported_name: admission_admitted_elastic_stores_user_high_pri description: Number of requests admitted @@ -13156,7 +11833,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.elastic-stores.user-low-pri exported_name: admission_admitted_elastic_stores_user_low_pri description: Number of requests admitted @@ -13165,7 +11841,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv exported_name: admission_admitted_kv description: Number of requests admitted @@ -13174,7 +11849,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores exported_name: admission_admitted_kv_stores description: Number of requests admitted @@ -13183,7 +11857,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.bulk-low-pri exported_name: admission_admitted_kv_stores_bulk_low_pri description: Number of requests admitted @@ -13192,7 +11865,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.bulk-normal-pri exported_name: admission_admitted_kv_stores_bulk_normal_pri description: Number of requests admitted @@ -13201,7 +11873,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.high-pri exported_name: admission_admitted_kv_stores_high_pri description: Number of requests admitted @@ -13210,7 +11881,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.admitted.kv-stores.locking-normal-pri exported_name: admission_admitted_kv_stores_locking_normal_pri description: Number of requests admitted @@ -13219,7 +11889,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.locking-pri exported_name: admission_admitted_kv_stores_locking_pri description: Number of requests admitted @@ -13228,7 +11897,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.low-pri exported_name: admission_admitted_kv_stores_low_pri description: Number of requests admitted @@ -13237,7 +11905,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.normal-pri exported_name: admission_admitted_kv_stores_normal_pri description: Number of requests admitted @@ -13246,7 +11913,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.user-high-pri exported_name: admission_admitted_kv_stores_user_high_pri description: Number of requests admitted @@ -13255,7 +11921,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv-stores.user-low-pri exported_name: admission_admitted_kv_stores_user_low_pri description: Number of requests admitted @@ -13264,7 +11929,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.bulk-low-pri exported_name: admission_admitted_kv_bulk_low_pri description: Number of requests admitted @@ -13273,7 +11937,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.admitted.kv.bulk-normal-pri exported_name: admission_admitted_kv_bulk_normal_pri description: Number of requests admitted @@ -13282,7 +11945,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.high-pri exported_name: admission_admitted_kv_high_pri description: Number of requests admitted @@ -13291,7 +11953,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.locking-normal-pri exported_name: admission_admitted_kv_locking_normal_pri description: Number of requests admitted @@ -13300,7 +11961,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.locking-pri exported_name: admission_admitted_kv_locking_pri description: Number of requests admitted @@ -13309,7 +11969,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.low-pri exported_name: admission_admitted_kv_low_pri description: Number of requests admitted @@ -13318,7 +11977,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.normal-pri exported_name: admission_admitted_kv_normal_pri description: Number of requests admitted @@ -13327,7 +11985,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.user-high-pri exported_name: admission_admitted_kv_user_high_pri description: Number of requests admitted @@ -13336,7 +11993,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.kv.user-low-pri exported_name: 
admission_admitted_kv_user_low_pri description: Number of requests admitted @@ -13345,7 +12001,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response exported_name: admission_admitted_sql_kv_response description: Number of requests admitted @@ -13354,7 +12009,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.bulk-low-pri exported_name: admission_admitted_sql_kv_response_bulk_low_pri description: Number of requests admitted @@ -13363,7 +12017,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.bulk-normal-pri exported_name: admission_admitted_sql_kv_response_bulk_normal_pri description: Number of requests admitted @@ -13372,7 +12025,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.high-pri exported_name: admission_admitted_sql_kv_response_high_pri description: Number of requests admitted @@ -13381,7 +12033,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.locking-normal-pri exported_name: admission_admitted_sql_kv_response_locking_normal_pri description: Number of requests admitted @@ -13390,7 +12041,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.locking-pri exported_name: admission_admitted_sql_kv_response_locking_pri description: Number of requests admitted @@ -13399,7 +12049,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
admission.admitted.sql-kv-response.low-pri exported_name: admission_admitted_sql_kv_response_low_pri description: Number of requests admitted @@ -13408,7 +12057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.normal-pri exported_name: admission_admitted_sql_kv_response_normal_pri description: Number of requests admitted @@ -13417,7 +12065,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.user-high-pri exported_name: admission_admitted_sql_kv_response_user_high_pri description: Number of requests admitted @@ -13426,7 +12073,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-kv-response.user-low-pri exported_name: admission_admitted_sql_kv_response_user_low_pri description: Number of requests admitted @@ -13435,7 +12081,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response exported_name: admission_admitted_sql_sql_response description: Number of requests admitted @@ -13444,7 +12089,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.bulk-low-pri exported_name: admission_admitted_sql_sql_response_bulk_low_pri description: Number of requests admitted @@ -13453,7 +12097,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.bulk-normal-pri exported_name: admission_admitted_sql_sql_response_bulk_normal_pri description: Number of requests admitted @@ -13462,7 +12105,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE 
- owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.high-pri exported_name: admission_admitted_sql_sql_response_high_pri description: Number of requests admitted @@ -13471,7 +12113,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.locking-normal-pri exported_name: admission_admitted_sql_sql_response_locking_normal_pri description: Number of requests admitted @@ -13480,7 +12121,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.locking-pri exported_name: admission_admitted_sql_sql_response_locking_pri description: Number of requests admitted @@ -13489,7 +12129,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.low-pri exported_name: admission_admitted_sql_sql_response_low_pri description: Number of requests admitted @@ -13498,7 +12137,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.normal-pri exported_name: admission_admitted_sql_sql_response_normal_pri description: Number of requests admitted @@ -13507,7 +12145,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.user-high-pri exported_name: admission_admitted_sql_sql_response_user_high_pri description: Number of requests admitted @@ -13516,7 +12153,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.admitted.sql-sql-response.user-low-pri exported_name: admission_admitted_sql_sql_response_user_low_pri description: Number of requests admitted @@ -13525,7 +12161,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.exhausted_duration_nanos.app_tenant.can_burst exported_name: admission_cpu_time_tokens_exhausted_duration_nanos_app_tenant_can_burst description: Cumulative nanoseconds the app_tenant/can_burst CPU time token bucket has spent exhausted (tokens <= 0); rate() gives the fraction of wall-clock time the bucket was exhausted @@ -13534,7 +12169,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.exhausted_duration_nanos.app_tenant.no_burst exported_name: admission_cpu_time_tokens_exhausted_duration_nanos_app_tenant_no_burst description: Cumulative nanoseconds the app_tenant/no_burst CPU time token bucket has spent exhausted (tokens <= 0); rate() gives the fraction of wall-clock time the bucket was exhausted @@ -13543,7 +12177,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.exhausted_duration_nanos.system_tenant.can_burst exported_name: admission_cpu_time_tokens_exhausted_duration_nanos_system_tenant_can_burst description: Cumulative nanoseconds the system_tenant/can_burst CPU time token bucket has spent exhausted (tokens <= 0); rate() gives the fraction of wall-clock time the bucket was exhausted @@ -13552,7 +12185,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.exhausted_duration_nanos.system_tenant.no_burst exported_name: admission_cpu_time_tokens_exhausted_duration_nanos_system_tenant_no_burst description: Cumulative nanoseconds the system_tenant/no_burst CPU time token bucket has spent exhausted (tokens <= 0); rate() gives the fraction of wall-clock time the bucket was exhausted @@ -13561,7 +12193,6 @@ 
layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.multiplier exported_name: admission_cpu_time_tokens_multiplier description: The token-to-CPU-time multiplier used by the CPU time token linear model to adjust refill rates; computed as the ratio of total CPU time to tracked CPU time @@ -13570,7 +12201,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.admitted_count.app_tenant exported_name: admission_cpu_time_tokens_per_tenant_admitted_count_app_tenant description: Cumulative number of requests admitted per tenant by CPU time token admission control; use with wait_time_nanos to compute mean wait time via rate(wait_time) / rate(admitted_count) @@ -13579,7 +12209,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.admitted_count.system_tenant exported_name: admission_cpu_time_tokens_per_tenant_admitted_count_system_tenant description: Cumulative number of requests admitted per tenant by CPU time token admission control; use with wait_time_nanos to compute mean wait time via rate(wait_time) / rate(admitted_count) @@ -13588,7 +12217,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.tokens_returned.app_tenant exported_name: admission_cpu_time_tokens_per_tenant_tokens_returned_app_tenant description: Cumulative CPU time tokens returned per tenant, for example when actual CPU usage was lower than the initial estimate; rate() gives the per-tenant token return rate @@ -13597,7 +12225,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
admission.cpu_time_tokens.per_tenant.tokens_returned.system_tenant exported_name: admission_cpu_time_tokens_per_tenant_tokens_returned_system_tenant description: Cumulative CPU time tokens returned per tenant, for example when actual CPU usage was lower than the initial estimate; rate() gives the per-tenant token return rate @@ -13606,7 +12233,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.tokens_used.app_tenant exported_name: admission_cpu_time_tokens_per_tenant_tokens_used_app_tenant description: Cumulative CPU time tokens consumed per tenant by admitted work; rate() gives the per-tenant token consumption rate @@ -13615,7 +12241,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.tokens_used.system_tenant exported_name: admission_cpu_time_tokens_per_tenant_tokens_used_system_tenant description: Cumulative CPU time tokens consumed per tenant by admitted work; rate() gives the per-tenant token consumption rate @@ -13624,7 +12249,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.wait_time_nanos.app_tenant exported_name: admission_cpu_time_tokens_per_tenant_wait_time_nanos_app_tenant description: Cumulative nanoseconds of admission queue wait time per tenant in CPU time token admission control; use with admitted_count to compute mean wait time via rate(wait_time) / rate(admitted_count) @@ -13633,7 +12257,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.per_tenant.wait_time_nanos.system_tenant exported_name: admission_cpu_time_tokens_per_tenant_wait_time_nanos_system_tenant description: Cumulative nanoseconds of admission queue 
wait time per tenant in CPU time token admission control; use with admitted_count to compute mean wait time via rate(wait_time) / rate(admitted_count) @@ -13642,7 +12265,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.added.app_tenant.can_burst exported_name: admission_cpu_time_tokens_refill_added_app_tenant_can_burst description: Cumulative tokens added to the app_tenant/can_burst CPU time token bucket via the refill process; rate() gives the effective refill rate @@ -13651,7 +12273,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.added.app_tenant.no_burst exported_name: admission_cpu_time_tokens_refill_added_app_tenant_no_burst description: Cumulative tokens added to the app_tenant/no_burst CPU time token bucket via the refill process; rate() gives the effective refill rate @@ -13660,7 +12281,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.added.system_tenant.can_burst exported_name: admission_cpu_time_tokens_refill_added_system_tenant_can_burst description: Cumulative tokens added to the system_tenant/can_burst CPU time token bucket via the refill process; rate() gives the effective refill rate @@ -13669,7 +12289,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.added.system_tenant.no_burst exported_name: admission_cpu_time_tokens_refill_added_system_tenant_no_burst description: Cumulative tokens added to the system_tenant/no_burst CPU time token bucket via the refill process; rate() gives the effective refill rate @@ -13678,7 +12297,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.removed.app_tenant.can_burst exported_name: admission_cpu_time_tokens_refill_removed_app_tenant_can_burst description: Cumulative tokens removed from the app_tenant/can_burst CPU time token bucket when refill rates decrease between intervals @@ -13687,7 +12305,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.removed.app_tenant.no_burst exported_name: admission_cpu_time_tokens_refill_removed_app_tenant_no_burst description: Cumulative tokens removed from the app_tenant/no_burst CPU time token bucket when refill rates decrease between intervals @@ -13696,7 +12313,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.removed.system_tenant.can_burst exported_name: admission_cpu_time_tokens_refill_removed_system_tenant_can_burst description: Cumulative tokens removed from the system_tenant/can_burst CPU time token bucket when refill rates decrease between intervals @@ -13705,7 +12321,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.refill.removed.system_tenant.no_burst exported_name: admission_cpu_time_tokens_refill_removed_system_tenant_no_burst description: Cumulative tokens removed from the system_tenant/no_burst CPU time token bucket when refill rates decrease between intervals @@ -13714,7 +12329,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.usage.consumed exported_name: admission_cpu_time_tokens_usage_consumed description: Cumulative number of CPU time tokens consumed (deducted from buckets) by admitted work @@ -13723,7 +12337,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.cpu_time_tokens.usage.returned exported_name: admission_cpu_time_tokens_usage_returned description: Cumulative number of CPU time tokens returned (credited back to buckets), for example when actual CPU usage was lower than the initial estimate @@ -13732,7 +12345,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.acquired_nanos exported_name: admission_elastic_cpu_acquired_nanos description: Total CPU nanoseconds acquired by elastic work @@ -13741,7 +12353,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.available_nanos exported_name: admission_elastic_cpu_available_nanos description: Instantaneous available CPU nanoseconds per second ignoring utilization limit @@ -13750,7 +12361,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.max_available_nanos exported_name: admission_elastic_cpu_max_available_nanos description: Maximum available CPU nanoseconds per second ignoring utilization limit @@ -13759,7 +12369,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.over_limit_durations exported_name: admission_elastic_cpu_over_limit_durations description: Measurement of how much over the prescribed limit elastic requests ran (not recorded if requests don't run over) @@ -13768,7 +12377,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.pre_work_nanos exported_name: admission_elastic_cpu_pre_work_nanos description: Total CPU nanoseconds spent doing pre-work, before doing elastic work @@ -13777,7 +12385,6 @@ layers: unit: NANOSECONDS 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.returned_nanos exported_name: admission_elastic_cpu_returned_nanos description: Total CPU nanoseconds returned by elastic work @@ -13786,7 +12393,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.utilization exported_name: admission_elastic_cpu_utilization description: CPU utilization by elastic work @@ -13795,7 +12401,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.utilization_limit exported_name: admission_elastic_cpu_utilization_limit description: Utilization limit set for the elastic CPU work @@ -13804,7 +12409,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.elastic_cpu.yield_delay_nanos exported_name: admission_elastic_cpu_yield_delay_nanos description: Total nanoseconds goroutines were delayed by runtime.Yield @@ -13813,7 +12417,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.elastic_cpu_bypassed.utilization exported_name: admission_elastic_cpu_bypassed_utilization description: CPU utilization by elastic work that bypassed admission @@ -13822,7 +12425,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.errored.cpu exported_name: admission_errored_cpu description: Number of requests not admitted due to error @@ -13831,7 +12433,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.bulk-low-pri exported_name: admission_errored_cpu_bulk_low_pri description: Number of requests not admitted due to error @@ -13840,7 +12441,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.bulk-normal-pri exported_name: admission_errored_cpu_bulk_normal_pri description: Number of requests not admitted due to error @@ -13849,7 +12449,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.high-pri exported_name: admission_errored_cpu_high_pri description: Number of requests not admitted due to error @@ -13858,7 +12457,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.locking-normal-pri exported_name: admission_errored_cpu_locking_normal_pri description: Number of requests not admitted due to error @@ -13867,7 +12465,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.locking-pri exported_name: admission_errored_cpu_locking_pri description: Number of requests not admitted due to error @@ -13876,7 +12473,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.low-pri exported_name: admission_errored_cpu_low_pri description: Number of requests not admitted due to error @@ -13885,7 +12481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.normal-pri exported_name: admission_errored_cpu_normal_pri description: Number of requests not admitted due to error @@ -13894,7 +12489,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.user-high-pri exported_name: admission_errored_cpu_user_high_pri description: Number of requests not admitted due to error @@ -13903,7 +12497,6 @@ layers: unit: 
COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.cpu.user-low-pri exported_name: admission_errored_cpu_user_low_pri description: Number of requests not admitted due to error @@ -13912,7 +12505,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu exported_name: admission_errored_elastic_cpu description: Number of requests not admitted due to error @@ -13921,7 +12513,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.bulk-low-pri exported_name: admission_errored_elastic_cpu_bulk_low_pri description: Number of requests not admitted due to error @@ -13930,7 +12521,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.bulk-normal-pri exported_name: admission_errored_elastic_cpu_bulk_normal_pri description: Number of requests not admitted due to error @@ -13939,7 +12529,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.high-pri exported_name: admission_errored_elastic_cpu_high_pri description: Number of requests not admitted due to error @@ -13948,7 +12537,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.locking-normal-pri exported_name: admission_errored_elastic_cpu_locking_normal_pri description: Number of requests not admitted due to error @@ -13957,7 +12545,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.locking-pri exported_name: admission_errored_elastic_cpu_locking_pri 
description: Number of requests not admitted due to error @@ -13966,7 +12553,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.low-pri exported_name: admission_errored_elastic_cpu_low_pri description: Number of requests not admitted due to error @@ -13975,7 +12561,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.normal-pri exported_name: admission_errored_elastic_cpu_normal_pri description: Number of requests not admitted due to error @@ -13984,7 +12569,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.user-high-pri exported_name: admission_errored_elastic_cpu_user_high_pri description: Number of requests not admitted due to error @@ -13993,7 +12577,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-cpu.user-low-pri exported_name: admission_errored_elastic_cpu_user_low_pri description: Number of requests not admitted due to error @@ -14002,7 +12585,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores exported_name: admission_errored_elastic_stores description: Number of requests not admitted due to error @@ -14011,7 +12593,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.bulk-low-pri exported_name: admission_errored_elastic_stores_bulk_low_pri description: Number of requests not admitted due to error @@ -14020,7 +12601,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
admission.errored.elastic-stores.bulk-normal-pri exported_name: admission_errored_elastic_stores_bulk_normal_pri description: Number of requests not admitted due to error @@ -14029,7 +12609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.high-pri exported_name: admission_errored_elastic_stores_high_pri description: Number of requests not admitted due to error @@ -14038,7 +12617,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.locking-normal-pri exported_name: admission_errored_elastic_stores_locking_normal_pri description: Number of requests not admitted due to error @@ -14047,7 +12625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.locking-pri exported_name: admission_errored_elastic_stores_locking_pri description: Number of requests not admitted due to error @@ -14056,7 +12633,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.low-pri exported_name: admission_errored_elastic_stores_low_pri description: Number of requests not admitted due to error @@ -14065,7 +12641,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.normal-pri exported_name: admission_errored_elastic_stores_normal_pri description: Number of requests not admitted due to error @@ -14074,7 +12649,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.user-high-pri exported_name: admission_errored_elastic_stores_user_high_pri description: Number of requests not 
admitted due to error @@ -14083,7 +12657,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.elastic-stores.user-low-pri exported_name: admission_errored_elastic_stores_user_low_pri description: Number of requests not admitted due to error @@ -14092,7 +12665,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv exported_name: admission_errored_kv description: Number of requests not admitted due to error @@ -14101,7 +12673,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores exported_name: admission_errored_kv_stores description: Number of requests not admitted due to error @@ -14110,7 +12681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.bulk-low-pri exported_name: admission_errored_kv_stores_bulk_low_pri description: Number of requests not admitted due to error @@ -14119,7 +12689,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.bulk-normal-pri exported_name: admission_errored_kv_stores_bulk_normal_pri description: Number of requests not admitted due to error @@ -14128,7 +12697,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.high-pri exported_name: admission_errored_kv_stores_high_pri description: Number of requests not admitted due to error @@ -14137,7 +12705,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.locking-normal-pri exported_name: 
admission_errored_kv_stores_locking_normal_pri description: Number of requests not admitted due to error @@ -14146,7 +12713,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.locking-pri exported_name: admission_errored_kv_stores_locking_pri description: Number of requests not admitted due to error @@ -14155,7 +12721,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.low-pri exported_name: admission_errored_kv_stores_low_pri description: Number of requests not admitted due to error @@ -14164,7 +12729,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.normal-pri exported_name: admission_errored_kv_stores_normal_pri description: Number of requests not admitted due to error @@ -14173,7 +12737,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.user-high-pri exported_name: admission_errored_kv_stores_user_high_pri description: Number of requests not admitted due to error @@ -14182,7 +12745,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv-stores.user-low-pri exported_name: admission_errored_kv_stores_user_low_pri description: Number of requests not admitted due to error @@ -14191,7 +12753,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.bulk-low-pri exported_name: admission_errored_kv_bulk_low_pri description: Number of requests not admitted due to error @@ -14200,7 +12761,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.errored.kv.bulk-normal-pri exported_name: admission_errored_kv_bulk_normal_pri description: Number of requests not admitted due to error @@ -14209,7 +12769,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.high-pri exported_name: admission_errored_kv_high_pri description: Number of requests not admitted due to error @@ -14218,7 +12777,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.locking-normal-pri exported_name: admission_errored_kv_locking_normal_pri description: Number of requests not admitted due to error @@ -14227,7 +12785,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.locking-pri exported_name: admission_errored_kv_locking_pri description: Number of requests not admitted due to error @@ -14236,7 +12793,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.low-pri exported_name: admission_errored_kv_low_pri description: Number of requests not admitted due to error @@ -14245,7 +12801,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.normal-pri exported_name: admission_errored_kv_normal_pri description: Number of requests not admitted due to error @@ -14254,7 +12809,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.kv.user-high-pri exported_name: admission_errored_kv_user_high_pri description: Number of requests not admitted due to error @@ -14263,7 +12817,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.errored.kv.user-low-pri exported_name: admission_errored_kv_user_low_pri description: Number of requests not admitted due to error @@ -14272,7 +12825,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response exported_name: admission_errored_sql_kv_response description: Number of requests not admitted due to error @@ -14281,7 +12833,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.bulk-low-pri exported_name: admission_errored_sql_kv_response_bulk_low_pri description: Number of requests not admitted due to error @@ -14290,7 +12841,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.bulk-normal-pri exported_name: admission_errored_sql_kv_response_bulk_normal_pri description: Number of requests not admitted due to error @@ -14299,7 +12849,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.high-pri exported_name: admission_errored_sql_kv_response_high_pri description: Number of requests not admitted due to error @@ -14308,7 +12857,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.locking-normal-pri exported_name: admission_errored_sql_kv_response_locking_normal_pri description: Number of requests not admitted due to error @@ -14317,7 +12865,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.locking-pri exported_name: admission_errored_sql_kv_response_locking_pri description: Number of 
requests not admitted due to error @@ -14326,7 +12873,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.low-pri exported_name: admission_errored_sql_kv_response_low_pri description: Number of requests not admitted due to error @@ -14335,7 +12881,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.normal-pri exported_name: admission_errored_sql_kv_response_normal_pri description: Number of requests not admitted due to error @@ -14344,7 +12889,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.user-high-pri exported_name: admission_errored_sql_kv_response_user_high_pri description: Number of requests not admitted due to error @@ -14353,7 +12897,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-kv-response.user-low-pri exported_name: admission_errored_sql_kv_response_user_low_pri description: Number of requests not admitted due to error @@ -14362,7 +12905,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response exported_name: admission_errored_sql_sql_response description: Number of requests not admitted due to error @@ -14371,7 +12913,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.bulk-low-pri exported_name: admission_errored_sql_sql_response_bulk_low_pri description: Number of requests not admitted due to error @@ -14380,7 +12921,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.errored.sql-sql-response.bulk-normal-pri exported_name: admission_errored_sql_sql_response_bulk_normal_pri description: Number of requests not admitted due to error @@ -14389,7 +12929,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.high-pri exported_name: admission_errored_sql_sql_response_high_pri description: Number of requests not admitted due to error @@ -14398,7 +12937,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.locking-normal-pri exported_name: admission_errored_sql_sql_response_locking_normal_pri description: Number of requests not admitted due to error @@ -14407,7 +12945,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.locking-pri exported_name: admission_errored_sql_sql_response_locking_pri description: Number of requests not admitted due to error @@ -14416,7 +12953,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.low-pri exported_name: admission_errored_sql_sql_response_low_pri description: Number of requests not admitted due to error @@ -14425,7 +12961,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.normal-pri exported_name: admission_errored_sql_sql_response_normal_pri description: Number of requests not admitted due to error @@ -14434,7 +12969,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.user-high-pri exported_name: 
admission_errored_sql_sql_response_user_high_pri description: Number of requests not admitted due to error @@ -14443,7 +12977,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.errored.sql-sql-response.user-low-pri exported_name: admission_errored_sql_sql_response_user_low_pri description: Number of requests not admitted due to error @@ -14452,7 +12985,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.cpu_load_long_period_duration.kv exported_name: admission_granter_cpu_load_long_period_duration_kv description: Total duration when CPULoad was being called with a long period. This is reported in nanoseconds from 26.1 onwards, and was microseconds before that. @@ -14461,7 +12993,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.cpu_load_short_period_duration.kv exported_name: admission_granter_cpu_load_short_period_duration_kv description: Total duration when CPULoad was being called with a short period. This is reported in nanoseconds from 26.1 onwards, and was microseconds before that. 
@@ -14470,7 +13001,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.disk_write_byte_tokens_exhausted_duration.kv exported_name: admission_granter_disk_write_byte_tokens_exhausted_duration_kv description: Total duration (in nanos) when disk write byte tokens were exhausted, as observed by the token granter (not waiters) @@ -14479,7 +13009,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.elastic_io_tokens_available.kv exported_name: admission_granter_elastic_io_tokens_available_kv description: Number of tokens available @@ -14488,7 +13017,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.granter.io_tokens_available.kv exported_name: admission_granter_io_tokens_available_kv description: Number of tokens available @@ -14497,7 +13025,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.granter.io_tokens_bypassed.kv exported_name: admission_granter_io_tokens_bypassed_kv description: Total number of tokens taken by work bypassing admission control (for example, follower writes without flow control) @@ -14506,7 +13033,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.io_tokens_returned.kv exported_name: admission_granter_io_tokens_returned_kv description: Total number of tokens returned @@ -14515,7 +13041,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.io_tokens_taken.kv exported_name: admission_granter_io_tokens_taken_kv description: Total number of tokens taken @@ -14524,7 +13049,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - 
owner: cockroachdb/admission-control - name: admission.granter.slot_adjuster_decrements.kv exported_name: admission_granter_slot_adjuster_decrements_kv description: Number of decrements of the total KV slots @@ -14533,7 +13057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.slot_adjuster_increments.kv exported_name: admission_granter_slot_adjuster_increments_kv description: Number of increments of the total KV slots @@ -14542,7 +13065,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.granter.total_slots.kv exported_name: admission_granter_total_slots_kv description: Total slots for kv work @@ -14551,7 +13073,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.granter.used_slots.kv exported_name: admission_granter_used_slots_kv description: Used slots @@ -14560,7 +13081,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.l0_compacted_bytes.kv exported_name: admission_l0_compacted_bytes_kv description: Total bytes compacted out of L0 (used to generate IO tokens) @@ -14569,7 +13089,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.l0_tokens_produced.kv exported_name: admission_l0_tokens_produced_kv description: Total bytes produced for L0 writes @@ -14578,7 +13097,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.raft.paused_replicas exported_name: admission_raft_paused_replicas description: |- @@ -14592,7 +13110,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: admission.raft.paused_replicas_dropped_msgs exported_name: 
admission_raft_paused_replicas_dropped_msgs description: |- @@ -14604,7 +13121,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: admission.requested.cpu exported_name: admission_requested_cpu description: Number of requests @@ -14613,7 +13129,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.bulk-low-pri exported_name: admission_requested_cpu_bulk_low_pri description: Number of requests @@ -14622,7 +13137,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.bulk-normal-pri exported_name: admission_requested_cpu_bulk_normal_pri description: Number of requests @@ -14631,7 +13145,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.high-pri exported_name: admission_requested_cpu_high_pri description: Number of requests @@ -14640,7 +13153,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.locking-normal-pri exported_name: admission_requested_cpu_locking_normal_pri description: Number of requests @@ -14649,7 +13161,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.locking-pri exported_name: admission_requested_cpu_locking_pri description: Number of requests @@ -14658,7 +13169,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.low-pri exported_name: admission_requested_cpu_low_pri description: Number of requests @@ -14667,7 +13177,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.requested.cpu.normal-pri exported_name: admission_requested_cpu_normal_pri description: Number of requests @@ -14676,7 +13185,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.user-high-pri exported_name: admission_requested_cpu_user_high_pri description: Number of requests @@ -14685,7 +13193,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.cpu.user-low-pri exported_name: admission_requested_cpu_user_low_pri description: Number of requests @@ -14694,7 +13201,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu exported_name: admission_requested_elastic_cpu description: Number of requests @@ -14703,7 +13209,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.bulk-low-pri exported_name: admission_requested_elastic_cpu_bulk_low_pri description: Number of requests @@ -14712,7 +13217,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.bulk-normal-pri exported_name: admission_requested_elastic_cpu_bulk_normal_pri description: Number of requests @@ -14721,7 +13225,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.high-pri exported_name: admission_requested_elastic_cpu_high_pri description: Number of requests @@ -14730,7 +13233,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.locking-normal-pri exported_name: 
admission_requested_elastic_cpu_locking_normal_pri description: Number of requests @@ -14739,7 +13241,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.locking-pri exported_name: admission_requested_elastic_cpu_locking_pri description: Number of requests @@ -14748,7 +13249,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.low-pri exported_name: admission_requested_elastic_cpu_low_pri description: Number of requests @@ -14757,7 +13257,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.normal-pri exported_name: admission_requested_elastic_cpu_normal_pri description: Number of requests @@ -14766,7 +13265,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.user-high-pri exported_name: admission_requested_elastic_cpu_user_high_pri description: Number of requests @@ -14775,7 +13273,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-cpu.user-low-pri exported_name: admission_requested_elastic_cpu_user_low_pri description: Number of requests @@ -14784,7 +13281,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores exported_name: admission_requested_elastic_stores description: Number of requests @@ -14793,7 +13289,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.bulk-low-pri exported_name: admission_requested_elastic_stores_bulk_low_pri 
description: Number of requests @@ -14802,7 +13297,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.bulk-normal-pri exported_name: admission_requested_elastic_stores_bulk_normal_pri description: Number of requests @@ -14811,7 +13305,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.high-pri exported_name: admission_requested_elastic_stores_high_pri description: Number of requests @@ -14820,7 +13313,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.locking-normal-pri exported_name: admission_requested_elastic_stores_locking_normal_pri description: Number of requests @@ -14829,7 +13321,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.locking-pri exported_name: admission_requested_elastic_stores_locking_pri description: Number of requests @@ -14838,7 +13329,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.low-pri exported_name: admission_requested_elastic_stores_low_pri description: Number of requests @@ -14847,7 +13337,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.normal-pri exported_name: admission_requested_elastic_stores_normal_pri description: Number of requests @@ -14856,7 +13345,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.user-high-pri exported_name: 
admission_requested_elastic_stores_user_high_pri description: Number of requests @@ -14865,7 +13353,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.elastic-stores.user-low-pri exported_name: admission_requested_elastic_stores_user_low_pri description: Number of requests @@ -14874,7 +13361,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv exported_name: admission_requested_kv description: Number of requests @@ -14883,7 +13369,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores exported_name: admission_requested_kv_stores description: Number of requests @@ -14892,7 +13377,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.bulk-low-pri exported_name: admission_requested_kv_stores_bulk_low_pri description: Number of requests @@ -14901,7 +13385,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.bulk-normal-pri exported_name: admission_requested_kv_stores_bulk_normal_pri description: Number of requests @@ -14910,7 +13393,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.high-pri exported_name: admission_requested_kv_stores_high_pri description: Number of requests @@ -14919,7 +13401,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.locking-normal-pri exported_name: admission_requested_kv_stores_locking_normal_pri description: Number of requests @@ -14928,7 +13409,6 @@ 
layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.locking-pri exported_name: admission_requested_kv_stores_locking_pri description: Number of requests @@ -14937,7 +13417,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.low-pri exported_name: admission_requested_kv_stores_low_pri description: Number of requests @@ -14946,7 +13425,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.normal-pri exported_name: admission_requested_kv_stores_normal_pri description: Number of requests @@ -14955,7 +13433,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.user-high-pri exported_name: admission_requested_kv_stores_user_high_pri description: Number of requests @@ -14964,7 +13441,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv-stores.user-low-pri exported_name: admission_requested_kv_stores_user_low_pri description: Number of requests @@ -14973,7 +13449,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.bulk-low-pri exported_name: admission_requested_kv_bulk_low_pri description: Number of requests @@ -14982,7 +13457,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.bulk-normal-pri exported_name: admission_requested_kv_bulk_normal_pri description: Number of requests @@ -14991,7 +13465,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/admission-control - name: admission.requested.kv.high-pri exported_name: admission_requested_kv_high_pri description: Number of requests @@ -15000,7 +13473,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.locking-normal-pri exported_name: admission_requested_kv_locking_normal_pri description: Number of requests @@ -15009,7 +13481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.locking-pri exported_name: admission_requested_kv_locking_pri description: Number of requests @@ -15018,7 +13489,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.low-pri exported_name: admission_requested_kv_low_pri description: Number of requests @@ -15027,7 +13497,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.normal-pri exported_name: admission_requested_kv_normal_pri description: Number of requests @@ -15036,7 +13505,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.user-high-pri exported_name: admission_requested_kv_user_high_pri description: Number of requests @@ -15045,7 +13513,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.kv.user-low-pri exported_name: admission_requested_kv_user_low_pri description: Number of requests @@ -15054,7 +13521,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response exported_name: admission_requested_sql_kv_response description: Number of requests @@ -15063,7 
+13529,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.bulk-low-pri exported_name: admission_requested_sql_kv_response_bulk_low_pri description: Number of requests @@ -15072,7 +13537,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.bulk-normal-pri exported_name: admission_requested_sql_kv_response_bulk_normal_pri description: Number of requests @@ -15081,7 +13545,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.high-pri exported_name: admission_requested_sql_kv_response_high_pri description: Number of requests @@ -15090,7 +13553,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.locking-normal-pri exported_name: admission_requested_sql_kv_response_locking_normal_pri description: Number of requests @@ -15099,7 +13561,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.locking-pri exported_name: admission_requested_sql_kv_response_locking_pri description: Number of requests @@ -15108,7 +13569,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.low-pri exported_name: admission_requested_sql_kv_response_low_pri description: Number of requests @@ -15117,7 +13577,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.normal-pri exported_name: admission_requested_sql_kv_response_normal_pri description: Number of 
requests @@ -15126,7 +13585,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.user-high-pri exported_name: admission_requested_sql_kv_response_user_high_pri description: Number of requests @@ -15135,7 +13593,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-kv-response.user-low-pri exported_name: admission_requested_sql_kv_response_user_low_pri description: Number of requests @@ -15144,7 +13601,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response exported_name: admission_requested_sql_sql_response description: Number of requests @@ -15153,7 +13609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.bulk-low-pri exported_name: admission_requested_sql_sql_response_bulk_low_pri description: Number of requests @@ -15162,7 +13617,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.bulk-normal-pri exported_name: admission_requested_sql_sql_response_bulk_normal_pri description: Number of requests @@ -15171,7 +13625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.high-pri exported_name: admission_requested_sql_sql_response_high_pri description: Number of requests @@ -15180,7 +13633,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.locking-normal-pri exported_name: admission_requested_sql_sql_response_locking_normal_pri 
description: Number of requests @@ -15189,7 +13641,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.locking-pri exported_name: admission_requested_sql_sql_response_locking_pri description: Number of requests @@ -15198,7 +13649,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.low-pri exported_name: admission_requested_sql_sql_response_low_pri description: Number of requests @@ -15207,7 +13657,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.normal-pri exported_name: admission_requested_sql_sql_response_normal_pri description: Number of requests @@ -15216,7 +13665,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.user-high-pri exported_name: admission_requested_sql_sql_response_user_high_pri description: Number of requests @@ -15225,7 +13673,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.requested.sql-sql-response.user-low-pri exported_name: admission_requested_sql_sql_response_user_low_pri description: Number of requests @@ -15234,7 +13681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: admission.scheduler_latency_listener.p99_nanos exported_name: admission_scheduler_latency_listener_p99_nanos description: The scheduling latency at p99 as observed by the scheduler latency listener @@ -15243,7 +13689,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_durations.snapshot_ingest 
exported_name: admission_wait_durations_snapshot_ingest description: Wait time for snapshot ingest requests that waited @@ -15252,7 +13697,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu exported_name: admission_wait_queue_length_cpu description: Length of wait queue @@ -15261,7 +13705,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.bulk-low-pri exported_name: admission_wait_queue_length_cpu_bulk_low_pri description: Length of wait queue @@ -15270,7 +13713,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.bulk-normal-pri exported_name: admission_wait_queue_length_cpu_bulk_normal_pri description: Length of wait queue @@ -15279,7 +13721,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.high-pri exported_name: admission_wait_queue_length_cpu_high_pri description: Length of wait queue @@ -15288,7 +13729,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.locking-normal-pri exported_name: admission_wait_queue_length_cpu_locking_normal_pri description: Length of wait queue @@ -15297,7 +13737,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.locking-pri exported_name: admission_wait_queue_length_cpu_locking_pri description: Length of wait queue @@ -15306,7 +13745,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.low-pri exported_name: admission_wait_queue_length_cpu_low_pri description: Length of wait queue @@ -15315,7 +13753,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.normal-pri exported_name: admission_wait_queue_length_cpu_normal_pri description: Length of wait queue @@ -15324,7 +13761,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.user-high-pri exported_name: admission_wait_queue_length_cpu_user_high_pri description: Length of wait queue @@ -15333,7 +13769,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.cpu.user-low-pri exported_name: admission_wait_queue_length_cpu_user_low_pri description: Length of wait queue @@ -15342,7 +13777,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu exported_name: admission_wait_queue_length_elastic_cpu description: Length of wait queue @@ -15351,7 +13785,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.bulk-low-pri exported_name: admission_wait_queue_length_elastic_cpu_bulk_low_pri description: Length of wait queue @@ -15360,7 +13793,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.bulk-normal-pri exported_name: admission_wait_queue_length_elastic_cpu_bulk_normal_pri description: Length of wait queue @@ -15369,7 +13801,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.high-pri exported_name: admission_wait_queue_length_elastic_cpu_high_pri description: Length of wait queue @@ -15378,7 +13809,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: 
admission.wait_queue_length.elastic-cpu.locking-normal-pri exported_name: admission_wait_queue_length_elastic_cpu_locking_normal_pri description: Length of wait queue @@ -15387,7 +13817,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.locking-pri exported_name: admission_wait_queue_length_elastic_cpu_locking_pri description: Length of wait queue @@ -15396,7 +13825,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.low-pri exported_name: admission_wait_queue_length_elastic_cpu_low_pri description: Length of wait queue @@ -15405,7 +13833,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.normal-pri exported_name: admission_wait_queue_length_elastic_cpu_normal_pri description: Length of wait queue @@ -15414,7 +13841,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.user-high-pri exported_name: admission_wait_queue_length_elastic_cpu_user_high_pri description: Length of wait queue @@ -15423,7 +13849,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-cpu.user-low-pri exported_name: admission_wait_queue_length_elastic_cpu_user_low_pri description: Length of wait queue @@ -15432,7 +13857,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores exported_name: admission_wait_queue_length_elastic_stores description: Length of wait queue @@ -15441,7 +13865,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.bulk-low-pri 
exported_name: admission_wait_queue_length_elastic_stores_bulk_low_pri description: Length of wait queue @@ -15450,7 +13873,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.bulk-normal-pri exported_name: admission_wait_queue_length_elastic_stores_bulk_normal_pri description: Length of wait queue @@ -15459,7 +13881,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.high-pri exported_name: admission_wait_queue_length_elastic_stores_high_pri description: Length of wait queue @@ -15468,7 +13889,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.locking-normal-pri exported_name: admission_wait_queue_length_elastic_stores_locking_normal_pri description: Length of wait queue @@ -15477,7 +13897,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.locking-pri exported_name: admission_wait_queue_length_elastic_stores_locking_pri description: Length of wait queue @@ -15486,7 +13905,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.low-pri exported_name: admission_wait_queue_length_elastic_stores_low_pri description: Length of wait queue @@ -15495,7 +13913,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.normal-pri exported_name: admission_wait_queue_length_elastic_stores_normal_pri description: Length of wait queue @@ -15504,7 +13921,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.user-high-pri 
exported_name: admission_wait_queue_length_elastic_stores_user_high_pri description: Length of wait queue @@ -15513,7 +13929,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.elastic-stores.user-low-pri exported_name: admission_wait_queue_length_elastic_stores_user_low_pri description: Length of wait queue @@ -15522,7 +13937,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv exported_name: admission_wait_queue_length_kv description: Length of wait queue @@ -15531,7 +13945,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores exported_name: admission_wait_queue_length_kv_stores description: Length of wait queue @@ -15540,7 +13953,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.bulk-low-pri exported_name: admission_wait_queue_length_kv_stores_bulk_low_pri description: Length of wait queue @@ -15549,7 +13961,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.bulk-normal-pri exported_name: admission_wait_queue_length_kv_stores_bulk_normal_pri description: Length of wait queue @@ -15558,7 +13969,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.high-pri exported_name: admission_wait_queue_length_kv_stores_high_pri description: Length of wait queue @@ -15567,7 +13977,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.locking-normal-pri exported_name: admission_wait_queue_length_kv_stores_locking_normal_pri description: Length of wait queue @@ 
-15576,7 +13985,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.locking-pri exported_name: admission_wait_queue_length_kv_stores_locking_pri description: Length of wait queue @@ -15585,7 +13993,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.low-pri exported_name: admission_wait_queue_length_kv_stores_low_pri description: Length of wait queue @@ -15594,7 +14001,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.normal-pri exported_name: admission_wait_queue_length_kv_stores_normal_pri description: Length of wait queue @@ -15603,7 +14009,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.user-high-pri exported_name: admission_wait_queue_length_kv_stores_user_high_pri description: Length of wait queue @@ -15612,7 +14017,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv-stores.user-low-pri exported_name: admission_wait_queue_length_kv_stores_user_low_pri description: Length of wait queue @@ -15621,7 +14025,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.bulk-low-pri exported_name: admission_wait_queue_length_kv_bulk_low_pri description: Length of wait queue @@ -15630,7 +14033,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.bulk-normal-pri exported_name: admission_wait_queue_length_kv_bulk_normal_pri description: Length of wait queue @@ -15639,7 +14041,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: 
cockroachdb/admission-control - name: admission.wait_queue_length.kv.high-pri exported_name: admission_wait_queue_length_kv_high_pri description: Length of wait queue @@ -15648,7 +14049,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.locking-normal-pri exported_name: admission_wait_queue_length_kv_locking_normal_pri description: Length of wait queue @@ -15657,7 +14057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.locking-pri exported_name: admission_wait_queue_length_kv_locking_pri description: Length of wait queue @@ -15666,7 +14065,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.low-pri exported_name: admission_wait_queue_length_kv_low_pri description: Length of wait queue @@ -15675,7 +14073,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.normal-pri exported_name: admission_wait_queue_length_kv_normal_pri description: Length of wait queue @@ -15684,7 +14081,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.user-high-pri exported_name: admission_wait_queue_length_kv_user_high_pri description: Length of wait queue @@ -15693,7 +14089,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.kv.user-low-pri exported_name: admission_wait_queue_length_kv_user_low_pri description: Length of wait queue @@ -15702,7 +14097,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response exported_name: admission_wait_queue_length_sql_kv_response description: Length of wait queue 
@@ -15711,7 +14105,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.bulk-low-pri exported_name: admission_wait_queue_length_sql_kv_response_bulk_low_pri description: Length of wait queue @@ -15720,7 +14113,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.bulk-normal-pri exported_name: admission_wait_queue_length_sql_kv_response_bulk_normal_pri description: Length of wait queue @@ -15729,7 +14121,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.high-pri exported_name: admission_wait_queue_length_sql_kv_response_high_pri description: Length of wait queue @@ -15738,7 +14129,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.locking-normal-pri exported_name: admission_wait_queue_length_sql_kv_response_locking_normal_pri description: Length of wait queue @@ -15747,7 +14137,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.locking-pri exported_name: admission_wait_queue_length_sql_kv_response_locking_pri description: Length of wait queue @@ -15756,7 +14145,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.low-pri exported_name: admission_wait_queue_length_sql_kv_response_low_pri description: Length of wait queue @@ -15765,7 +14153,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.normal-pri exported_name: admission_wait_queue_length_sql_kv_response_normal_pri description: Length of 
wait queue @@ -15774,7 +14161,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.user-high-pri exported_name: admission_wait_queue_length_sql_kv_response_user_high_pri description: Length of wait queue @@ -15783,7 +14169,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-kv-response.user-low-pri exported_name: admission_wait_queue_length_sql_kv_response_user_low_pri description: Length of wait queue @@ -15792,7 +14177,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response exported_name: admission_wait_queue_length_sql_sql_response description: Length of wait queue @@ -15801,7 +14185,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.bulk-low-pri exported_name: admission_wait_queue_length_sql_sql_response_bulk_low_pri description: Length of wait queue @@ -15810,7 +14193,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.bulk-normal-pri exported_name: admission_wait_queue_length_sql_sql_response_bulk_normal_pri description: Length of wait queue @@ -15819,7 +14201,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.high-pri exported_name: admission_wait_queue_length_sql_sql_response_high_pri description: Length of wait queue @@ -15828,7 +14209,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.locking-normal-pri exported_name: admission_wait_queue_length_sql_sql_response_locking_normal_pri 
description: Length of wait queue @@ -15837,7 +14217,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.locking-pri exported_name: admission_wait_queue_length_sql_sql_response_locking_pri description: Length of wait queue @@ -15846,7 +14225,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.low-pri exported_name: admission_wait_queue_length_sql_sql_response_low_pri description: Length of wait queue @@ -15855,7 +14233,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.normal-pri exported_name: admission_wait_queue_length_sql_sql_response_normal_pri description: Length of wait queue @@ -15864,7 +14241,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.user-high-pri exported_name: admission_wait_queue_length_sql_sql_response_user_high_pri description: Length of wait queue @@ -15873,7 +14249,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: admission.wait_queue_length.sql-sql-response.user-low-pri exported_name: admission_wait_queue_length_sql_sql_response_user_low_pri description: Length of wait queue @@ -15882,7 +14257,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: batch_requests.bytes exported_name: batch_requests_bytes description: Total byte count of batch requests processed @@ -15891,7 +14265,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: batch_requests.cross_region.bytes exported_name: batch_requests_cross_region_bytes description: "Total byte count of batch requests processed cross region when 
region\n\t\ttiers are configured" @@ -15900,7 +14273,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: batch_requests.cross_zone.bytes exported_name: batch_requests_cross_zone_bytes description: "Total bytes of batch requests processed cross zones within the same\n\t\tregion when zone tiers are configured. If region tiers are not set, it is\n\t\tassumed to be within the same region. To ensure accurate monitoring of\n\t\tcross-zone data transfer, region and zone tiers should be consistently\n\t\tconfigured across all nodes." @@ -15909,7 +14281,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: batch_responses.bytes exported_name: batch_responses_bytes description: Total byte count of batch responses received @@ -15918,7 +14289,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: batch_responses.cross_region.bytes exported_name: batch_responses_cross_region_bytes description: "Total byte count of batch responses received cross region when region\n\t\ttiers are configured" @@ -15927,7 +14297,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: batch_responses.cross_zone.bytes exported_name: batch_responses_cross_zone_bytes description: "Total bytes of batch responses received cross zones within the same\n\t\tregion when zone tiers are configured. If region tiers are not set, it is\n\t\tassumed to be within the same region. To ensure accurate monitoring of\n\t\tcross-zone data transfer, region and zone tiers should be consistently\n\t\tconfigured across all nodes." 
@@ -15936,7 +14305,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: capacity.reserved exported_name: capacity_reserved description: Capacity reserved for snapshots @@ -15945,7 +14313,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: exec.error exported_name: exec_error description: |- @@ -15959,7 +14326,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: exec.latency exported_name: exec_latency description: |- @@ -15979,7 +14345,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: exec.success exported_name: exec_success description: |- @@ -15992,7 +14357,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: exportrequest.delay.total exported_name: exportrequest_delay_total description: Amount by which evaluation of Export requests was delayed @@ -16001,7 +14365,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: follower_reads.success_count exported_name: follower_reads_success_count description: Number of reads successfully processed by any replica @@ -16010,7 +14373,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gcbytesage exported_name: gcbytesage description: Cumulative age of non-live data @@ -16019,7 +14381,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.bytes.received exported_name: gossip_bytes_received description: Number of received gossip bytes @@ -16028,7 +14389,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.bytes.sent exported_name: gossip_bytes_sent description: Number of sent gossip bytes @@ -16037,7 +14397,6 @@ layers: unit: BYTES aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.callbacks.pending exported_name: gossip_callbacks_pending description: Number of gossip callbacks waiting to be processed @@ -16046,7 +14405,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.callbacks.pending_duration exported_name: gossip_callbacks_pending_duration description: Duration of gossip callback queueing to be processed @@ -16055,7 +14413,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.callbacks.processed exported_name: gossip_callbacks_processed description: Number of gossip callbacks processed @@ -16064,7 +14421,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.callbacks.processing_duration exported_name: gossip_callbacks_processing_duration description: Duration of gossip callback processing @@ -16073,7 +14429,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.connections.incoming exported_name: gossip_connections_incoming description: Number of active incoming gossip connections @@ -16082,7 +14437,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.connections.outgoing exported_name: gossip_connections_outgoing description: Number of active outgoing gossip connections @@ -16091,7 +14445,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: gossip.connections.refused exported_name: gossip_connections_refused description: Number of refused incoming gossip connections @@ -16100,7 +14453,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.infos.received exported_name: gossip_infos_received description: Number of received gossip Info objects @@ -16109,7 +14461,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.infos.sent exported_name: gossip_infos_sent description: Number of sent gossip Info objects @@ -16118,7 +14469,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.messages.received exported_name: gossip_messages_received description: Number of received gossip messages @@ -16127,7 +14477,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: gossip.messages.sent exported_name: gossip_messages_sent description: Number of sent gossip messages @@ -16136,7 +14485,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intentage exported_name: intentage description: Cumulative age of locks @@ -16145,7 +14493,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: intentbytes exported_name: intentbytes description: Number of bytes in intent KV pairs @@ -16154,7 +14501,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: intentcount exported_name: intentcount description: Count of intent keys @@ -16164,7 +14510,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: intentresolver.async.throttled exported_name: intentresolver_async_throttled description: Number of intent resolution attempts not run asynchronously due to throttling @@ -16173,7 +14518,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intentresolver.finalized_txns.failed exported_name: intentresolver_finalized_txns_failed description: Number of finalized transaction cleanup failures. Transaction cleanup refers to the process of resolving all of a transactions intents and then garbage collecting its transaction record. 
@@ -16182,7 +14526,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intentresolver.intents.failed exported_name: intentresolver_intents_failed description: Number of intent resolution failures. The unit of measurement is a single intent, so if a batch of intent resolution requests fails, the metric will be incremented for each request in the batch. @@ -16191,7 +14534,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intents.abort-attempts exported_name: intents_abort_attempts description: Count of (point or range) non-poisoning intent abort evaluation attempts @@ -16200,7 +14542,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intents.poison-attempts exported_name: intents_poison_attempts description: Count of (point or range) poisoning intent abort evaluation attempts @@ -16209,7 +14550,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: intents.resolve-attempts exported_name: intents_resolve_attempts description: Count of (point or range) intent commit evaluation attempts @@ -16218,7 +14558,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: keybytes exported_name: keybytes description: Number of bytes taken up by keys @@ -16227,7 +14566,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: keycount exported_name: keycount description: Count of all keys @@ -16236,7 +14574,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.cannot_find_better_candidate exported_name: kv_allocator_load_based_lease_transfers_cannot_find_better_candidate description: The number times the allocator determined that the lease was on the best possible replica @@ -16245,7 +14582,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.delta_not_significant exported_name: kv_allocator_load_based_lease_transfers_delta_not_significant description: The number times the allocator determined that the delta between the existing store and the best candidate was not significant @@ -16254,7 +14590,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.existing_not_overfull exported_name: kv_allocator_load_based_lease_transfers_existing_not_overfull description: The number times the allocator determined that the lease was not on an overfull store @@ -16263,7 +14598,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.follow_the_workload exported_name: kv_allocator_load_based_lease_transfers_follow_the_workload description: The number times the allocator determined that the lease should be transferred to another replica for locality. 
@@ -16272,7 +14606,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.missing_stats_for_existing_stores exported_name: kv_allocator_load_based_lease_transfers_missing_stats_for_existing_stores description: The number times the allocator was missing qps stats for the leaseholder @@ -16281,7 +14614,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_lease_transfers.should_transfer exported_name: kv_allocator_load_based_lease_transfers_should_transfer description: The number times the allocator determined that the lease should be transferred to another replica for better load distribution @@ -16290,7 +14622,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_replica_rebalancing.cannot_find_better_candidate exported_name: kv_allocator_load_based_replica_rebalancing_cannot_find_better_candidate description: The number times the allocator determined that the range was on the best possible stores @@ -16299,7 +14630,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_replica_rebalancing.delta_not_significant exported_name: kv_allocator_load_based_replica_rebalancing_delta_not_significant description: The number times the allocator determined that the delta between an existing store and the best replacement candidate was not high enough @@ -16308,7 +14638,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_replica_rebalancing.existing_not_overfull exported_name: kv_allocator_load_based_replica_rebalancing_existing_not_overfull description: The number times the allocator determined that none of the range's replicas were on overfull stores @@ -16317,7 
+14646,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_replica_rebalancing.missing_stats_for_existing_store exported_name: kv_allocator_load_based_replica_rebalancing_missing_stats_for_existing_store description: The number times the allocator was missing the qps stats for the existing store @@ -16326,7 +14654,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.allocator.load_based_replica_rebalancing.should_transfer exported_name: kv_allocator_load_based_replica_rebalancing_should_transfer description: The number times the allocator determined that the replica should be rebalanced to another store for better load distribution @@ -16335,7 +14662,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.closed_timestamp.max_behind_nanos exported_name: kv_closed_timestamp_max_behind_nanos description: Largest latency between realtime and replica max closed timestamp @@ -16344,7 +14670,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lag_by_cluster_setting exported_name: kv_closed_timestamp_policy_lag_by_cluster_setting description: Number of ranges with LAG_BY_CLUSTER_SETTING closed timestamp policy @@ -16353,7 +14678,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_equal_or_greater_than_300ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_equal_or_greater_than_300ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_EQUAL_OR_GREATER_THAN_300MS closed timestamp policy @@ -16362,7 +14686,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_100ms 
exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_100ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_100MS closed timestamp policy @@ -16371,7 +14694,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_120ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_120ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_120MS closed timestamp policy @@ -16380,7 +14702,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_140ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_140ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_140MS closed timestamp policy @@ -16389,7 +14710,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_160ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_160ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_160MS closed timestamp policy @@ -16398,7 +14718,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_180ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_180ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_180MS closed timestamp policy @@ -16407,7 +14726,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_200ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_200ms description: Number of ranges 
with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_200MS closed timestamp policy @@ -16416,7 +14734,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_20ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_20ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_20MS closed timestamp policy @@ -16425,7 +14742,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_220ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_220ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_220MS closed timestamp policy @@ -16434,7 +14750,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_240ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_240ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_240MS closed timestamp policy @@ -16443,7 +14758,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_260ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_260ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_260MS closed timestamp policy @@ -16452,7 +14766,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_280ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_280ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_280MS closed timestamp policy @@ -16461,7 +14774,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_300ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_300ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_300MS closed timestamp policy @@ -16470,7 +14782,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_40ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_40ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_40MS closed timestamp policy @@ -16479,7 +14790,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_60ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_60ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_60MS closed timestamp policy @@ -16488,7 +14798,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_latency_less_than_80ms exported_name: kv_closed_timestamp_policy_lead_for_global_reads_latency_less_than_80ms description: Number of ranges with LEAD_FOR_GLOBAL_READS_LATENCY_LESS_THAN_80MS closed timestamp policy @@ -16497,7 +14806,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy.lead_for_global_reads_with_no_latency_info exported_name: kv_closed_timestamp_policy_lead_for_global_reads_with_no_latency_info description: Number of ranges with LEAD_FOR_GLOBAL_READS_WITH_NO_LATENCY_INFO closed timestamp policy @@ -16506,7 +14814,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy_change exported_name: 
kv_closed_timestamp_policy_change description: Number of times closed timestamp policy change occurred on ranges @@ -16515,7 +14822,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.closed_timestamp.policy_latency_info_missing exported_name: kv_closed_timestamp_policy_latency_info_missing description: Number of times closed timestamp policy refresh had to use hardcoded network RTT due to missing node latency info for one or more replicas @@ -16524,7 +14830,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.avg_lock_hold_duration_nanos exported_name: kv_concurrency_avg_lock_hold_duration_nanos description: Average lock hold duration across locks currently held in lock tables. Does not include replicated locks (intents) that are not held in memory @@ -16533,7 +14838,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.avg_lock_wait_duration_nanos exported_name: kv_concurrency_avg_lock_wait_duration_nanos description: Average lock wait duration across requests currently waiting in lock wait-queues @@ -16542,7 +14846,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.latch_conflict_wait_durations exported_name: kv_concurrency_latch_conflict_wait_durations description: Durations in nanoseconds spent on latch acquisition waiting for conflicts with other latches @@ -16551,7 +14854,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.lock_wait_queue_waiters exported_name: kv_concurrency_lock_wait_queue_waiters description: Number of requests actively waiting in a lock wait-queue @@ -16560,7 +14862,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.locks exported_name: kv_concurrency_locks description: Number 
of active locks held in lock tables. Does not include replicated locks (intents) that are not held in memory @@ -16570,7 +14871,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: kv.concurrency.locks_shed_due_to_memory_limit exported_name: kv_concurrency_locks_shed_due_to_memory_limit description: The number of locks that were shed because the lock table ran into memory limits @@ -16579,7 +14879,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.locks_with_wait_queues exported_name: kv_concurrency_locks_with_wait_queues description: Number of active locks held in lock tables with active wait-queues @@ -16588,7 +14887,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.max_lock_hold_duration_nanos exported_name: kv_concurrency_max_lock_hold_duration_nanos description: Maximum length of time any lock in a lock table is held. 
Does not include replicated locks (intents) that are not held in memory @@ -16597,7 +14895,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.max_lock_wait_duration_nanos exported_name: kv_concurrency_max_lock_wait_duration_nanos description: Maximum lock wait duration across requests currently waiting in lock wait-queues @@ -16606,7 +14903,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.max_lock_wait_queue_waiters_for_lock exported_name: kv_concurrency_max_lock_wait_queue_waiters_for_lock description: Maximum number of requests actively waiting in any single lock wait-queue @@ -16615,7 +14911,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.concurrency.num_lock_shed_due_to_memory_limit_events exported_name: kv_concurrency_num_lock_shed_due_to_memory_limit_events description: The number of times locks that were shed by the lock table because it ran into memory limits @@ -16624,7 +14919,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.batch_errors exported_name: kv_concurrency_virtual_resolve_batch_errors description: Number of read batches where virtual intent resolution failed during evaluation @@ -16633,7 +14927,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.batches exported_name: kv_concurrency_virtual_resolve_batches description: Number of read batches that attempted virtual intent resolution @@ -16642,7 +14935,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.condense exported_name: kv_concurrency_virtual_resolve_condense description: Number of times point intent resolutions were condensed into range resolutions during 
virtual intent resolution @@ -16651,7 +14943,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.disabled exported_name: kv_concurrency_virtual_resolve_disabled description: Number of times virtual intent resolution was disabled for a request due to excessive range resolve accumulation @@ -16660,7 +14951,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.intent exported_name: kv_concurrency_virtual_resolve_intent description: Number of point intents resolved virtually during read evaluation @@ -16669,7 +14959,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.concurrency.virtual_resolve.intent_range exported_name: kv_concurrency_virtual_resolve_intent_range description: Number of range intent resolutions resolved virtually during read evaluation @@ -16678,7 +14967,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.loadsplitter.cleardirection exported_name: kv_loadsplitter_cleardirection description: Load-based splitter observed an access direction greater than 80% left or right in the samples. @@ -16687,7 +14975,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.loadsplitter.nosplitkey exported_name: kv_loadsplitter_nosplitkey description: Load-based splitter could not find a split key. @@ -16696,7 +14983,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.loadsplitter.popularkey exported_name: kv_loadsplitter_popularkey description: Load-based splitter could not find a split key and the most popular sampled split key occurs in >= 25% of the samples. 
@@ -16705,7 +14991,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.planning_attempts exported_name: kv_prober_planning_attempts description: Number of attempts at planning out probes made; in order to probe KV we need to plan out which ranges to probe; @@ -16714,7 +14999,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.planning_failures exported_name: kv_prober_planning_failures description: Number of attempts at planning out probes that failed; in order to probe KV we need to plan out which ranges to probe; if planning fails, then kvprober is not able to send probes to all ranges; consider alerting on this metric as a result @@ -16723,7 +15007,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.read.attempts exported_name: kv_prober_read_attempts description: Number of attempts made to read probe KV, regardless of outcome @@ -16732,7 +15015,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.read.failures exported_name: kv_prober_read_failures description: Number of attempts made to read probe KV that failed, whether due to error or timeout @@ -16741,7 +15023,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.read.latency exported_name: kv_prober_read_latency description: Latency of successful KV read probes @@ -16750,7 +15031,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.prober.write.attempts exported_name: kv_prober_write_attempts description: Number of attempts made to write probe KV, regardless of outcome @@ -16759,7 +15039,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.write.failures 
exported_name: kv_prober_write_failures description: Number of attempts made to write probe KV that failed, whether due to error or timeout @@ -16768,7 +15047,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.prober.write.latency exported_name: kv_prober_write_latency description: Latency of successful KV write probes @@ -16777,7 +15055,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.prober.write.quarantine.oldest_duration exported_name: kv_prober_write_quarantine_oldest_duration description: The duration that the oldest range in the write quarantine pool has remained @@ -16786,7 +15063,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.budget_allocation_blocked exported_name: kv_rangefeed_budget_allocation_blocked description: Number of times RangeFeed waited for budget availability @@ -16795,7 +15071,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.rangefeed.budget_allocation_failed exported_name: kv_rangefeed_budget_allocation_failed description: Number of times RangeFeed failed because memory budget was exceeded @@ -16804,7 +15079,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.rangefeed.buffered_registrations exported_name: kv_rangefeed_buffered_registrations description: Number of active RangeFeed buffered registrations @@ -16813,7 +15087,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.buffered_sender.queue_size exported_name: kv_rangefeed_buffered_sender_queue_size description: Number of entries in the buffered sender queue @@ -16822,7 +15095,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.catchup_scan_nanos exported_name: kv_rangefeed_catchup_scan_nanos 
description: Time spent in RangeFeed catchup scan @@ -16832,7 +15104,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: kv.rangefeed.closed_timestamp.slow_ranges exported_name: kv_rangefeed_closed_timestamp_slow_ranges description: Number of ranges that have a closed timestamp lagging by more than 5x target lag. Periodically re-calculated @@ -16841,7 +15112,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.closed_timestamp.slow_ranges.cancelled exported_name: kv_rangefeed_closed_timestamp_slow_ranges_cancelled description: Number of rangefeeds that were cancelled due to a chronically lagging closed timestamp @@ -16850,7 +15120,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.rangefeed.closed_timestamp_max_behind_nanos exported_name: kv_rangefeed_closed_timestamp_max_behind_nanos description: Largest latency between realtime and replica max closed timestamp for replicas that have active rangeeds on them @@ -16859,7 +15128,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.mem_shared exported_name: kv_rangefeed_mem_shared description: Memory usage by rangefeeds @@ -16868,7 +15136,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.mem_system exported_name: kv_rangefeed_mem_system description: Memory usage by rangefeeds on system ranges @@ -16877,7 +15144,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.mux_stream_send.slow_events exported_name: kv_rangefeed_mux_stream_send_slow_events description: Number of RangeFeed events that took longer than 10s to send to the client @@ -16886,7 +15152,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
kv.rangefeed.output_loop_unbuffered_registration_nanos exported_name: kv_rangefeed_output_loop_unbuffered_registration_nanos description: Duration of the Rangefeed O(range) output loop goroutine. This is only applicable for unbuffered registrations since buffered registrations spawns long-living goroutines. @@ -16895,7 +15160,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.rangefeed.processors exported_name: kv_rangefeed_processors description: Number of active RangeFeed processors @@ -16904,7 +15168,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.registrations exported_name: kv_rangefeed_registrations description: Number of active RangeFeed registrations @@ -16913,7 +15176,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.scheduled_processor.queue_timeout exported_name: kv_rangefeed_scheduled_processor_queue_timeout description: Number of times the RangeFeed processor shutdown because of a queue send timeout @@ -16922,7 +15184,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.rangefeed.scheduler.normal.latency exported_name: kv_rangefeed_scheduler_normal_latency description: KV RangeFeed normal scheduler latency @@ -16931,7 +15192,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.scheduler.normal.queue_size exported_name: kv_rangefeed_scheduler_normal_queue_size description: Number of entries in the KV RangeFeed normal scheduler queue @@ -16940,7 +15200,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.scheduler.system.latency exported_name: kv_rangefeed_scheduler_system_latency description: KV RangeFeed system scheduler latency @@ -16949,7 +15208,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - 
owner: cockroachdb/kv - name: kv.rangefeed.scheduler.system.queue_size exported_name: kv_rangefeed_scheduler_system_queue_size description: Number of entries in the KV RangeFeed system scheduler queue @@ -16958,7 +15216,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.rangefeed.unbuffered_registrations exported_name: kv_rangefeed_unbuffered_registrations description: Number of active RangeFeed unbuffered registrations @@ -16967,7 +15224,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.replica_circuit_breaker.num_tripped_events exported_name: kv_replica_circuit_breaker_num_tripped_events description: Number of times the per-Replica circuit breakers tripped since process start. @@ -16976,7 +15232,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.replica_circuit_breaker.num_tripped_replicas exported_name: kv_replica_circuit_breaker_num_tripped_replicas description: |- @@ -16988,7 +15243,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.replica_read_batch_evaluate.dropped_latches_before_eval exported_name: kv_replica_read_batch_evaluate_dropped_latches_before_eval description: Number of times read-only batches dropped latches before evaluation. @@ -16997,7 +15251,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.replica_read_batch_evaluate.latency exported_name: kv_replica_read_batch_evaluate_latency description: |- @@ -17009,7 +15262,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.replica_read_batch_evaluate.without_interleaving_iter exported_name: kv_replica_read_batch_evaluate_without_interleaving_iter description: Number of read-only batches evaluated without an intent interleaving iter. 
@@ -17018,7 +15270,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.replica_write_batch_evaluate.latency exported_name: kv_replica_write_batch_evaluate_latency description: |- @@ -17030,7 +15281,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.split.estimated_stats exported_name: kv_split_estimated_stats description: Number of splits that computed estimated MVCC stats. @@ -17039,7 +15289,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.split.total_bytes_estimates exported_name: kv_split_total_bytes_estimates description: Number of total bytes difference between the pre-split and post-split MVCC stats. @@ -17048,7 +15297,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.current_blocked exported_name: kv_tenant_rate_limit_current_blocked description: Number of requests currently blocked by the rate limiter @@ -17057,7 +15305,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.num_tenants exported_name: kv_tenant_rate_limit_num_tenants description: Number of tenants currently being tracked @@ -17066,7 +15313,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.read_batches_admitted exported_name: kv_tenant_rate_limit_read_batches_admitted description: Number of read batches admitted by the rate limiter @@ -17075,7 +15321,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.read_bytes_admitted exported_name: kv_tenant_rate_limit_read_bytes_admitted description: Number of read bytes admitted by the rate limiter @@ -17084,7 +15329,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/kv - name: kv.tenant_rate_limit.read_requests_admitted exported_name: kv_tenant_rate_limit_read_requests_admitted description: Number of read requests admitted by the rate limiter @@ -17093,7 +15337,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.write_batches_admitted exported_name: kv_tenant_rate_limit_write_batches_admitted description: Number of write batches admitted by the rate limiter @@ -17102,7 +15345,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.write_bytes_admitted exported_name: kv_tenant_rate_limit_write_bytes_admitted description: Number of write bytes admitted by the rate limiter @@ -17111,7 +15353,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kv.tenant_rate_limit.write_requests_admitted exported_name: kv_tenant_rate_limit_write_requests_admitted description: Number of write requests admitted by the rate limiter @@ -17120,7 +15361,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: kvflowcontrol.eval_wait.elastic.requests.admitted exported_name: kvflowcontrol_eval_wait_elastic_requests_admitted description: Number of elastic requests admitted by the flow controller @@ -17129,7 +15369,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.elastic.requests.bypassed exported_name: kvflowcontrol_eval_wait_elastic_requests_bypassed description: Number of waiting elastic requests that bypassed the flow controller due the evaluating replica not being the leader @@ -17138,7 +15377,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.elastic.requests.errored exported_name: 
kvflowcontrol_eval_wait_elastic_requests_errored description: Number of elastic requests that errored out while waiting for flow tokens @@ -17147,7 +15385,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.elastic.requests.waiting exported_name: kvflowcontrol_eval_wait_elastic_requests_waiting description: Number of elastic requests waiting for flow tokens @@ -17156,7 +15393,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.regular.requests.admitted exported_name: kvflowcontrol_eval_wait_regular_requests_admitted description: Number of regular requests admitted by the flow controller @@ -17165,7 +15401,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.regular.requests.bypassed exported_name: kvflowcontrol_eval_wait_regular_requests_bypassed description: Number of waiting regular requests that bypassed the flow controller due the evaluating replica not being the leader @@ -17174,7 +15409,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.regular.requests.errored exported_name: kvflowcontrol_eval_wait_regular_requests_errored description: Number of regular requests that errored out while waiting for flow tokens @@ -17183,7 +15417,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.eval_wait.regular.requests.waiting exported_name: kvflowcontrol_eval_wait_regular_requests_waiting description: Number of regular requests waiting for flow tokens @@ -17192,7 +15425,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.range_controller.count exported_name: 
kvflowcontrol_range_controller_count description: Gauge of range flow controllers currently open, this should align with the number of leaders @@ -17201,7 +15433,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.send_queue.count exported_name: kvflowcontrol_send_queue_count description: Count of all raft entries queued for sending to followers, waiting on available elastic send tokens @@ -17210,7 +15441,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.send_queue.prevent.count exported_name: kvflowcontrol_send_queue_prevent_count description: Counter of replication streams that were prevented from forming a send queue @@ -17219,7 +15449,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.send_queue.scheduled.deducted_bytes exported_name: kvflowcontrol_send_queue_scheduled_deducted_bytes description: Gauge of elastic send token bytes already deducted by replication streams waiting on the scheduler @@ -17228,7 +15457,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.send_queue.scheduled.force_flush exported_name: kvflowcontrol_send_queue_scheduled_force_flush description: Gauge of replication streams scheduled to force flush their send queue @@ -17237,7 +15465,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.eval.elastic.blocked_count exported_name: kvflowcontrol_streams_eval_elastic_blocked_count description: Number of eval replication streams with no flow tokens available for elastic requests @@ -17246,7 +15473,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.eval.elastic.total_count exported_name: 
kvflowcontrol_streams_eval_elastic_total_count description: Total number of eval replication streams for elastic requests @@ -17255,7 +15481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.eval.regular.blocked_count exported_name: kvflowcontrol_streams_eval_regular_blocked_count description: Number of eval replication streams with no flow tokens available for regular requests @@ -17264,7 +15489,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.eval.regular.total_count exported_name: kvflowcontrol_streams_eval_regular_total_count description: Total number of eval replication streams for regular requests @@ -17273,7 +15497,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.send.elastic.blocked_count exported_name: kvflowcontrol_streams_send_elastic_blocked_count description: Number of send replication streams with no flow tokens available for elastic requests @@ -17282,7 +15505,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.send.elastic.total_count exported_name: kvflowcontrol_streams_send_elastic_total_count description: Total number of send replication streams for elastic requests @@ -17291,7 +15513,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.send.regular.blocked_count exported_name: kvflowcontrol_streams_send_regular_blocked_count description: Number of send replication streams with no flow tokens available for regular requests @@ -17300,7 +15521,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.streams.send.regular.total_count exported_name: kvflowcontrol_streams_send_regular_total_count description: 
Total number of send replication streams for regular requests @@ -17309,7 +15529,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.elastic.available exported_name: kvflowcontrol_tokens_eval_elastic_available description: Flow eval tokens available for elastic requests, across all replication streams @@ -17318,7 +15537,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.elastic.deducted exported_name: kvflowcontrol_tokens_eval_elastic_deducted description: Flow eval tokens deducted by elastic requests, across all replication streams @@ -17327,7 +15545,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.elastic.returned exported_name: kvflowcontrol_tokens_eval_elastic_returned description: Flow eval tokens returned by elastic requests, across all replication streams @@ -17336,7 +15553,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.elastic.returned.disconnect exported_name: kvflowcontrol_tokens_eval_elastic_returned_disconnect description: Flow eval tokens returned early by elastic due disconnects, across all replication stream, this is a subset of returned tokens @@ -17345,7 +15561,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.elastic.unaccounted exported_name: kvflowcontrol_tokens_eval_elastic_unaccounted description: Flow eval tokens returned by elastic requests that were unaccounted for, across all replication streams @@ -17354,7 +15569,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.regular.available 
exported_name: kvflowcontrol_tokens_eval_regular_available description: Flow eval tokens available for regular requests, across all replication streams @@ -17363,7 +15577,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.regular.deducted exported_name: kvflowcontrol_tokens_eval_regular_deducted description: Flow eval tokens deducted by regular requests, across all replication streams @@ -17372,7 +15585,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.regular.returned exported_name: kvflowcontrol_tokens_eval_regular_returned description: Flow eval tokens returned by regular requests, across all replication streams @@ -17381,7 +15593,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.regular.returned.disconnect exported_name: kvflowcontrol_tokens_eval_regular_returned_disconnect description: Flow eval tokens returned early by regular due disconnects, across all replication stream, this is a subset of returned tokens @@ -17390,7 +15601,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.eval.regular.unaccounted exported_name: kvflowcontrol_tokens_eval_regular_unaccounted description: Flow eval tokens returned by regular requests that were unaccounted for, across all replication streams @@ -17399,7 +15609,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.available exported_name: kvflowcontrol_tokens_send_elastic_available description: Flow send tokens available for elastic requests, across all replication streams @@ -17408,7 +15617,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - 
owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.deducted exported_name: kvflowcontrol_tokens_send_elastic_deducted description: Flow send tokens deducted by elastic requests, across all replication streams @@ -17417,7 +15625,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.deducted.force_flush_send_queue exported_name: kvflowcontrol_tokens_send_elastic_deducted_force_flush_send_queue description: Flow send tokens deducted by elastic requests, across all replication streams due to force flushing the stream's send queue @@ -17426,7 +15633,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.deducted.prevent_send_queue exported_name: kvflowcontrol_tokens_send_elastic_deducted_prevent_send_queue description: Flow send tokens deducted by elastic requests, across all replication streams to prevent forming a send queue @@ -17435,7 +15641,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.returned exported_name: kvflowcontrol_tokens_send_elastic_returned description: Flow send tokens returned by elastic requests, across all replication streams @@ -17444,7 +15649,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.elastic.returned.disconnect exported_name: kvflowcontrol_tokens_send_elastic_returned_disconnect description: Flow send tokens returned early by elastic due disconnects, across all replication stream, this is a subset of returned tokens @@ -17453,7 +15657,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: 
kvflowcontrol.tokens.send.elastic.unaccounted exported_name: kvflowcontrol_tokens_send_elastic_unaccounted description: Flow send tokens returned by elastic requests that were unaccounted for, across all replication streams @@ -17462,7 +15665,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.available exported_name: kvflowcontrol_tokens_send_regular_available description: Flow send tokens available for regular requests, across all replication streams @@ -17471,7 +15673,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.deducted exported_name: kvflowcontrol_tokens_send_regular_deducted description: Flow send tokens deducted by regular requests, across all replication streams @@ -17480,7 +15681,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.deducted.prevent_send_queue exported_name: kvflowcontrol_tokens_send_regular_deducted_prevent_send_queue description: Flow send tokens deducted by regular requests, across all replication streams to prevent forming a send queue @@ -17489,7 +15689,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.returned exported_name: kvflowcontrol_tokens_send_regular_returned description: Flow send tokens returned by regular requests, across all replication streams @@ -17498,7 +15697,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.returned.disconnect exported_name: kvflowcontrol_tokens_send_regular_returned_disconnect description: Flow send tokens returned early by regular due disconnects, across all replication stream, this 
is a subset of returned tokens @@ -17507,7 +15705,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: kvflowcontrol.tokens.send.regular.unaccounted exported_name: kvflowcontrol_tokens_send_regular_unaccounted description: Flow send tokens returned by regular requests that were unaccounted for, across all replication streams @@ -17516,7 +15713,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/admission-control - name: leases.epoch exported_name: leases_epoch description: Number of replica leaseholders using epoch-based leases @@ -17526,7 +15722,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: leases.error exported_name: leases_error description: Number of failed lease requests @@ -17535,7 +15730,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: leases.expiration exported_name: leases_expiration description: Number of replica leaseholders using expiration-based leases @@ -17545,7 +15739,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: leases.leader exported_name: leases_leader description: Number of replica leaseholders using leader leases @@ -17555,7 +15748,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: leases.liveness exported_name: leases_liveness description: Number of replica leaseholders for the liveness range(s) @@ -17565,7 +15757,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: leases.preferences.less-preferred exported_name: leases_preferences_less_preferred description: Number of replica leaseholders which satisfy a lease preference which is not the most preferred @@ -17574,7 +15765,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: 
leases.preferences.violating exported_name: leases_preferences_violating description: Number of replica leaseholders which violate lease preferences @@ -17583,7 +15773,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: leases.read_summary.local_compression exported_name: leases_read_summary_local_compression description: Number of times the local segment of a read summary lost precision due to compression @@ -17592,7 +15781,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: leases.requests.latency exported_name: leases_requests_latency description: Lease request latency (all types and outcomes, coalesced) @@ -17601,7 +15789,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: leases.success exported_name: leases_success description: Number of successful lease requests @@ -17610,7 +15797,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: leases.transfers.error exported_name: leases_transfers_error description: Number of failed lease transfers @@ -17620,7 +15806,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: leases.transfers.locks_written exported_name: leases_transfers_locks_written description: Number of locks written to storage during lease transfers @@ -17629,7 +15814,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: livebytes exported_name: livebytes description: Number of bytes of live data (keys plus values) @@ -17639,7 +15823,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: livecount exported_name: livecount description: Count of live keys @@ -17648,7 +15831,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: liveness.epochincrements 
exported_name: liveness_epochincrements description: Number of times this node has incremented its liveness epoch @@ -17657,7 +15839,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: liveness.heartbeatfailures exported_name: liveness_heartbeatfailures description: Number of failed node liveness heartbeats from this node @@ -17667,7 +15848,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: liveness.heartbeatsinflight exported_name: liveness_heartbeatsinflight description: Number of in-flight liveness heartbeats from this node @@ -17676,7 +15856,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: liveness.heartbeatsuccesses exported_name: liveness_heartbeatsuccesses description: Number of successful node liveness heartbeats from this node @@ -17685,7 +15864,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: lockbytes exported_name: lockbytes description: Number of bytes taken up by replicated lock key-values (shared and exclusive strength, not intent strength) @@ -17694,7 +15872,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: lockcount exported_name: lockcount description: Count of replicated locks (shared, exclusive, and intent strength) @@ -17703,7 +15880,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.change.external.lease.failure exported_name: mma_change_external_lease_failure labeled_name: 'mma.change{origin: external, type: lease, result: failure}' @@ -17713,7 +15889,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.external.lease.success exported_name: mma_change_external_lease_success labeled_name: 'mma.change{origin: external, type: lease, result: success}' @@ -17723,7 +15898,6 
@@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.external.replica.failure exported_name: mma_change_external_replica_failure labeled_name: 'mma.change{origin: external, type: replica, result: failure}' @@ -17733,7 +15907,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.external.replica.success exported_name: mma_change_external_replica_success labeled_name: 'mma.change{origin: external, type: replica, result: success}' @@ -17743,7 +15916,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.rebalance.lease.failure exported_name: mma_change_rebalance_lease_failure labeled_name: 'mma.change{origin: rebalance, type: lease, result: failure}' @@ -17753,7 +15925,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.rebalance.lease.success exported_name: mma_change_rebalance_lease_success labeled_name: 'mma.change{origin: rebalance, type: lease, result: success}' @@ -17763,7 +15934,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.rebalance.replica.failure exported_name: mma_change_rebalance_replica_failure labeled_name: 'mma.change{origin: rebalance, type: replica, result: failure}' @@ -17773,7 +15943,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.change.rebalance.replica.success exported_name: mma_change_rebalance_replica_success labeled_name: 'mma.change{origin: rebalance, type: replica, result: success}' @@ -17783,7 +15952,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.dropped exported_name: mma_dropped description: Number of pending replica or lease changes that MMA dropped because its internal 
state became inconsistent with the actual cluster state. This can happen when an external change (e.g., from another component or admin command) modifies the cluster in a way that invalidates MMA's pending changes. A non-zero value is expected during normal operation when external changes occur. @@ -17792,7 +15960,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.external.registration.failure exported_name: mma_external_registration_failure description: Number of external operations (replica/lease changes not initiated by MMA) that failed to register with MMA due to pre-check failures (e.g., the change conflicts with MMA's current state). The external operation may still proceed, but MMA will not track it until the next state synchronization. @@ -17801,7 +15968,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.external.registration.success exported_name: mma_external_registration_success description: Number of external operations (replica/lease changes not initiated by MMA, e.g., from admin commands or other allocators) that were successfully registered with MMA. Registration allows MMA to track the change and keep its internal state synchronized with the cluster. 
@@ -17810,7 +15976,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: mma.overloaded_store.lease_grace.failure exported_name: mma_overloaded_store_lease_grace_failure labeled_name: 'mma.overloaded_store{type: lease_grace, result: failure}' @@ -17820,7 +15985,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.lease_grace.success exported_name: mma_overloaded_store_lease_grace_success labeled_name: 'mma.overloaded_store{type: lease_grace, result: success}' @@ -17830,7 +15994,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.long_dur.failure exported_name: mma_overloaded_store_long_dur_failure labeled_name: 'mma.overloaded_store{type: long_dur, result: failure}' @@ -17840,7 +16003,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.long_dur.success exported_name: mma_overloaded_store_long_dur_success labeled_name: 'mma.overloaded_store{type: long_dur, result: success}' @@ -17850,7 +16012,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.medium_dur.failure exported_name: mma_overloaded_store_medium_dur_failure labeled_name: 'mma.overloaded_store{type: medium_dur, result: failure}' @@ -17860,7 +16021,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.medium_dur.success exported_name: mma_overloaded_store_medium_dur_success labeled_name: 'mma.overloaded_store{type: medium_dur, result: success}' @@ -17870,7 +16030,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.short_dur.failure exported_name: mma_overloaded_store_short_dur_failure labeled_name: 'mma.overloaded_store{type: short_dur, result: failure}' @@ -17880,7 +16039,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.overloaded_store.short_dur.success exported_name: mma_overloaded_store_short_dur_success labeled_name: 'mma.overloaded_store{type: short_dur, result: success}' @@ -17890,7 +16048,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.span_config.normalization.error exported_name: mma_span_config_normalization_error labeled_name: 'mma.span_config.normalization{result: error}' @@ -17900,7 +16057,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.span_config.normalization.soft_error exported_name: mma_span_config_normalization_soft_error labeled_name: 'mma.span_config.normalization{result: soft_error}' @@ -17910,7 +16066,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.cpu.capacity exported_name: mma_store_cpu_capacity description: Logical CPU capacity estimated by MMA by extrapolating from the current load and system CPU utilization after accounting for CPU load that MMA cannot account for that scales with KV work (RPC, DistSender, etc.) and load that doesn't (SQL). @@ -17919,7 +16074,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.cpu.load exported_name: mma_store_cpu_load description: CPU load that is attributed to the replicas on this store. This includes reads (for leaseholder) and raft. Since CPU is shared across stores on a node, we approximate this by measuring the CPU usage on the node and then dividing this equally among all stores on the node. 
@@ -17928,7 +16082,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.cpu.utilization exported_name: mma_store_cpu_utilization description: Ratio of logical CPU load to capacity expressed as a percentage @@ -17937,7 +16090,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.disk.capacity exported_name: mma_store_disk_capacity description: Logical disk capacity estimated by MMA by extrapolating from the logical bytes consumed by the replicas and the current used and free physical disk bytes. @@ -17946,7 +16098,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.disk.logical exported_name: mma_store_disk_logical description: Logical bytes consumed by the replicas on this store as reported by MVCC statistics without accounting for any space amplification or compression. @@ -17955,7 +16106,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.disk.utilization exported_name: mma_store_disk_utilization description: Ratio of logical disk usage to capacity expressed as a percentage @@ -17964,7 +16114,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: mma.store.write.bandwidth exported_name: mma_store_write_bandwidth description: Disk write bandwidth as observed by MMA corresponding to the store @@ -17973,7 +16122,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: node-id exported_name: node_id description: node ID with labels for advertised RPC and HTTP addresses @@ -17982,7 +16130,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.metric_export.child.count exported_name: obs_metric_export_child_count description: 'Exported-line-weighted child count per parent metric: histogram children count their expanded Prometheus lines, others count 1' @@ 
-17991,7 +16138,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.metric_export.codeowner.metric_count exported_name: obs_metric_export_codeowner_metric_count description: Metric count per CODEOWNER team in the Prometheus scrape (histograms expand to buckets plus count and sum) @@ -18000,7 +16146,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.metric_export.line.count exported_name: obs_metric_export_line_count description: Total individual time series (all label combinations) in the most recent Prometheus scrape @@ -18009,7 +16154,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: obs.metric_export.name.count exported_name: obs_metric_export_name_count description: Number of metric families (unique metric names) in the most recent Prometheus scrape @@ -18018,7 +16162,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/obs-prs - name: queue.consistency.pending exported_name: queue_consistency_pending description: Number of pending replicas in the consistency checker queue @@ -18027,7 +16170,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.consistency.process.failure exported_name: queue_consistency_process_failure description: Number of replicas which failed processing in the consistency checker queue @@ -18036,7 +16178,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.consistency.process.success exported_name: queue_consistency_process_success description: Number of replicas successfully processed by the consistency checker queue @@ -18045,7 +16186,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.consistency.processingnanos exported_name: queue_consistency_processingnanos description: Nanoseconds spent processing 
replicas in the consistency checker queue @@ -18054,7 +16194,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.abortspanconsidered exported_name: queue_gc_info_abortspanconsidered description: Number of AbortSpan entries old enough to be considered for removal @@ -18063,7 +16202,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.abortspangcnum exported_name: queue_gc_info_abortspangcnum description: Number of AbortSpan entries fit for removal @@ -18072,7 +16210,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.abortspanscanned exported_name: queue_gc_info_abortspanscanned description: Number of transactions present in the AbortSpan scanned from the engine @@ -18081,7 +16218,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.clearrangefailed exported_name: queue_gc_info_clearrangefailed description: Number of failed ClearRange operations during GC @@ -18090,7 +16226,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.clearrangesuccess exported_name: queue_gc_info_clearrangesuccess description: Number of successful ClearRange operations during GC @@ -18099,7 +16234,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.enqueuehighpriority exported_name: queue_gc_info_enqueuehighpriority description: Number of replicas enqueued for GC with high priority @@ -18108,7 +16242,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.intentsconsidered exported_name: queue_gc_info_intentsconsidered description: Number of 'old' intents @@ -18117,7 +16250,6 @@ layers: unit: 
COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.intenttxns exported_name: queue_gc_info_intenttxns description: Number of associated distinct transactions @@ -18126,7 +16258,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.numkeysaffected exported_name: queue_gc_info_numkeysaffected description: Number of keys with GC'able data @@ -18135,7 +16266,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.numrangekeysaffected exported_name: queue_gc_info_numrangekeysaffected description: Number of range keys GC'able @@ -18144,7 +16274,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.pushtxn exported_name: queue_gc_info_pushtxn description: Number of attempted pushes @@ -18153,7 +16282,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.resolvefailed exported_name: queue_gc_info_resolvefailed description: Number of cleanup intent failures during GC @@ -18162,7 +16290,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.resolvesuccess exported_name: queue_gc_info_resolvesuccess description: Number of successful intent resolutions @@ -18171,7 +16298,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.resolvetotal exported_name: queue_gc_info_resolvetotal description: Number of attempted intent resolutions @@ -18180,7 +16306,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionresolvefailed exported_name: queue_gc_info_transactionresolvefailed description: Number of intent cleanup failures for local transactions 
during GC @@ -18189,7 +16314,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspangcaborted exported_name: queue_gc_info_transactionspangcaborted description: Number of GC'able entries corresponding to aborted txns @@ -18198,7 +16322,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspangccommitted exported_name: queue_gc_info_transactionspangccommitted description: Number of GC'able entries corresponding to committed txns @@ -18207,7 +16330,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspangcpending exported_name: queue_gc_info_transactionspangcpending description: Number of GC'able entries corresponding to pending txns @@ -18216,7 +16338,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspangcprepared exported_name: queue_gc_info_transactionspangcprepared description: Number of GC'able entries corresponding to prepared txns @@ -18225,7 +16346,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspangcstaging exported_name: queue_gc_info_transactionspangcstaging description: Number of GC'able entries corresponding to staging txns @@ -18234,7 +16354,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.info.transactionspanscanned exported_name: queue_gc_info_transactionspanscanned description: Number of entries in transaction spans scanned from the engine @@ -18243,7 +16362,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.pending exported_name: queue_gc_pending description: Number of pending replicas in the MVCC 
GC queue @@ -18253,7 +16371,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.gc.process.failure exported_name: queue_gc_process_failure description: Number of replicas which failed processing in the MVCC GC queue @@ -18263,7 +16380,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.gc.process.success exported_name: queue_gc_process_success description: Number of replicas successfully processed by the MVCC GC queue @@ -18272,7 +16388,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.gc.processingnanos exported_name: queue_gc_processingnanos description: Nanoseconds spent processing replicas in the MVCC GC queue @@ -18281,7 +16396,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.lease.pending exported_name: queue_lease_pending description: Number of pending replicas in the replica lease queue @@ -18291,7 +16405,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.lease.process.failure exported_name: queue_lease_process_failure description: Number of replicas which failed processing in the replica lease queue @@ -18300,7 +16413,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.lease.process.success exported_name: queue_lease_process_success description: Number of replicas successfully processed by the replica lease queue @@ -18309,7 +16421,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.lease.processingnanos exported_name: queue_lease_processingnanos description: Nanoseconds spent processing replicas in the replica lease queue @@ -18318,7 +16429,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.lease.purgatory exported_name: queue_lease_purgatory description: Number of replicas in the lease queue's purgatory, awaiting lease transfer operations @@ -18327,7 +16437,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.merge.pending exported_name: queue_merge_pending description: Number of pending replicas in the merge queue @@ -18337,7 +16446,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.merge.process.failure exported_name: queue_merge_process_failure description: Number of replicas which failed processing in the merge queue @@ -18347,7 +16455,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.merge.process.success exported_name: queue_merge_process_success description: Number of replicas successfully processed by the merge queue @@ -18357,7 +16464,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.merge.processingnanos exported_name: queue_merge_processingnanos description: Nanoseconds spent processing replicas in the merge queue @@ -18367,7 +16473,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.merge.purgatory exported_name: queue_merge_purgatory description: Number of replicas in the merge queue's purgatory, waiting to become mergeable @@ -18376,7 +16481,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.raftlog.pending exported_name: queue_raftlog_pending description: Number of pending replicas in the Raft log queue @@ -18386,7 +16490,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.raftlog.process.failure exported_name: queue_raftlog_process_failure description: Number of 
replicas which failed processing in the Raft log queue @@ -18396,7 +16499,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.raftlog.process.success exported_name: queue_raftlog_process_success description: Number of replicas successfully processed by the Raft log queue @@ -18406,7 +16508,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.raftlog.processingnanos exported_name: queue_raftlog_processingnanos description: Nanoseconds spent processing replicas in the Raft log queue @@ -18416,7 +16517,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.raftsnapshot.pending exported_name: queue_raftsnapshot_pending description: Number of pending replicas in the Raft repair queue @@ -18425,7 +16525,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.raftsnapshot.process.failure exported_name: queue_raftsnapshot_process_failure description: Number of replicas which failed processing in the Raft repair queue @@ -18434,7 +16533,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.raftsnapshot.process.success exported_name: queue_raftsnapshot_process_success description: Number of replicas successfully processed by the Raft repair queue @@ -18443,7 +16541,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.raftsnapshot.processingnanos exported_name: queue_raftsnapshot_processingnanos description: Nanoseconds spent processing replicas in the Raft repair queue @@ -18452,7 +16549,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicagc.pending exported_name: queue_replicagc_pending description: Number of pending replicas 
in the replica GC queue @@ -18462,7 +16558,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicagc.process.failure exported_name: queue_replicagc_process_failure description: Number of replicas which failed processing in the replica GC queue @@ -18472,7 +16567,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicagc.process.success exported_name: queue_replicagc_process_success description: Number of replicas successfully processed by the replica GC queue @@ -18482,7 +16576,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicagc.processingnanos exported_name: queue_replicagc_processingnanos description: Nanoseconds spent processing replicas in the replica GC queue @@ -18491,7 +16584,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicagc.purgatory exported_name: queue_replicagc_purgatory description: Number of replicas in the replica GC queue's purgatory, waiting for a prerequisite (e.g. 
left neighbor GC) before they can be GC'd @@ -18501,7 +16593,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicagc.removereplica exported_name: queue_replicagc_removereplica description: Number of replica removals attempted by the replica GC queue @@ -18510,7 +16601,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.addnonvoterreplica exported_name: queue_replicate_addnonvoterreplica description: Number of non-voter replica additions attempted by the replicate queue @@ -18519,7 +16609,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.addreplica exported_name: queue_replicate_addreplica description: Number of replica additions attempted by the replicate queue @@ -18528,7 +16617,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.addreplica.error exported_name: queue_replicate_addreplica_error description: Number of failed replica additions processed by the replicate queue @@ -18537,7 +16625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.addreplica.success exported_name: queue_replicate_addreplica_success description: Number of successful replica additions processed by the replicate queue @@ -18546,7 +16633,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.addvoterreplica exported_name: queue_replicate_addvoterreplica description: Number of voter replica additions attempted by the replicate queue @@ -18555,7 +16641,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.enqueue.add exported_name: queue_replicate_enqueue_add description: Number of replicas 
successfully added to the replicate queue @@ -18564,7 +16649,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.enqueue.failedprecondition exported_name: queue_replicate_enqueue_failedprecondition description: Number of replicas that failed the precondition checks and were therefore not added to the replicate queue @@ -18573,7 +16657,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.enqueue.noaction exported_name: queue_replicate_enqueue_noaction description: Number of replicas for which ShouldQueue determined no action was needed and were therefore not added to the replicate queue @@ -18582,7 +16665,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.enqueue.unexpectederror exported_name: queue_replicate_enqueue_unexpectederror description: Number of replicas that were expected to be enqueued (ShouldQueue returned true or the caller decided to add to the replicate queue directly), but failed to be enqueued due to unexpected errors @@ -18591,7 +16673,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.nonvoterpromotions exported_name: queue_replicate_nonvoterpromotions description: Number of non-voters promoted to voters by the replicate queue @@ -18600,7 +16681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.pending exported_name: queue_replicate_pending description: Number of pending replicas in the replicate queue @@ -18610,7 +16690,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicate.priority_inversion.requeue exported_name: queue_replicate_priority_inversion_requeue description: Number of priority inversions in the replicate 
queue that resulted in requeuing of the replicas. A priority inversion occurs when the priority at processing time ends up being lower than at enqueue time. When the priority has changed from a high priority repair action to rebalance, the change is requeued to avoid unfairness. @@ -18619,7 +16698,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.priority_inversion.total exported_name: queue_replicate_priority_inversion_total description: Total number of priority inversions in the replicate queue. A priority inversion occurs when the priority at processing time ends up being lower than at enqueue time @@ -18628,7 +16706,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.process.failure exported_name: queue_replicate_process_failure description: Number of replicas which failed processing in the replicate queue @@ -18638,7 +16715,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicate.process.success exported_name: queue_replicate_process_success description: Number of replicas successfully processed by the replicate queue @@ -18648,7 +16724,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: queue.replicate.processingnanos exported_name: queue_replicate_processingnanos description: Nanoseconds spent processing replicas in the replicate queue @@ -18657,7 +16732,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.purgatory exported_name: queue_replicate_purgatory description: Number of replicas in the replicate queue's purgatory, awaiting allocation options @@ -18666,7 +16740,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.replicate.queue_full 
exported_name: queue_replicate_queue_full description: Number of times a replica was dropped from the queue due to queue fullness @@ -18675,7 +16748,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.rebalancenonvoterreplica exported_name: queue_replicate_rebalancenonvoterreplica description: Number of non-voter replica rebalancer-initiated additions attempted by the replicate queue @@ -18684,7 +16756,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.rebalancereplica exported_name: queue_replicate_rebalancereplica description: Number of replica rebalancer-initiated additions attempted by the replicate queue @@ -18693,7 +16764,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.rebalancevoterreplica exported_name: queue_replicate_rebalancevoterreplica description: Number of voter replica rebalancer-initiated additions attempted by the replicate queue @@ -18702,7 +16772,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedeadnonvoterreplica exported_name: queue_replicate_removedeadnonvoterreplica description: Number of dead non-voter replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18711,7 +16780,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedeadreplica exported_name: queue_replicate_removedeadreplica description: Number of dead replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18720,7 +16788,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedeadreplica.error exported_name: queue_replicate_removedeadreplica_error 
description: Number of failed dead replica removals processed by the replicate queue @@ -18729,7 +16796,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedeadreplica.success exported_name: queue_replicate_removedeadreplica_success description: Number of successful dead replica removals processed by the replicate queue @@ -18738,7 +16804,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedeadvoterreplica exported_name: queue_replicate_removedeadvoterreplica description: Number of dead voter replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18747,7 +16812,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedecommissioningnonvoterreplica exported_name: queue_replicate_removedecommissioningnonvoterreplica description: Number of decommissioning non-voter replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18756,7 +16820,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedecommissioningreplica exported_name: queue_replicate_removedecommissioningreplica description: Number of decommissioning replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18765,7 +16828,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedecommissioningreplica.error exported_name: queue_replicate_removedecommissioningreplica_error description: Number of failed decommissioning replica removals processed by the replicate queue @@ -18774,7 +16836,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
queue.replicate.removedecommissioningreplica.success exported_name: queue_replicate_removedecommissioningreplica_success description: Number of successful decommissioning replica removals processed by the replicate queue @@ -18783,7 +16844,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removedecommissioningvoterreplica exported_name: queue_replicate_removedecommissioningvoterreplica description: Number of decommissioning voter replica removals attempted by the replicate queue (typically in response to a node outage) @@ -18792,7 +16852,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removelearnerreplica exported_name: queue_replicate_removelearnerreplica description: Number of learner replica removals attempted by the replicate queue (typically due to internal race conditions) @@ -18801,7 +16860,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removenonvoterreplica exported_name: queue_replicate_removenonvoterreplica description: Number of non-voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition) @@ -18810,7 +16868,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removereplica exported_name: queue_replicate_removereplica description: Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition) @@ -18819,7 +16876,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removereplica.error exported_name: queue_replicate_removereplica_error description: Number of failed replica removals processed by the replicate queue @@ -18828,7 +16884,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removereplica.success exported_name: queue_replicate_removereplica_success description: Number of successful replica removals processed by the replicate queue @@ -18837,7 +16892,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.removevoterreplica exported_name: queue_replicate_removevoterreplica description: Number of voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition) @@ -18846,7 +16900,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.replacedeadreplica.error exported_name: queue_replicate_replacedeadreplica_error description: Number of failed dead replica replacements processed by the replicate queue @@ -18855,7 +16908,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.replacedeadreplica.success exported_name: queue_replicate_replacedeadreplica_success description: Number of successful dead replica replacements processed by the replicate queue @@ -18864,7 +16916,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.replacedecommissioningreplica.success exported_name: queue_replicate_replacedecommissioningreplica_success description: Number of successful decommissioning replica replacements processed by the replicate queue @@ -18873,7 +16924,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.replicate.transferlease exported_name: queue_replicate_transferlease description: Number of range lease transfers attempted by the replicate queue @@ -18882,7 +16932,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
queue.replicate.voterdemotions exported_name: queue_replicate_voterdemotions description: Number of voters demoted to non-voters by the replicate queue @@ -18891,7 +16940,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.load_based exported_name: queue_split_load_based description: Number of range splits due to a range being greater than the configured max range load @@ -18900,7 +16948,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.pending exported_name: queue_split_pending description: Number of pending replicas in the split queue @@ -18909,7 +16956,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.split.process.failure exported_name: queue_split_process_failure description: Number of replicas which failed processing in the split queue @@ -18918,7 +16964,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.process.success exported_name: queue_split_process_success description: Number of replicas successfully processed by the split queue @@ -18927,7 +16972,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.processingnanos exported_name: queue_split_processingnanos description: Nanoseconds spent processing replicas in the split queue @@ -18936,7 +16980,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.purgatory exported_name: queue_split_purgatory description: Number of replicas in the split queue's purgatory, waiting to become splittable @@ -18945,7 +16988,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.split.size_based exported_name: queue_split_size_based description: Number of range splits due to a range being 
greater than the configured max range size @@ -18954,7 +16996,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.split.span_config_based exported_name: queue_split_span_config_based description: Number of range splits due to span configuration @@ -18963,7 +17004,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.tsmaintenance.pending exported_name: queue_tsmaintenance_pending description: Number of pending replicas in the time series maintenance queue @@ -18972,7 +17012,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: queue.tsmaintenance.process.failure exported_name: queue_tsmaintenance_process_failure description: Number of replicas which failed processing in the time series maintenance queue @@ -18981,7 +17020,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.tsmaintenance.process.success exported_name: queue_tsmaintenance_process_success description: Number of replicas successfully processed by the time series maintenance queue @@ -18990,7 +17028,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: queue.tsmaintenance.processingnanos exported_name: queue_tsmaintenance_processingnanos description: Nanoseconds spent processing replicas in the time series maintenance queue @@ -18999,7 +17036,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.commands.pending exported_name: raft_commands_pending description: |- @@ -19011,7 +17047,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.commands.proposed exported_name: raft_commands_proposed description: |- @@ -19023,7 +17058,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/kv - name: raft.commands.reproposed.new-lai exported_name: raft_commands_reproposed_new_lai description: |- @@ -19035,7 +17069,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.commands.reproposed.unchanged exported_name: raft_commands_reproposed_unchanged description: |- @@ -19047,7 +17080,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.commandsapplied exported_name: raft_commandsapplied description: |- @@ -19059,7 +17091,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.dropped exported_name: raft_dropped description: Number of Raft proposals dropped (this counts individial raftpb.Entry, not raftpb.MsgProp) @@ -19068,7 +17099,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.dropped_leader exported_name: raft_dropped_leader description: Number of Raft proposals dropped by a Replica that believes itself to be the leader; each update also increments 'raft.dropped' (this counts individial raftpb.Entry, not raftpb.MsgProp) @@ -19077,7 +17107,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.entrycache.accesses exported_name: raft_entrycache_accesses description: Number of cache lookups in the Raft entry cache @@ -19086,7 +17115,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.entrycache.bytes exported_name: raft_entrycache_bytes description: Aggregate size of all Raft entries in the Raft entry cache @@ -19095,7 +17123,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.entrycache.hits exported_name: raft_entrycache_hits description: Number of successful cache lookups in the Raft entry cache @@ -19104,7 +17131,6 @@ layers: 
unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.entrycache.read_bytes exported_name: raft_entrycache_read_bytes description: Counter of bytes in entries returned from the Raft entry cache @@ -19113,7 +17139,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.entrycache.size exported_name: raft_entrycache_size description: Number of Raft entries in the Raft entry cache @@ -19122,7 +17147,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.flows.entered.state_probe exported_name: raft_flows_entered_state_probe description: The number of leader->peer flows transitioned to StateProbe @@ -19131,7 +17155,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.flows.entered.state_replicate exported_name: raft_flows_entered_state_replicate description: The number of leader->peer flows transitioned to StateReplicate @@ -19140,7 +17163,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.flows.entered.state_snapshot exported_name: raft_flows_entered_state_snapshot description: The number of of leader->peer flows transitioned to StateSnapshot @@ -19149,7 +17171,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.flows.state_probe exported_name: raft_flows_state_probe description: Number of leader->peer flows in StateProbe @@ -19158,7 +17179,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.flows.state_replicate exported_name: raft_flows_state_replicate description: Number of leader->peer flows in StateReplicate @@ -19167,7 +17187,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.flows.state_snapshot exported_name: raft_flows_state_snapshot 
description: Number of leader->peer flows in StateSnapshot @@ -19176,7 +17195,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.fortification.skipped_no_support exported_name: raft_fortification_skipped_no_support description: The number of fortification requests that were skipped (not sent) due to lack of store liveness support @@ -19185,7 +17203,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.fortification_resp.accepted exported_name: raft_fortification_resp_accepted description: The number of accepted fortification responses. Calculated on the raft leader @@ -19194,7 +17211,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.fortification_resp.rejected exported_name: raft_fortification_resp_rejected description: The number of rejected fortification responses. Calculated on the raft leader @@ -19203,7 +17219,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.heartbeats.pending exported_name: raft_heartbeats_pending description: Number of pending heartbeats and responses waiting to be coalesced @@ -19212,7 +17227,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.loaded_entries.bytes exported_name: raft_loaded_entries_bytes description: Bytes allocated by raft Storage.Entries calls that are still kept in memory @@ -19221,7 +17235,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.loaded_entries.reserved.bytes exported_name: raft_loaded_entries_reserved_bytes description: Bytes allocated by raft Storage.Entries calls that are still kept in memory @@ -19230,7 +17243,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.process.applycommitted.latency exported_name: raft_process_applycommitted_latency 
description: |- @@ -19242,7 +17254,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.process.commandcommit.latency exported_name: raft_process_commandcommit_latency description: |- @@ -19256,7 +17267,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.process.handleready.latency exported_name: raft_process_handleready_latency description: |- @@ -19272,7 +17282,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.process.logcommit.latency exported_name: raft_process_logcommit_latency description: |- @@ -19284,7 +17293,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.process.tickingnanos exported_name: raft_process_tickingnanos description: Nanoseconds spent in store.processRaft() processing replica.Tick() @@ -19293,7 +17301,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.process.workingnanos exported_name: raft_process_workingnanos description: |- @@ -19305,7 +17312,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.quota_pool.percent_used exported_name: raft_quota_pool_percent_used description: Histogram of proposal quota pool utilization (0-100) per leaseholder per metrics interval @@ -19314,7 +17320,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.rcvd.app exported_name: raft_rcvd_app description: Number of MsgApp messages received by this store @@ -19323,7 +17328,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.appresp exported_name: raft_rcvd_appresp description: Number of MsgAppResp messages received by this store @@ -19332,7 +17336,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.bytes exported_name: raft_rcvd_bytes description: Number of bytes in Raft messages received by this store. Note that this does not include raft snapshot received. @@ -19341,7 +17344,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.cross_region.bytes exported_name: raft_rcvd_cross_region_bytes description: Number of bytes received by this store for cross region Raft messages when region tiers are configured. Note that this does not include raft snapshot received. @@ -19350,7 +17352,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.cross_zone.bytes exported_name: raft_rcvd_cross_zone_bytes description: Number of bytes received by this store for cross zone, same region Raft messages when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. Note that this does not include raft snapshot received. 
@@ -19359,7 +17360,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.defortifyleader exported_name: raft_rcvd_defortifyleader description: Number of MsgDeFortifyLeader messages received by this store @@ -19368,7 +17368,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.dropped exported_name: raft_rcvd_dropped description: Number of incoming Raft messages dropped (due to queue length or size) @@ -19377,7 +17376,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.dropped_bytes exported_name: raft_rcvd_dropped_bytes description: Bytes of dropped incoming Raft messages @@ -19386,7 +17384,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.fortifyleader exported_name: raft_rcvd_fortifyleader description: Number of MsgFortifyLeader messages received by this store @@ -19395,7 +17392,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.fortifyleaderresp exported_name: raft_rcvd_fortifyleaderresp description: Number of MsgFortifyLeaderResp messages received by this store @@ -19404,7 +17400,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.heartbeat exported_name: raft_rcvd_heartbeat description: Number of (coalesced, if enabled) MsgHeartbeat messages received by this store @@ -19413,7 +17408,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.heartbeatresp exported_name: raft_rcvd_heartbeatresp description: Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store @@ -19422,7 +17416,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/kv - name: raft.rcvd.prevote exported_name: raft_rcvd_prevote description: Number of MsgPreVote messages received by this store @@ -19431,7 +17424,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.prevoteresp exported_name: raft_rcvd_prevoteresp description: Number of MsgPreVoteResp messages received by this store @@ -19440,7 +17432,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.prop exported_name: raft_rcvd_prop description: Number of MsgProp messages received by this store @@ -19449,7 +17440,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.queued_bytes exported_name: raft_rcvd_queued_bytes description: Number of bytes in messages currently waiting for raft processing @@ -19458,7 +17448,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.rcvd.snap exported_name: raft_rcvd_snap description: Number of MsgSnap messages received by this store @@ -19467,7 +17456,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.stepped_bytes exported_name: raft_rcvd_stepped_bytes description: |- @@ -19479,7 +17467,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.timeoutnow exported_name: raft_rcvd_timeoutnow description: Number of MsgTimeoutNow messages received by this store @@ -19488,7 +17475,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.transferleader exported_name: raft_rcvd_transferleader description: Number of MsgTransferLeader messages received by this store @@ -19497,7 +17483,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.vote 
exported_name: raft_rcvd_vote description: Number of MsgVote messages received by this store @@ -19506,7 +17491,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.rcvd.voteresp exported_name: raft_rcvd_voteresp description: Number of MsgVoteResp messages received by this store @@ -19515,7 +17499,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.replication.latency exported_name: raft_replication_latency description: |- @@ -19535,7 +17518,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.scheduler.latency exported_name: raft_scheduler_latency description: |- @@ -19548,7 +17530,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: raft.sent.bytes exported_name: raft_sent_bytes description: Number of bytes in Raft messages sent by this store. Note that this does not include raft snapshot sent. @@ -19557,7 +17538,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.sent.cross_region.bytes exported_name: raft_sent_cross_region_bytes description: "Number of bytes sent by this store for cross region Raft messages when region\ttiers are configured. Note that this does not include raft snapshot sent." @@ -19566,7 +17546,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.sent.cross_zone.bytes exported_name: raft_sent_cross_zone_bytes description: Number of bytes sent by this store for cross zone, same region Raft messages when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. Note that this does not include raft snapshot sent. 
@@ -19575,7 +17554,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.storage.error exported_name: raft_storage_error description: Number of Raft storage errors @@ -19584,7 +17562,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.storage.read_bytes exported_name: raft_storage_read_bytes description: |- @@ -19604,7 +17581,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.ticks exported_name: raft_ticks description: Number of Raft ticks queued @@ -19613,7 +17589,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.timeoutcampaign exported_name: raft_timeoutcampaign description: Number of Raft replicas campaigning after missed heartbeats from leader @@ -19622,7 +17597,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.flow-token-dispatches-dropped exported_name: raft_transport_flow_token_dispatches_dropped description: Number of flow token dispatches dropped by the Raft Transport @@ -19631,7 +17605,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.rcvd exported_name: raft_transport_rcvd description: Number of Raft messages received by the Raft Transport @@ -19640,7 +17613,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.reverse-rcvd exported_name: raft_transport_reverse_rcvd description: |- @@ -19653,7 +17625,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.reverse-sent exported_name: raft_transport_reverse_sent description: |- @@ -19666,7 +17637,6 @@ layers: unit: COUNT aggregation: AVG derivative: 
NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.send-queue-bytes exported_name: raft_transport_send_queue_bytes description: |- @@ -19681,7 +17651,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.transport.send-queue-size exported_name: raft_transport_send_queue_size description: |- @@ -19696,7 +17665,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raft.transport.sends-dropped exported_name: raft_transport_sends_dropped description: Number of Raft message sends dropped by the Raft Transport @@ -19705,7 +17673,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raft.transport.sent exported_name: raft_transport_sent description: Number of Raft messages sent by the Raft Transport @@ -19714,7 +17681,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: raftlog.behind exported_name: raftlog_behind description: |- @@ -19727,7 +17693,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: raftlog.size.max exported_name: raftlog_size_max description: Approximate size of the largest Raft log on the store. @@ -19736,7 +17701,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raftlog.size.total exported_name: raftlog_size_total description: Approximate size of all Raft logs on the store. 
@@ -19745,7 +17709,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: raftlog.truncated exported_name: raftlog_truncated description: Number of Raft log entries truncated @@ -19754,7 +17717,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.adds exported_name: range_adds description: Number of range additions @@ -19764,7 +17726,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: range.raftleaderremovals exported_name: range_raftleaderremovals description: Number of times the current Raft leader was removed from a range @@ -19773,7 +17734,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.raftleadertransfers exported_name: range_raftleadertransfers description: Number of raft leader transfers @@ -19782,7 +17742,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.recoveries exported_name: range_recoveries description: |- @@ -19794,7 +17753,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.removes exported_name: range_removes description: Number of range removals @@ -19803,7 +17761,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.applied-initial exported_name: range_snapshots_applied_initial description: Number of snapshots applied for initial upreplication @@ -19812,7 +17769,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.applied-non-voter exported_name: range_snapshots_applied_non_voter description: Number of snapshots applied by non-voter replicas @@ -19821,7 +17777,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/kv - name: range.snapshots.applied-voter exported_name: range_snapshots_applied_voter description: Number of snapshots applied by voter replicas @@ -19830,7 +17785,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.cross-region.rcvd-bytes exported_name: range_snapshots_cross_region_rcvd_bytes description: Number of snapshot bytes received cross region by this store when region tiers are configured @@ -19839,7 +17793,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.cross-region.sent-bytes exported_name: range_snapshots_cross_region_sent_bytes description: Number of snapshot bytes sent cross region by this store when region tiers are configured @@ -19848,7 +17801,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.cross-zone.rcvd-bytes exported_name: range_snapshots_cross_zone_rcvd_bytes description: Number of snapshot bytes received cross zone within the same region by this store when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. @@ -19857,7 +17809,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.cross-zone.sent-bytes exported_name: range_snapshots_cross_zone_sent_bytes description: Number of snapshot bytes sent cross zone within the same region by this store when zone tiers are configured. If region tiers are not set, it is assumed to be within the same region. To ensure accurate monitoring of cross-zone data transfer, region and zone tiers should be consistently configured across all nodes. 
@@ -19866,7 +17817,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.delegate.failures exported_name: range_snapshots_delegate_failures description: Number of snapshots that were delegated to a different node and resulted in failure on that delegate. There are numerous reasons a failure can occur on a delegate such as timeout, the delegate Raft log being too far behind or the delegate being too busy to send. @@ -19875,7 +17825,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.delegate.in-progress exported_name: range_snapshots_delegate_in_progress description: Number of delegated snapshots that are currently in-flight. @@ -19884,7 +17833,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.delegate.sent-bytes exported_name: range_snapshots_delegate_sent_bytes description: |- @@ -19896,7 +17844,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.delegate.successes exported_name: range_snapshots_delegate_successes description: Number of snapshots that were delegated to a different node and resulted in success on that delegate. This does not count self delegated snapshots. 
@@ -19905,7 +17852,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.generated exported_name: range_snapshots_generated description: Number of generated snapshots @@ -19914,7 +17860,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.rcvd-bytes exported_name: range_snapshots_rcvd_bytes description: Number of snapshot bytes received @@ -19923,7 +17868,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.rebalancing.rcvd-bytes exported_name: range_snapshots_rebalancing_rcvd_bytes description: Number of rebalancing snapshot bytes received @@ -19932,7 +17876,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.rebalancing.sent-bytes exported_name: range_snapshots_rebalancing_sent_bytes description: Number of rebalancing snapshot bytes sent @@ -19941,7 +17884,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.recovery.rcvd-bytes exported_name: range_snapshots_recovery_rcvd_bytes description: Number of raft recovery snapshot bytes received @@ -19950,7 +17892,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.recovery.sent-bytes exported_name: range_snapshots_recovery_sent_bytes description: Number of raft recovery snapshot bytes sent @@ -19959,7 +17900,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.recv-failed exported_name: range_snapshots_recv_failed description: Number of range snapshot initialization messages that errored out on the recipient, typically before any data is transferred @@ -19968,7 +17908,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.recv-in-progress exported_name: range_snapshots_recv_in_progress description: Number of non-empty snapshots being received @@ -19977,7 +17916,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.recv-queue exported_name: range_snapshots_recv_queue description: Number of snapshots queued to receive @@ -19986,7 +17924,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.recv-queue-bytes exported_name: range_snapshots_recv_queue_bytes description: Total size of all snapshots in the snapshot receive queue @@ -19995,7 +17932,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.recv-total-in-progress exported_name: range_snapshots_recv_total_in_progress description: Number of total snapshots being received @@ -20004,7 +17940,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.recv-unusable exported_name: range_snapshots_recv_unusable description: Number of range snapshot that were fully transmitted but determined to be unnecessary or unusable @@ -20013,7 +17948,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.send-in-progress exported_name: range_snapshots_send_in_progress description: Number of non-empty snapshots being sent @@ -20022,7 +17956,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.send-queue exported_name: range_snapshots_send_queue description: Number of snapshots queued to send @@ -20032,7 +17965,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: range.snapshots.send-queue-bytes exported_name: range_snapshots_send_queue_bytes description: Total size of all snapshots in the snapshot 
send queue @@ -20041,7 +17973,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.send-total-in-progress exported_name: range_snapshots_send_total_in_progress description: Number of total snapshots being sent @@ -20050,7 +17981,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: range.snapshots.sent-bytes exported_name: range_snapshots_sent_bytes description: Number of snapshot bytes sent @@ -20059,7 +17989,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.unknown.rcvd-bytes exported_name: range_snapshots_unknown_rcvd_bytes description: Number of unknown snapshot bytes received @@ -20068,7 +17997,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.unknown.sent-bytes exported_name: range_snapshots_unknown_sent_bytes description: Number of unknown snapshot bytes sent @@ -20077,7 +18005,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.upreplication.rcvd-bytes exported_name: range_snapshots_upreplication_rcvd_bytes description: Number of upreplication snapshot bytes received @@ -20086,7 +18013,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: range.snapshots.upreplication.sent-bytes exported_name: range_snapshots_upreplication_sent_bytes description: Number of upreplication snapshot bytes sent @@ -20095,7 +18021,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rangekeybytes exported_name: rangekeybytes description: Number of bytes taken up by range keys (e.g. 
MVCC range tombstones) @@ -20104,7 +18029,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rangekeycount exported_name: rangekeycount description: Count of all range keys (e.g. MVCC range tombstones) @@ -20113,7 +18037,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: ranges.decommissioning exported_name: ranges_decommissioning description: Number of ranges with at lease one replica on a decommissioning node @@ -20123,7 +18046,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.enqueue exported_name: ranges_decommissioning_nudger_enqueue labeled_name: 'ranges.decommissioning.nudger.enqueue{status: enqueue}' @@ -20133,7 +18055,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.enqueue.failure exported_name: ranges_decommissioning_nudger_enqueue_failure labeled_name: ranges.decommissioning.nudger.enqueue.failure @@ -20143,7 +18064,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.enqueue.success exported_name: ranges_decommissioning_nudger_enqueue_success labeled_name: ranges.decommissioning.nudger.enqueue.success @@ -20153,7 +18073,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.not_leaseholder_or_invalid_lease exported_name: ranges_decommissioning_nudger_not_leaseholder_or_invalid_lease labeled_name: ranges.decommissioning.nudger.not_leaseholder_or_invalid_lease @@ -20163,7 +18082,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.process.failure exported_name: ranges_decommissioning_nudger_process_failure labeled_name: 
ranges.decommissioning.nudger.process.failure @@ -20173,7 +18091,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.decommissioning.nudger.process.success exported_name: ranges_decommissioning_nudger_process_success labeled_name: ranges.decommissioning.nudger.process.success @@ -20183,7 +18100,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: ranges.overreplicated exported_name: ranges_overreplicated description: Number of ranges with more live replicas than the replication target @@ -20192,7 +18108,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rangevalbytes exported_name: rangevalbytes description: Number of bytes taken up by range key values (e.g. MVCC range tombstones) @@ -20201,7 +18116,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rangevalcount exported_name: rangevalcount description: Count of all range key values (e.g. MVCC range tombstones) @@ -20210,7 +18124,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rebalancing.readbytespersecond exported_name: rebalancing_readbytespersecond description: Number of bytes read recently per second, considering the last 30 minutes. @@ -20219,7 +18132,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rebalancing.readspersecond exported_name: rebalancing_readspersecond description: Number of keys read recently per second, considering the last 30 minutes. @@ -20228,7 +18140,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rebalancing.requestspersecond exported_name: rebalancing_requestspersecond description: Number of requests received recently per second, considering the last 30 minutes. 
@@ -20237,7 +18148,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rebalancing.state.imbalanced_overfull_options_exhausted exported_name: rebalancing_state_imbalanced_overfull_options_exhausted description: Number of occurrences where this store was overfull but failed to shed load after exhausting available rebalance options @@ -20246,7 +18156,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rebalancing.writebytespersecond exported_name: rebalancing_writebytespersecond description: Number of bytes written recently per second, considering the last 30 minutes. @@ -20255,7 +18164,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rebalancing.writespersecond exported_name: rebalancing_writespersecond description: Number of keys written (i.e. applied by raft) per second to the store, considering the last 30 minutes. @@ -20264,7 +18172,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.asleep exported_name: replicas_asleep description: Number of asleep replicas. Similarly to quiesced replicas, asleep replicas do not tick in Raft. 
@@ -20273,7 +18180,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.cpunanospersecond exported_name: replicas_cpunanospersecond description: Nanoseconds of CPU time in Replica request processing including evaluation but not replication @@ -20282,7 +18188,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: replicas.leaders exported_name: replicas_leaders description: Number of raft leaders @@ -20291,7 +18196,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.leaders_invalid_lease exported_name: replicas_leaders_invalid_lease description: Number of replicas that are Raft leaders whose lease is invalid @@ -20300,7 +18204,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.leaders_not_fortified exported_name: replicas_leaders_not_fortified description: Number of replicas that are not fortified Raft leaders @@ -20309,7 +18212,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.leaders_not_leaseholders exported_name: replicas_leaders_not_leaseholders description: Number of replicas that are Raft leaders whose range lease is held by another store @@ -20318,7 +18220,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.quiescent exported_name: replicas_quiescent description: Number of quiesced replicas @@ -20327,7 +18228,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.reserved exported_name: replicas_reserved description: Number of replicas reserved for snapshots @@ -20336,7 +18236,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: replicas.uninitialized exported_name: replicas_uninitialized description: Number of uninitialized replicas, this does not include uninitialized replicas 
that can lie dormant in a persistent state. @@ -20345,7 +18244,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: requests.backpressure.split exported_name: requests_backpressure_split description: |- @@ -20357,7 +18255,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: requests.slow.latch exported_name: requests_slow_latch description: |- @@ -20372,7 +18269,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: requests.slow.lease exported_name: requests_slow_lease description: |- @@ -20385,7 +18281,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: requests.slow.raft exported_name: requests_slow_raft description: |- @@ -20400,7 +18295,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: rocksdb.block.cache.usage exported_name: rocksdb_block_cache_usage description: Bytes used by the block cache @@ -20409,7 +18303,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rocksdb.bloom.filter.prefix.checked exported_name: rocksdb_bloom_filter_prefix_checked description: Number of times the bloom filter was checked @@ -20418,7 +18311,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.bloom.filter.prefix.useful exported_name: rocksdb_bloom_filter_prefix_useful description: Number of times the bloom filter helped avoid iterator creation @@ -20427,7 +18319,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.compacted-bytes-read exported_name: rocksdb_compacted_bytes_read description: Bytes read during compaction @@ -20436,7 +18327,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.compacted-bytes-written exported_name: 
rocksdb_compacted_bytes_written description: Bytes written during compaction @@ -20445,7 +18335,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.encryption.algorithm exported_name: rocksdb_encryption_algorithm description: Algorithm in use for encryption-at-rest, see storage/enginepb/key_registry.proto @@ -20454,7 +18343,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rocksdb.estimated-pending-compaction exported_name: rocksdb_estimated_pending_compaction description: Estimated pending compaction bytes @@ -20463,7 +18351,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rocksdb.flushed-bytes exported_name: rocksdb_flushed_bytes description: Bytes written during flush @@ -20472,7 +18359,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.flushes exported_name: rocksdb_flushes description: Number of table flushes @@ -20481,7 +18367,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.ingested-bytes exported_name: rocksdb_ingested_bytes description: Bytes ingested @@ -20490,7 +18375,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rocksdb.memtable.total-size exported_name: rocksdb_memtable_total_size description: Current size of memtable in bytes @@ -20499,7 +18383,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rocksdb.num-sstables exported_name: rocksdb_num_sstables description: Number of storage engine SSTables @@ -20508,7 +18391,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rocksdb.read-amplification exported_name: rocksdb_read_amplification description: Number of disk reads per query @@ -20518,7 +18400,6 @@ layers: aggregation: AVG 
derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: rocksdb.table-readers-mem-estimate exported_name: rocksdb_table_readers_mem_estimate description: Memory used by index and filter blocks @@ -20527,7 +18408,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.batches.recv exported_name: rpc_batches_recv description: Number of batches processed @@ -20536,7 +18416,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.addsstable.recv exported_name: rpc_method_addsstable_recv description: Number of AddSSTable requests processed @@ -20545,7 +18424,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminchangereplicas.recv exported_name: rpc_method_adminchangereplicas_recv description: Number of AdminChangeReplicas requests processed @@ -20554,7 +18432,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminmerge.recv exported_name: rpc_method_adminmerge_recv description: Number of AdminMerge requests processed @@ -20563,7 +18440,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminrelocaterange.recv exported_name: rpc_method_adminrelocaterange_recv description: Number of AdminRelocateRange requests processed @@ -20572,7 +18448,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminscatter.recv exported_name: rpc_method_adminscatter_recv description: Number of AdminScatter requests processed @@ -20581,7 +18456,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminsplit.recv exported_name: rpc_method_adminsplit_recv description: Number of AdminSplit requests processed @@ -20590,7 +18464,6 @@ 
layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.admintransferlease.recv exported_name: rpc_method_admintransferlease_recv description: Number of AdminTransferLease requests processed @@ -20599,7 +18472,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.adminunsplit.recv exported_name: rpc_method_adminunsplit_recv description: Number of AdminUnsplit requests processed @@ -20608,7 +18480,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.barrier.recv exported_name: rpc_method_barrier_recv description: Number of Barrier requests processed @@ -20617,7 +18488,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.checkconsistency.recv exported_name: rpc_method_checkconsistency_recv description: Number of CheckConsistency requests processed @@ -20626,7 +18496,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.clearrange.recv exported_name: rpc_method_clearrange_recv description: Number of ClearRange requests processed @@ -20635,7 +18504,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.computechecksum.recv exported_name: rpc_method_computechecksum_recv description: Number of ComputeChecksum requests processed @@ -20644,7 +18512,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.conditionalput.recv exported_name: rpc_method_conditionalput_recv description: Number of ConditionalPut requests processed @@ -20653,7 +18520,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.delete.recv exported_name: rpc_method_delete_recv 
description: Number of Delete requests processed @@ -20662,7 +18528,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.deleterange.recv exported_name: rpc_method_deleterange_recv description: Number of DeleteRange requests processed @@ -20671,7 +18536,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.endtxn.recv exported_name: rpc_method_endtxn_recv description: Number of EndTxn requests processed @@ -20680,7 +18544,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.excise.recv exported_name: rpc_method_excise_recv description: Number of Excise requests processed @@ -20689,7 +18552,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.export.recv exported_name: rpc_method_export_recv description: Number of Export requests processed @@ -20698,7 +18560,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.flushlocktable.recv exported_name: rpc_method_flushlocktable_recv description: Number of FlushLockTable requests processed @@ -20707,7 +18568,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.gc.recv exported_name: rpc_method_gc_recv description: Number of GC requests processed @@ -20716,7 +18576,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.get.recv exported_name: rpc_method_get_recv description: Number of Get requests processed @@ -20725,7 +18584,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.heartbeattxn.recv exported_name: rpc_method_heartbeattxn_recv description: Number of HeartbeatTxn requests processed @@ 
-20734,7 +18592,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.increment.recv exported_name: rpc_method_increment_recv description: Number of Increment requests processed @@ -20743,7 +18600,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.isspanempty.recv exported_name: rpc_method_isspanempty_recv description: Number of IsSpanEmpty requests processed @@ -20752,7 +18608,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.leaseinfo.recv exported_name: rpc_method_leaseinfo_recv description: Number of LeaseInfo requests processed @@ -20761,7 +18616,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.linkexternalsstable.recv exported_name: rpc_method_linkexternalsstable_recv description: Number of LinkExternalSSTable requests processed @@ -20770,7 +18624,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.merge.recv exported_name: rpc_method_merge_recv description: Number of Merge requests processed @@ -20779,7 +18632,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.migrate.recv exported_name: rpc_method_migrate_recv description: Number of Migrate requests processed @@ -20788,7 +18640,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.probe.recv exported_name: rpc_method_probe_recv description: Number of Probe requests processed @@ -20797,7 +18648,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.pushtxn.recv exported_name: rpc_method_pushtxn_recv description: Number of PushTxn requests processed @@ -20806,7 
+18656,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.put.recv exported_name: rpc_method_put_recv description: Number of Put requests processed @@ -20815,7 +18664,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.queryintent.recv exported_name: rpc_method_queryintent_recv description: Number of QueryIntent requests processed @@ -20824,7 +18672,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.querylocks.recv exported_name: rpc_method_querylocks_recv description: Number of QueryLocks requests processed @@ -20833,7 +18680,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.queryresolvedtimestamp.recv exported_name: rpc_method_queryresolvedtimestamp_recv description: Number of QueryResolvedTimestamp requests processed @@ -20842,7 +18688,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.querytxn.recv exported_name: rpc_method_querytxn_recv description: Number of QueryTxn requests processed @@ -20851,7 +18696,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.rangestats.recv exported_name: rpc_method_rangestats_recv description: Number of RangeStats requests processed @@ -20860,7 +18704,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.recomputestats.recv exported_name: rpc_method_recomputestats_recv description: Number of RecomputeStats requests processed @@ -20869,7 +18712,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.recovertxn.recv exported_name: rpc_method_recovertxn_recv description: Number of 
RecoverTxn requests processed @@ -20878,7 +18720,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.refresh.recv exported_name: rpc_method_refresh_recv description: Number of Refresh requests processed @@ -20887,7 +18728,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.refreshrange.recv exported_name: rpc_method_refreshrange_recv description: Number of RefreshRange requests processed @@ -20896,7 +18736,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.requestlease.recv exported_name: rpc_method_requestlease_recv description: Number of RequestLease requests processed @@ -20905,7 +18744,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.resolveintent.recv exported_name: rpc_method_resolveintent_recv description: Number of ResolveIntent requests processed @@ -20914,7 +18752,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.resolveintentrange.recv exported_name: rpc_method_resolveintentrange_recv description: Number of ResolveIntentRange requests processed @@ -20923,7 +18760,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.reversescan.recv exported_name: rpc_method_reversescan_recv description: Number of ReverseScan requests processed @@ -20932,7 +18768,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.revertrange.recv exported_name: rpc_method_revertrange_recv description: Number of RevertRange requests processed @@ -20941,7 +18776,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.scan.recv exported_name: 
rpc_method_scan_recv description: Number of Scan requests processed @@ -20950,7 +18784,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.subsume.recv exported_name: rpc_method_subsume_recv description: Number of Subsume requests processed @@ -20959,7 +18792,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.transferlease.recv exported_name: rpc_method_transferlease_recv description: Number of TransferLease requests processed @@ -20968,7 +18800,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.truncatelog.recv exported_name: rpc_method_truncatelog_recv description: Number of TruncateLog requests processed @@ -20977,7 +18808,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.method.writebatch.recv exported_name: rpc_method_writebatch_recv description: Number of WriteBatch requests processed @@ -20986,7 +18816,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: rpc.streams.mux_rangefeed.active exported_name: rpc_streams_mux_rangefeed_active description: Number of currently running MuxRangeFeed streams @@ -20995,7 +18824,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: rpc.streams.mux_rangefeed.recv exported_name: rpc_streams_mux_rangefeed_recv description: Total number of MuxRangeFeed streams @@ -21004,7 +18832,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: spanconfig.kvsubscriber.oldest_protected_record_nanos exported_name: spanconfig_kvsubscriber_oldest_protected_record_nanos description: Difference between the current time and the oldest protected timestamp (sudden drops indicate a record being released; an ever increasing number 
indicates that the oldest record is around and preventing GC if > configured GC TTL) @@ -21013,7 +18840,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: spanconfig.kvsubscriber.protected_record_count exported_name: spanconfig_kvsubscriber_protected_record_count description: Number of protected timestamp records, as seen by KV @@ -21022,7 +18848,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: spanconfig.kvsubscriber.update_behind_nanos exported_name: spanconfig_kvsubscriber_update_behind_nanos description: Difference between the current time and when the KVSubscriber received its last update (an ever increasing number indicates that we're no longer receiving updates) @@ -21031,7 +18856,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.batch-commit.commit-wait.duration exported_name: storage_batch_commit_commit_wait_duration description: Cumulative time spent waiting for WAL sync, for batch commit. See storage.AggregatedBatchCommitStats for details. @@ -21040,7 +18864,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.count exported_name: storage_batch_commit_count description: Count of batch commits. See storage.AggregatedBatchCommitStats for details. @@ -21049,7 +18872,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.duration exported_name: storage_batch_commit_duration description: Cumulative time spent in batch commit. See storage.AggregatedBatchCommitStats for details. 
@@ -21058,7 +18880,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.l0-stall.duration exported_name: storage_batch_commit_l0_stall_duration description: Cumulative time spent in a write stall due to high read amplification in L0, for batch commit. See storage.AggregatedBatchCommitStats for details. @@ -21067,7 +18888,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.mem-stall.duration exported_name: storage_batch_commit_mem_stall_duration description: Cumulative time spent in a write stall due to too many memtables, for batch commit. See storage.AggregatedBatchCommitStats for details. @@ -21076,7 +18896,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.sem-wait.duration exported_name: storage_batch_commit_sem_wait_duration description: Cumulative time spent in semaphore wait, for batch commit. See storage.AggregatedBatchCommitStats for details. @@ -21085,7 +18904,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.wal-queue-wait.duration exported_name: storage_batch_commit_wal_queue_wait_duration description: Cumulative time spent waiting for memory blocks in the WAL queue, for batch commit. See storage.AggregatedBatchCommitStats for details. @@ -21094,7 +18912,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.batch-commit.wal-rotation.duration exported_name: storage_batch_commit_wal_rotation_duration description: Cumulative time spent waiting for WAL rotation, for batch commit. See storage.AggregatedBatchCommitStats for details. 
@@ -21103,7 +18920,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.block-load.active exported_name: storage_block_load_active description: The number of sstable block loads currently in progress @@ -21112,7 +18928,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.block-load.queued exported_name: storage_block_load_queued description: 'The cumulative number of SSTable block loads that were delayed because too many loads were active (see also: ''storage.block_load.node_max_active'')' @@ -21121,7 +18936,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-compressed.l5.data exported_name: storage_bytes_compressed_l5_data labeled_name: 'storage.bytes-compressed{level: 5, type: data}' @@ -21131,7 +18945,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-compressed.l5.values exported_name: storage_bytes_compressed_l5_values labeled_name: 'storage.bytes-compressed{level: 5, type: values}' @@ -21141,7 +18954,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-compressed.l6.data exported_name: storage_bytes_compressed_l6_data labeled_name: 'storage.bytes-compressed{level: 6, type: data}' @@ -21151,7 +18963,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-compressed.l6.values exported_name: storage_bytes_compressed_l6_values labeled_name: 'storage.bytes-compressed{level: 6, type: values}' @@ -21161,7 +18972,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-decompressed.l5.data exported_name: storage_bytes_decompressed_l5_data labeled_name: 'storage.bytes-decompressed{level: 5, type: data}' @@ 
-21171,7 +18981,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-decompressed.l5.values exported_name: storage_bytes_decompressed_l5_values labeled_name: 'storage.bytes-decompressed{level: 5, type: values}' @@ -21181,7 +18990,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-decompressed.l6.data exported_name: storage_bytes_decompressed_l6_data labeled_name: 'storage.bytes-decompressed{level: 6, type: data}' @@ -21191,7 +18999,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.bytes-decompressed.l6.values exported_name: storage_bytes_decompressed_l6_values labeled_name: 'storage.bytes-decompressed{level: 6, type: values}' @@ -21201,7 +19008,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-crdb-log.bytes-written exported_name: storage_category_crdb_log_bytes_written description: Bytes written to disk @@ -21210,7 +19016,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-encryption-registry.bytes-written exported_name: storage_category_encryption_registry_bytes_written description: Bytes written to disk @@ -21219,7 +19024,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-pebble-compaction.bytes-written exported_name: storage_category_pebble_compaction_bytes_written description: Bytes written to disk @@ -21228,7 +19032,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-pebble-ingestion.bytes-written exported_name: storage_category_pebble_ingestion_bytes_written description: Bytes written to disk @@ -21237,7 +19040,6 @@ layers: unit: BYTES aggregation: AVG 
derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-pebble-manifest.bytes-written exported_name: storage_category_pebble_manifest_bytes_written description: Bytes written to disk @@ -21246,7 +19048,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-pebble-memtable-flush.bytes-written exported_name: storage_category_pebble_memtable_flush_bytes_written description: Bytes written to disk @@ -21255,7 +19056,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-pebble-wal.bytes-written exported_name: storage_category_pebble_wal_bytes_written description: Bytes written to disk @@ -21264,7 +19064,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-raft-snapshot.bytes-written exported_name: storage_category_raft_snapshot_bytes_written description: Bytes written to disk @@ -21273,7 +19072,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-sql-col-spill.bytes-written exported_name: storage_category_sql_col_spill_bytes_written description: Bytes written to disk @@ -21282,7 +19080,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.category-unspecified.bytes-written exported_name: storage_category_unspecified_bytes_written description: Bytes written to disk @@ -21291,7 +19088,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.checkpoints exported_name: storage_checkpoints description: |- @@ -21305,7 +19101,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compactions.cancelled.bytes exported_name: storage_compactions_cancelled_bytes description: Cumulative volume of data 
written to sstables during compactions that were ultimately cancelled due to a conflicting operation. @@ -21314,7 +19109,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.compactions.cancelled.count exported_name: storage_compactions_cancelled_count description: Cumulative count of compactions that were cancelled before they completed due to a conflicting operation. @@ -21323,7 +19117,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.compactions.duration exported_name: storage_compactions_duration description: |- @@ -21335,7 +19128,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.compactions.keys.pinned.bytes exported_name: storage_compactions_keys_pinned_bytes description: |- @@ -21347,7 +19139,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.compactions.keys.pinned.count exported_name: storage_compactions_keys_pinned_count description: |- @@ -21359,7 +19150,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.compression.cr exported_name: storage_compression_cr description: Average compression ratio of sstable and blob value data. @@ -21368,7 +19158,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.minlz.bytes exported_name: storage_compression_minlz_bytes description: Total on disk size of sstable and blob value data that is compressed with the MinLZ algorithm. @@ -21377,7 +19166,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.minlz.cr exported_name: storage_compression_minlz_cr description: Average compression ratio of sstable and blob value data that is compressed with the MinLZ algorithm. 
@@ -21386,7 +19174,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.none.bytes exported_name: storage_compression_none_bytes description: Total on disk size of sstable and blob value data that is not compressed. @@ -21395,7 +19182,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.snappy.bytes exported_name: storage_compression_snappy_bytes description: Total on disk size of sstable and blob value data that is compressed with the Snappy algorithm. @@ -21404,7 +19190,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.snappy.cr exported_name: storage_compression_snappy_cr description: Average compression ratio of sstable and blob value data that is compressed with the snappy algorithm. @@ -21413,7 +19198,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.unknown.bytes exported_name: storage_compression_unknown_bytes description: Total on disk size of sstable and blob value data that is compressed but for which we have no compression statistics. @@ -21422,7 +19206,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.zstd.bytes exported_name: storage_compression_zstd_bytes description: Total on disk size of sstable and blob value data that is compressed with the Zstd algorithm. @@ -21431,7 +19214,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.compression.zstd.cr exported_name: storage_compression_zstd_cr description: Average compression ratio of sstable and blob value data that is compressed with the Zstd algorithm. 
@@ -21440,7 +19222,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.disk-slow exported_name: storage_disk_slow description: Number of instances of disk operations taking longer than 10s @@ -21450,7 +19231,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk-stalled exported_name: storage_disk_stalled description: Number of instances of disk operations taking longer than 20s @@ -21460,7 +19240,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk-unhealthy.duration exported_name: storage_disk_unhealthy_duration description: Total disk unhealthy duration in nanos @@ -21469,7 +19248,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.disk.io.time exported_name: storage_disk_io_time description: Time spent reading from or writing to the store's disk since this process started (as reported by the OS) @@ -21478,7 +19256,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.disk.iopsinprogress exported_name: storage_disk_iopsinprogress description: IO operations currently in progress on the store's disk (as reported by the OS) @@ -21488,7 +19265,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.read-max.bytespersecond exported_name: storage_disk_read_max_bytespersecond description: Maximum rate at which bytes were read from disk (as reported by the OS) @@ -21498,7 +19274,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.read-max.iops exported_name: storage_disk_read_max_iops description: Maximum rate of read operations performed on the disk (as reported by the OS) @@ -21507,7 +19282,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.disk.read.bytes exported_name: storage_disk_read_bytes description: Bytes read from the store's disk since this process started (as reported by the OS) @@ -21517,7 +19291,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.read.count exported_name: storage_disk_read_count description: Disk read operations on the store's disk since this process started (as reported by the OS) @@ -21527,7 +19300,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.read.time exported_name: storage_disk_read_time description: Time spent reading from the store's disk since this process started (as reported by the OS) @@ -21536,7 +19308,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.disk.weightedio.time exported_name: storage_disk_weightedio_time description: Weighted time spent reading from or writing to the store's disk since this process started (as reported by the OS) @@ -21545,7 +19316,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.disk.write-max.bytespersecond exported_name: storage_disk_write_max_bytespersecond description: Maximum rate at which bytes were written to disk (as reported by the OS) @@ -21555,7 +19325,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.write-max.iops exported_name: storage_disk_write_max_iops description: Maximum rate of write operations performed on the disk (as reported by the OS) @@ -21564,7 +19333,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.disk.write.bytes exported_name: storage_disk_write_bytes description: Bytes written to the store's disk since this 
process started (as reported by the OS) @@ -21574,7 +19342,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.write.count exported_name: storage_disk_write_count description: Disk write operations on the store's disk since this process started (as reported by the OS) @@ -21584,7 +19351,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.disk.write.time exported_name: storage_disk_write_time description: Time spent writing to the store's disks since this process started (as reported by the OS) @@ -21593,7 +19359,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.flush.ingest.count exported_name: storage_flush_ingest_count description: Flushes performing an ingest (flushable ingestions) @@ -21602,7 +19367,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.flush.ingest.table.bytes exported_name: storage_flush_ingest_table_bytes description: Bytes ingested via flushes (flushable ingestions) @@ -21611,7 +19375,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.flush.ingest.table.count exported_name: storage_flush_ingest_table_count description: Tables ingested via flushes (flushable ingestions) @@ -21620,7 +19383,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.flush.utilization exported_name: storage_flush_utilization description: The percentage of time the storage engine is actively flushing memtables to disk. 
@@ -21629,7 +19391,6 @@ layers: unit: PERCENT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.ingest.count exported_name: storage_ingest_count description: Number of successful ingestions performed @@ -21638,7 +19399,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.initial_stats_complete exported_name: storage_initial_stats_complete description: Set to 1 when initial table stats collection is complete. @@ -21647,7 +19407,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.iterator.block-load.bytes exported_name: storage_iterator_block_load_bytes description: Bytes loaded by storage engine iterators (possibly cached). See storage.AggregatedIteratorStats for details. @@ -21656,7 +19415,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.block-load.cached-bytes exported_name: storage_iterator_block_load_cached_bytes description: Bytes loaded by storage engine iterators from the block cache. See storage.AggregatedIteratorStats for details. @@ -21665,7 +19423,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.block-load.read-duration exported_name: storage_iterator_block_load_read_duration description: Cumulative time storage engine iterators spent loading blocks from durable storage. See storage.AggregatedIteratorStats for details. @@ -21674,7 +19431,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-abort-span.block-load.bytes exported_name: storage_iterator_category_abort_span_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21683,7 +19439,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-abort-span.block-load.cached-bytes exported_name: storage_iterator_category_abort_span_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21692,7 +19447,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-abort-span.block-load.latency-sum exported_name: storage_iterator_category_abort_span_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21701,7 +19455,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-backup.block-load.bytes exported_name: storage_iterator_category_backup_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21710,7 +19463,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-backup.block-load.cached-bytes exported_name: storage_iterator_category_backup_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21719,7 +19471,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-backup.block-load.latency-sum exported_name: storage_iterator_category_backup_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21728,7 +19479,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-batch-eval.block-load.bytes exported_name: storage_iterator_category_batch_eval_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21737,7 +19487,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-batch-eval.block-load.cached-bytes exported_name: storage_iterator_category_batch_eval_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21746,7 +19495,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-batch-eval.block-load.latency-sum exported_name: storage_iterator_category_batch_eval_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21755,7 +19503,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-consistency-checker.block-load.bytes exported_name: storage_iterator_category_consistency_checker_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21764,7 +19511,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-consistency-checker.block-load.cached-bytes exported_name: storage_iterator_category_consistency_checker_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21773,7 +19519,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-consistency-checker.block-load.latency-sum exported_name: storage_iterator_category_consistency_checker_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21782,7 +19527,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-crdb-unknown.block-load.bytes exported_name: storage_iterator_category_crdb_unknown_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21791,7 +19535,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-crdb-unknown.block-load.cached-bytes exported_name: storage_iterator_category_crdb_unknown_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21800,7 +19543,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-crdb-unknown.block-load.latency-sum exported_name: storage_iterator_category_crdb_unknown_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21809,7 +19551,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-intent-resolution.block-load.bytes exported_name: storage_iterator_category_intent_resolution_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21818,7 +19559,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-intent-resolution.block-load.cached-bytes exported_name: storage_iterator_category_intent_resolution_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21827,7 +19567,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-intent-resolution.block-load.latency-sum exported_name: storage_iterator_category_intent_resolution_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21836,7 +19575,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-mvcc-gc.block-load.bytes exported_name: storage_iterator_category_mvcc_gc_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21845,7 +19583,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-mvcc-gc.block-load.cached-bytes exported_name: storage_iterator_category_mvcc_gc_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21854,7 +19591,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-mvcc-gc.block-load.latency-sum exported_name: storage_iterator_category_mvcc_gc_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21863,7 +19599,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-compaction.block-load.bytes exported_name: storage_iterator_category_pebble_compaction_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21872,7 +19607,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-compaction.block-load.cached-bytes exported_name: storage_iterator_category_pebble_compaction_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21881,7 +19615,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-compaction.block-load.latency-sum exported_name: storage_iterator_category_pebble_compaction_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21890,7 +19623,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-get.block-load.bytes exported_name: storage_iterator_category_pebble_get_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21899,7 +19631,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-get.block-load.cached-bytes exported_name: storage_iterator_category_pebble_get_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21908,7 +19639,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-get.block-load.latency-sum exported_name: storage_iterator_category_pebble_get_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21917,7 +19647,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-ingest.block-load.bytes exported_name: storage_iterator_category_pebble_ingest_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21926,7 +19655,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-ingest.block-load.cached-bytes exported_name: storage_iterator_category_pebble_ingest_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21935,7 +19663,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-pebble-ingest.block-load.latency-sum exported_name: storage_iterator_category_pebble_ingest_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21944,7 +19671,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-range-snap.block-load.bytes exported_name: storage_iterator_category_range_snap_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21953,7 +19679,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-range-snap.block-load.cached-bytes exported_name: storage_iterator_category_range_snap_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21962,7 +19687,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-range-snap.block-load.latency-sum exported_name: storage_iterator_category_range_snap_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21971,7 +19695,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-rangefeed.block-load.bytes exported_name: storage_iterator_category_rangefeed_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -21980,7 +19703,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-rangefeed.block-load.cached-bytes exported_name: storage_iterator_category_rangefeed_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -21989,7 +19711,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-rangefeed.block-load.latency-sum exported_name: storage_iterator_category_rangefeed_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -21998,7 +19719,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-replication.block-load.bytes exported_name: storage_iterator_category_replication_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -22007,7 +19727,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-replication.block-load.cached-bytes exported_name: storage_iterator_category_replication_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -22016,7 +19735,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-replication.block-load.latency-sum exported_name: storage_iterator_category_replication_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -22025,7 +19743,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-background.block-load.bytes exported_name: storage_iterator_category_scan_background_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -22034,7 +19751,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-background.block-load.cached-bytes exported_name: storage_iterator_category_scan_background_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -22043,7 +19759,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-background.block-load.latency-sum exported_name: storage_iterator_category_scan_background_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -22052,7 +19767,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-regular.block-load.bytes exported_name: storage_iterator_category_scan_regular_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -22061,7 +19775,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-regular.block-load.cached-bytes exported_name: storage_iterator_category_scan_regular_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -22070,7 +19783,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-scan-regular.block-load.latency-sum exported_name: storage_iterator_category_scan_regular_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -22079,7 +19791,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-unknown.block-load.bytes exported_name: storage_iterator_category_unknown_block_load_bytes description: Bytes loaded by storage sstable iterators (possibly cached). 
@@ -22088,7 +19799,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-unknown.block-load.cached-bytes exported_name: storage_iterator_category_unknown_block_load_cached_bytes description: Bytes loaded by storage sstable iterators from the block cache @@ -22097,7 +19807,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.category-unknown.block-load.latency-sum exported_name: storage_iterator_category_unknown_block_load_latency_sum description: Cumulative latency for loading bytes not in the block cache, by storage sstable iterators @@ -22106,7 +19815,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.external.seeks exported_name: storage_iterator_external_seeks description: Cumulative count of seeks performed on storage engine iterators. See storage.AggregatedIteratorStats for details. @@ -22115,7 +19823,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.external.steps exported_name: storage_iterator_external_steps description: Cumulative count of steps performed on storage engine iterators. See storage.AggregatedIteratorStats for details. 
@@ -22124,7 +19831,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.internal.seeks exported_name: storage_iterator_internal_seeks description: |- @@ -22138,7 +19844,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.iterator.internal.steps exported_name: storage_iterator_internal_steps description: |- @@ -22152,7 +19857,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.keys.range-key-set.count exported_name: storage_keys_range_key_set_count description: Approximate count of RangeKeySet internal keys across the storage engine. @@ -22161,7 +19865,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.keys.tombstone.count exported_name: storage_keys_tombstone_count description: Approximate count of DEL, SINGLEDEL and RANGEDEL internal keys across the storage engine. 
@@ -22171,7 +19874,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.l0-bytes-flushed exported_name: storage_l0_bytes_flushed description: Number of bytes flushed (from memtables) into Level 0 @@ -22180,7 +19882,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l0-bytes-ingested exported_name: storage_l0_bytes_ingested description: Number of bytes ingested directly into Level 0 @@ -22189,7 +19890,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l0-level-score exported_name: storage_l0_level_score description: Compaction score of level 0 @@ -22198,7 +19898,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l0-level-size exported_name: storage_l0_level_size description: Size of the SSTables in level 0 @@ -22208,7 +19907,6 @@ layers: aggregation: AVG derivative: NONE visibility: SUPPORT - owner: cockroachdb/kv - name: storage.l0-num-files exported_name: storage_l0_num_files description: Number of SSTables in Level 0 @@ -22217,7 +19915,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l0-sublevels exported_name: storage_l0_sublevels description: Number of Level 0 sublevels @@ -22226,7 +19923,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l1-bytes-ingested exported_name: storage_l1_bytes_ingested description: Number of bytes ingested directly into Level 1 @@ -22235,7 +19931,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l1-level-score exported_name: storage_l1_level_score description: Compaction score of level 1 @@ -22244,7 +19939,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l1-level-size exported_name: 
storage_l1_level_size description: Size of the SSTables in level 1 @@ -22253,7 +19947,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l2-bytes-ingested exported_name: storage_l2_bytes_ingested description: Number of bytes ingested directly into Level 2 @@ -22262,7 +19955,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l2-level-score exported_name: storage_l2_level_score description: Compaction score of level 2 @@ -22271,7 +19963,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l2-level-size exported_name: storage_l2_level_size description: Size of the SSTables in level 2 @@ -22280,7 +19971,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l3-bytes-ingested exported_name: storage_l3_bytes_ingested description: Number of bytes ingested directly into Level 3 @@ -22289,7 +19979,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l3-level-score exported_name: storage_l3_level_score description: Compaction score of level 3 @@ -22298,7 +19987,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l3-level-size exported_name: storage_l3_level_size description: Size of the SSTables in level 3 @@ -22307,7 +19995,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l4-bytes-ingested exported_name: storage_l4_bytes_ingested description: Number of bytes ingested directly into Level 4 @@ -22316,7 +20003,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l4-level-score exported_name: storage_l4_level_score description: Compaction score of level 4 @@ -22325,7 +20011,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: 
cockroachdb/kv - name: storage.l4-level-size exported_name: storage_l4_level_size description: Size of the SSTables in level 4 @@ -22334,7 +20019,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l5-bytes-ingested exported_name: storage_l5_bytes_ingested description: Number of bytes ingested directly into Level 5 @@ -22343,7 +20027,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l5-level-score exported_name: storage_l5_level_score description: Compaction score of level 5 @@ -22352,7 +20035,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l5-level-size exported_name: storage_l5_level_size description: Size of the SSTables in level 5 @@ -22361,7 +20043,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l6-bytes-ingested exported_name: storage_l6_bytes_ingested description: Number of bytes ingested directly into Level 6 @@ -22370,7 +20051,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.l6-level-score exported_name: storage_l6_level_score description: Compaction score of level 6 @@ -22379,7 +20059,6 @@ layers: unit: CONST aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.l6-level-size exported_name: storage_l6_level_size description: Size of the SSTables in level 6 @@ -22388,7 +20067,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.marked-for-compaction-files exported_name: storage_marked_for_compaction_files description: Count of SSTables marked for compaction @@ -22397,7 +20075,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.point_deletions.bytes exported_name: storage_point_deletions_bytes description: |- @@ -22409,7 +20086,6 @@ layers: unit: BYTES aggregation: 
AVG derivative: NONE - owner: cockroachdb/kv - name: storage.range_deletions.bytes exported_name: storage_range_deletions_bytes description: |- @@ -22421,7 +20097,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.secondary-cache.count exported_name: storage_secondary_cache_count description: The count of cache blocks in the secondary cache (not sstable blocks) @@ -22430,7 +20105,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.secondary-cache.evictions exported_name: storage_secondary_cache_evictions description: The number of times a cache block was evicted from the secondary cache @@ -22439,7 +20113,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-full-hit exported_name: storage_secondary_cache_reads_full_hit description: The number of reads where all data returned was read from the secondary cache @@ -22448,7 +20121,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-multi-block exported_name: storage_secondary_cache_reads_multi_block description: The number of secondary cache reads that require reading data from 2+ cache blocks @@ -22457,7 +20129,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-multi-shard exported_name: storage_secondary_cache_reads_multi_shard description: The number of secondary cache reads that require reading data from 2+ shards @@ -22466,7 +20137,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-no-hit exported_name: storage_secondary_cache_reads_no_hit description: The number of reads where no data returned was read from the secondary cache @@ -22475,7 +20145,6 @@ layers: unit: COUNT 
aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-partial-hit exported_name: storage_secondary_cache_reads_partial_hit description: The number of reads where some data returned was read from the secondary cache @@ -22484,7 +20153,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.reads-total exported_name: storage_secondary_cache_reads_total description: The number of reads from the secondary cache @@ -22493,7 +20161,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.secondary-cache.size exported_name: storage_secondary_cache_size description: The number of sstable bytes stored in the secondary cache @@ -22502,7 +20169,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.secondary-cache.write-back-failures exported_name: storage_secondary_cache_write_back_failures description: The number of times writing a cache block to the secondary cache failed @@ -22511,7 +20177,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.shared-storage.read exported_name: storage_shared_storage_read description: Bytes read from shared storage @@ -22520,7 +20185,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.shared-storage.write exported_name: storage_shared_storage_write description: Bytes written to external storage @@ -22529,7 +20193,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.single-delete.ineffectual exported_name: storage_single_delete_ineffectual description: Number of SingleDeletes that were ineffectual @@ -22538,7 +20201,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: 
cockroachdb/kv - name: storage.single-delete.invariant-violation exported_name: storage_single_delete_invariant_violation description: Number of SingleDelete invariant violations @@ -22547,7 +20209,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.sstable.remote.bytes exported_name: storage_sstable_remote_bytes description: Bytes in SSTables that are stored off-disk (remotely) in object storage. @@ -22556,7 +20217,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.sstable.remote.count exported_name: storage_sstable_remote_count description: Count of SSTables that are stored off-disk (remotely) in object storage. @@ -22565,7 +20225,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.sstable.zombie.bytes exported_name: storage_sstable_zombie_bytes description: Bytes in SSTables that have been logically deleted, but can't yet be physically deleted because an open iterator may be reading them. @@ -22574,7 +20233,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.value_separation.blob_files.count exported_name: storage_value_separation_blob_files_count description: The number of blob files that are used to store separated values within the storage engine. @@ -22583,7 +20241,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.value_separation.blob_files.size exported_name: storage_value_separation_blob_files_size description: The size of the physical blob files that are used to store separated values within the storage engine. This sum is the physical post-compression sum of value_bytes.referenced and value_bytes.unreferenced. 
@@ -22592,7 +20249,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.value_separation.value_bytes.referenced exported_name: storage_value_separation_value_bytes_referenced description: The size of storage engine value bytes (pre-compression) that are stored separately in blob files and referenced by a live sstable. @@ -22601,7 +20257,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.value_separation.value_bytes.unreferenced exported_name: storage_value_separation_value_bytes_unreferenced description: The size of storage engine value bytes (pre-compression) that are stored separately in blob files and not referenced by any live sstable. These bytes are garbage that could be reclaimed by a compaction. @@ -22610,7 +20265,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.value_separation.value_retrieval.count exported_name: storage_value_separation_value_retrieval_count description: The number of value retrievals of values separated into blob files. @@ -22619,7 +20273,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.wal.bytes_in exported_name: storage_wal_bytes_in description: The number of logical bytes the storage engine has written to the Write-Ahead Log. @@ -22628,7 +20281,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.wal.bytes_written exported_name: storage_wal_bytes_written description: The number of bytes the storage engine has written to the Write-Ahead Log. @@ -22637,7 +20289,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storage.wal.failover.primary.duration exported_name: storage_wal_failover_primary_duration description: Cumulative time spent writing to the primary WAL directory. 
@@ -22647,7 +20298,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Only populated when WAL failover is configured. - owner: cockroachdb/kv - name: storage.wal.failover.secondary.duration exported_name: storage_wal_failover_secondary_duration description: Cumulative time spent writing to the secondary WAL directory. @@ -22657,7 +20307,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Only populated when WAL failover is configured. - owner: cockroachdb/kv - name: storage.wal.failover.switch.count exported_name: storage_wal_failover_switch_count description: Count of the number of times WAL writing has switched from primary to secondary and vice versa. @@ -22668,7 +20317,6 @@ layers: derivative: NON_NEGATIVE_DERIVATIVE how_to_use: Only populated when WAL failover is configured. A high switch count indicates that many disk stalls were encountered. visibility: SUPPORT - owner: cockroachdb/kv - name: storage.write-amplification exported_name: storage_write_amplification description: |- @@ -22680,7 +20328,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storage.write-stall-nanos exported_name: storage_write_stall_nanos description: Total write stall duration in nanos @@ -22690,7 +20337,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT - owner: cockroachdb/kv - name: storeliveness.callbacks.processing_duration exported_name: storeliveness_callbacks_processing_duration description: Duration of support withdrawal callback processing @@ -22699,7 +20345,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.heartbeat.failures exported_name: storeliveness_heartbeat_failures description: Number of Store Liveness heartbeats that failed to be sent out by the Store Liveness Support Manager @@ -22709,7 +20354,6 @@ layers: aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE visibility: SUPPORT 
- owner: cockroachdb/kv - name: storeliveness.heartbeat.persist_duration exported_name: storeliveness_heartbeat_persist_duration description: Latency of persisting Store Liveness requester meta before sending heartbeats @@ -22718,7 +20362,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.heartbeat.successes exported_name: storeliveness_heartbeat_successes description: Number of Store Liveness heartbeats sent out by the Store Liveness Support Manager @@ -22727,7 +20370,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.message_handle.failures exported_name: storeliveness_message_handle_failures description: Number of incoming Store Liveness messages that failed to be handled by the Store Liveness Support Manager @@ -22736,7 +20378,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.message_handle.persist_duration exported_name: storeliveness_message_handle_persist_duration description: Latency of persisting Store Liveness state when handling incoming messages @@ -22745,7 +20386,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.message_handle.successes exported_name: storeliveness_message_handle_successes description: Number of incoming Store Liveness messages handled by the Store Liveness Support Manager @@ -22754,7 +20394,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.support_for.stores exported_name: storeliveness_support_for_stores description: Number of stores that the Store Liveness Support Manager has ever provided support for @@ -22763,7 +20402,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.support_from.stores exported_name: storeliveness_support_from_stores 
description: Number of stores that the Store Liveness Support Manager is requesting support from by sending heartbeats @@ -22772,7 +20410,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.support_withdraw.failures exported_name: storeliveness_support_withdraw_failures description: Number of times the Store Liveness Support Manager has encountered an error while withdrawing support for another store @@ -22781,7 +20418,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.support_withdraw.persist_duration exported_name: storeliveness_support_withdraw_persist_duration description: Latency of persisting Store Liveness state when withdrawing support @@ -22790,7 +20426,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.support_withdraw.successes exported_name: storeliveness_support_withdraw_successes description: Number of times the Store Liveness Support Manager has successfully withdrawn support for another store @@ -22799,7 +20434,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.batches-received exported_name: storeliveness_transport_batches_received description: Number of message batches received by the Store Liveness Transport @@ -22808,7 +20442,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.batches-sent exported_name: storeliveness_transport_batches_sent description: Number of message batches sent by the Store Liveness Transport @@ -22817,7 +20450,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.receive-queue-bytes exported_name: storeliveness_transport_receive_queue_bytes description: Total byte size of pending incoming messages 
from Store Liveness Transport @@ -22826,7 +20458,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.transport.receive-queue-size exported_name: storeliveness_transport_receive_queue_size description: Number of pending incoming messages from the Store Liveness Transport @@ -22835,7 +20466,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.transport.receive_dropped exported_name: storeliveness_transport_receive_dropped description: Number of Store Liveness messages dropped by the Store Liveness Transport on the receiver side @@ -22844,7 +20474,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.received exported_name: storeliveness_transport_received description: Number of Store Liveness messages received by the Store Liveness Transport @@ -22853,7 +20482,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.send-queue-bytes exported_name: storeliveness_transport_send_queue_bytes description: Total byte size of pending outgoing messages in all Store Liveness Transport per-store send queues @@ -22862,7 +20490,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: storeliveness.transport.send-queue-idle exported_name: storeliveness_transport_send_queue_idle description: Number of Store Liveness Transport per-store send queues that have become idle due to no recently-sent messages @@ -22871,7 +20498,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.send-queue-size exported_name: storeliveness_transport_send_queue_size description: Number of pending outgoing messages in all Store Liveness Transport per-store send queues @@ -22880,7 +20506,6 @@ layers: unit: COUNT aggregation: AVG 
derivative: NONE - owner: cockroachdb/kv - name: storeliveness.transport.send_dropped exported_name: storeliveness_transport_send_dropped description: Number of Store Liveness messages dropped by the Store Liveness Transport on the sender side @@ -22889,7 +20514,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: storeliveness.transport.sent exported_name: storeliveness_transport_sent description: Number of Store Liveness messages sent by the Store Liveness Transport @@ -22898,7 +20522,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: subsume.locks_written exported_name: subsume_locks_written description: Number of locks written to storage during subsume (range merge) @@ -22907,7 +20530,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: sysbytes exported_name: sysbytes description: Number of bytes in system KV pairs @@ -22916,7 +20538,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: syscount exported_name: syscount description: Count of system KV pairs @@ -22925,7 +20546,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: tenant.consumption.cross_region_network_ru exported_name: tenant_consumption_cross_region_network_ru description: Total number of RUs charged for cross-region network traffic @@ -22934,7 +20554,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.external_io_egress_bytes exported_name: tenant_consumption_external_io_egress_bytes description: Total number of bytes written to external services such as cloud storage providers @@ -22943,7 +20562,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.external_io_ingress_bytes exported_name: 
tenant_consumption_external_io_ingress_bytes description: Total number of bytes read from external services such as cloud storage providers @@ -22952,7 +20570,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.kv_request_units exported_name: tenant_consumption_kv_request_units description: RU consumption attributable to KV @@ -22961,7 +20578,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.pgwire_egress_bytes exported_name: tenant_consumption_pgwire_egress_bytes description: Total number of bytes transferred from a SQL pod to the client @@ -22970,7 +20586,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.read_batches exported_name: tenant_consumption_read_batches description: Total number of KV read batches @@ -22979,7 +20594,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.read_bytes exported_name: tenant_consumption_read_bytes description: Total number of bytes read from KV @@ -22988,7 +20602,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.read_requests exported_name: tenant_consumption_read_requests description: Total number of KV read requests @@ -22997,7 +20610,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.request_units exported_name: tenant_consumption_request_units description: Total RU consumption @@ -23006,7 +20618,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.sql_pods_cpu_seconds exported_name: tenant_consumption_sql_pods_cpu_seconds description: Total amount of CPU used by 
SQL pods @@ -23015,7 +20626,6 @@ layers: unit: SECONDS aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.write_batches exported_name: tenant_consumption_write_batches description: Total number of KV write batches @@ -23024,7 +20634,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.write_bytes exported_name: tenant_consumption_write_bytes description: Total number of bytes written to KV @@ -23033,7 +20642,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: tenant.consumption.write_requests exported_name: tenant_consumption_write_requests description: Total number of KV write requests @@ -23042,7 +20650,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/server - name: timeseries.write.bytes exported_name: timeseries_write_bytes description: Total size in bytes of metric samples written to disk @@ -23051,7 +20658,6 @@ layers: unit: BYTES aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: timeseries.write.errors exported_name: timeseries_write_errors description: Total errors encountered while attempting to write metrics to disk @@ -23060,7 +20666,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: timeseries.write.samples exported_name: timeseries_write_samples description: Total number of metric samples written to disk @@ -23069,7 +20674,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: totalbytes exported_name: totalbytes description: Total number of bytes taken up by keys and values including non-live data @@ -23078,7 +20682,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: tscache.skl.pages exported_name: tscache_skl_pages 
description: Number of pages in the timestamp cache @@ -23087,7 +20690,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: tscache.skl.rotations exported_name: tscache_skl_rotations description: Number of page rotations in the timestamp cache @@ -23096,7 +20698,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.commit_waits.before_commit_trigger exported_name: txn_commit_waits_before_commit_trigger description: Number of KV transactions that had to commit-wait on the server before committing because they had a commit trigger @@ -23105,7 +20706,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side.1PC.failure exported_name: txn_server_side_1PC_failure description: Number of batches that attempted to commit using 1PC and failed @@ -23114,7 +20714,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side.1PC.success exported_name: txn_server_side_1PC_success description: Number of batches that attempted to commit using 1PC and succeeded @@ -23123,7 +20722,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side_retry.read_evaluation.failure exported_name: txn_server_side_retry_read_evaluation_failure description: Number of read batches that were not successfully refreshed server side @@ -23132,7 +20730,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side_retry.read_evaluation.success exported_name: txn_server_side_retry_read_evaluation_success description: Number of read batches that were successfully refreshed server side @@ -23141,7 +20738,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: 
txn.server_side_retry.uncertainty_interval_error.failure exported_name: txn_server_side_retry_uncertainty_interval_error_failure description: Number of batches that ran into uncertainty interval errors that were not successfully refreshed server side @@ -23150,7 +20746,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side_retry.uncertainty_interval_error.success exported_name: txn_server_side_retry_uncertainty_interval_error_success description: Number of batches that ran into uncertainty interval errors that were successfully refreshed server side @@ -23159,7 +20754,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side_retry.write_evaluation.failure exported_name: txn_server_side_retry_write_evaluation_failure description: Number of write batches that were not successfully refreshed server side @@ -23168,7 +20762,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txn.server_side_retry.write_evaluation.success exported_name: txn_server_side_retry_write_evaluation_success description: Number of write batches that were successfully refreshed server side @@ -23177,7 +20770,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnrecovery.attempts.pending exported_name: txnrecovery_attempts_pending description: Number of transaction recovery attempts currently in-flight @@ -23186,7 +20778,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txnrecovery.attempts.total exported_name: txnrecovery_attempts_total description: Number of transaction recovery attempts executed @@ -23195,7 +20786,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnrecovery.failures exported_name: txnrecovery_failures description: Number of 
transaction recovery attempts that failed @@ -23204,7 +20794,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnrecovery.successes.aborted exported_name: txnrecovery_successes_aborted description: Number of transaction recovery attempts that aborted a transaction @@ -23213,7 +20802,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnrecovery.successes.committed exported_name: txnrecovery_successes_committed description: Number of transaction recovery attempts that committed a transaction @@ -23222,7 +20810,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnrecovery.successes.pending exported_name: txnrecovery_successes_pending description: Number of transaction recovery attempts that left a transaction pending @@ -23231,7 +20818,6 @@ layers: unit: COUNT aggregation: AVG derivative: NON_NEGATIVE_DERIVATIVE - owner: cockroachdb/kv - name: txnwaitqueue.pushee.waiting exported_name: txnwaitqueue_pushee_waiting description: Number of pushees on the txn wait queue @@ -23240,7 +20826,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txnwaitqueue.pusher.slow exported_name: txnwaitqueue_pusher_slow description: The total number of cases where a pusher waited more than the excessive wait threshold @@ -23249,7 +20834,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txnwaitqueue.pusher.wait_time exported_name: txnwaitqueue_pusher_wait_time description: Histogram of durations spent in queue by pushers @@ -23258,7 +20842,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txnwaitqueue.pusher.waiting exported_name: txnwaitqueue_pusher_waiting description: Number of pushers on the txn wait queue @@ -23267,7 +20850,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - 
owner: cockroachdb/kv - name: txnwaitqueue.query.wait_time exported_name: txnwaitqueue_query_wait_time description: Histogram of durations spent in queue by queries @@ -23276,7 +20858,6 @@ layers: unit: NANOSECONDS aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: txnwaitqueue.query.waiting exported_name: txnwaitqueue_query_waiting description: Number of transaction status queries waiting for an updated transaction record @@ -23285,7 +20866,6 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: valbytes exported_name: valbytes description: Number of bytes taken up by values @@ -23294,7 +20874,6 @@ layers: unit: BYTES aggregation: AVG derivative: NONE - owner: cockroachdb/kv - name: valcount exported_name: valcount description: Count of all values @@ -23303,4 +20882,3 @@ layers: unit: COUNT aggregation: AVG derivative: NONE - owner: cockroachdb/kv diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 0ab00664eeb4..5eac43f42bab 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -1213,8 +1213,6 @@ GO_TARGETS = [ "//pkg/cmd/flaky-test-notifier:flaky-test-notifier_lib", "//pkg/cmd/fuzz:fuzz", "//pkg/cmd/fuzz:fuzz_lib", - "//pkg/cmd/gen-metric-owners:gen-metric-owners", - "//pkg/cmd/gen-metric-owners:gen-metric-owners_lib", "//pkg/cmd/generate-acceptance-tests:generate-acceptance-tests", "//pkg/cmd/generate-acceptance-tests:generate-acceptance-tests_lib", "//pkg/cmd/generate-ash-inventory:generate-ash-inventory", @@ -2771,6 +2769,7 @@ GO_TARGETS = [ "//pkg/testutils/lint/passes/hash:hash_test", "//pkg/testutils/lint/passes/leaktestcall:leaktestcall", "//pkg/testutils/lint/passes/leaktestcall:leaktestcall_test", + "//pkg/testutils/lint/passes/metricmetadatainit:metricmetadatainit", "//pkg/testutils/lint/passes/nilness:nilness", "//pkg/testutils/lint/passes/nilness:nilness_test", "//pkg/testutils/lint/passes/nocopy:nocopy", diff --git a/pkg/backup/backup_metrics.go b/pkg/backup/backup_metrics.go index 
f6aa0ed46431..a80b31289397 100644 --- a/pkg/backup/backup_metrics.go +++ b/pkg/backup/backup_metrics.go @@ -22,12 +22,12 @@ func (b BackupMetrics) MetricStruct() {} // MakeBackupMetrics instantiates the metrics for backup. func MakeBackupMetrics(time.Duration) metric.Struct { m := &BackupMetrics{ - LastKMSInaccessibleErrorTime: metric.NewGauge(metric.Metadata{ + LastKMSInaccessibleErrorTime: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "backup.last-failed-time.kms-inaccessible", Help: "The unix timestamp of the most recent failure of backup due to errKMSInaccessible by a backup specified as maintaining this metric", Measurement: "Jobs", Unit: metric.Unit_TIMESTAMP_SEC, - }), + })), } return m } diff --git a/pkg/backup/schedule_exec.go b/pkg/backup/schedule_exec.go index ab244b3f2075..4ee9cedf327a 100644 --- a/pkg/backup/schedule_exec.go +++ b/pkg/backup/schedule_exec.go @@ -608,7 +608,7 @@ func init() { metrics: backupMetrics{ ExecutorMetrics: &m, ExecutorPTSMetrics: &pm, - RpoMetric: metric.NewGauge(metric.Metadata{ + RpoMetric: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.BACKUP.last-completed-time", Help: crstrings.UnwrapText(` The unix timestamp of the most recently completed backup by a @@ -631,8 +631,8 @@ func init() { For example with a backup frequency of 60 minutes, monitor time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min)). 
`), - }), - RpoTenantMetric: metric.NewExportedGaugeVec(metric.Metadata{ + })), + RpoTenantMetric: metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{ Name: "schedules.BACKUP.last-completed-time-by-virtual_cluster", Help: crstrings.UnwrapText(` The unix timestamp of the most recently completed host scheduled @@ -640,7 +640,7 @@ func init() { `), Measurement: "Jobs", Unit: metric.Unit_TIMESTAMP_SEC, - }, []string{"tenant_id"}), + }), []string{"tenant_id"}), }, }, nil }) diff --git a/pkg/backup/schedule_exec_test.go b/pkg/backup/schedule_exec_test.go index a500ed057532..4efb7a69c319 100644 --- a/pkg/backup/schedule_exec_test.go +++ b/pkg/backup/schedule_exec_test.go @@ -27,8 +27,8 @@ func TestBackupSucceededUpdatesMetrics(t *testing.T) { ctx := context.Background() executor := &scheduledBackupExecutor{ metrics: backupMetrics{ - RpoMetric: metric.NewGauge(metric.Metadata{}), - RpoTenantMetric: metric.NewExportedGaugeVec(metric.Metadata{}, []string{"tenant_id"}), + RpoMetric: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), + RpoTenantMetric: metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{}), []string{"tenant_id"}), }, } @@ -63,8 +63,8 @@ func TestBackupSucceededUpdatesMetrics(t *testing.T) { // Use a fresh executor to ensure RpoMetric starts at zero. 
freshExecutor := &scheduledBackupExecutor{ metrics: backupMetrics{ - RpoMetric: metric.NewGauge(metric.Metadata{}), - RpoTenantMetric: metric.NewExportedGaugeVec(metric.Metadata{}, []string{"tenant_id"}), + RpoMetric: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), + RpoTenantMetric: metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{}), []string{"tenant_id"}), }, } schedule := createSchedule(t, true) diff --git a/pkg/ccl/changefeedccl/cdcutils/throttle.go b/pkg/ccl/changefeedccl/cdcutils/throttle.go index 10431938f21f..56d592522d15 100644 --- a/pkg/ccl/changefeedccl/cdcutils/throttle.go +++ b/pkg/ccl/changefeedccl/cdcutils/throttle.go @@ -145,13 +145,13 @@ type Metrics struct { // MakeMetrics constructs a Metrics struct with the provided histogram window. func MakeMetrics(histogramWindow time.Duration) Metrics { makeMetric := func(n string) metric.Metadata { - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("changefeed.%s.messages_pushback_nanos", n), Help: fmt.Sprintf("Total time spent throttled for %s quota", n), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } + }) } return Metrics{ diff --git a/pkg/ccl/changefeedccl/checkpoint/metrics.go b/pkg/ccl/changefeedccl/checkpoint/metrics.go index 42b08ddab1d9..6ce5feef7e9a 100644 --- a/pkg/ccl/changefeedccl/checkpoint/metrics.go +++ b/pkg/ccl/changefeedccl/checkpoint/metrics.go @@ -12,37 +12,37 @@ import ( ) var ( - metaCreateNanos = metric.Metadata{ + metaCreateNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint.create_nanos", Help: "Time it takes to create a changefeed checkpoint", Unit: metric.Unit_NANOSECONDS, Measurement: "Nanoseconds", Category: metric.Metadata_CHANGEFEEDS, - } + }) - metaTotalBytes = metric.Metadata{ + metaTotalBytes = metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint.total_bytes", Help: "Total size of a changefeed checkpoint", Unit: 
metric.Unit_BYTES, Measurement: "Bytes", Category: metric.Metadata_CHANGEFEEDS, - } + }) - metaTimestampCount = metric.Metadata{ + metaTimestampCount = metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint.timestamp_count", Help: "Number of unique timestamps in a changefeed checkpoint", Unit: metric.Unit_COUNT, Measurement: "Timestamps", Category: metric.Metadata_CHANGEFEEDS, - } + }) - metaSpanCount = metric.Metadata{ + metaSpanCount = metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint.span_count", Help: "Number of spans in a changefeed checkpoint", Unit: metric.Unit_COUNT, Measurement: "Spans", Category: metric.Metadata_CHANGEFEEDS, - } + }) ) type AggMetrics struct { diff --git a/pkg/ccl/changefeedccl/kvevent/metrics.go b/pkg/ccl/changefeedccl/kvevent/metrics.go index b9fa541b80f4..94cbe2b5df59 100644 --- a/pkg/ccl/changefeedccl/kvevent/metrics.go +++ b/pkg/ccl/changefeedccl/kvevent/metrics.go @@ -13,55 +13,55 @@ import ( ) var ( - metaChangefeedBufferEntriesIn = metric.Metadata{ + metaChangefeedBufferEntriesIn = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries.in", Help: "Total entries entering the buffer between raft and changefeed sinks", Measurement: "Entries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBufferEntriesOut = metric.Metadata{ + }) + metaChangefeedBufferEntriesOut = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries.out", Help: "Total entries leaving the buffer between raft and changefeed sinks", Measurement: "Entries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBufferEntriesReleased = metric.Metadata{ + }) + metaChangefeedBufferEntriesReleased = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries.released", Help: "Total entries processed, emitted and acknowledged by the sinks", Measurement: "Entries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - 
metaChangefeedBufferMemAcquired = metric.Metadata{ + }) + metaChangefeedBufferMemAcquired = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries_mem.acquired", Help: "Total amount of memory acquired for entries as they enter the system", Measurement: "Entries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBufferMemReleased = metric.Metadata{ + }) + metaChangefeedBufferMemReleased = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries_mem.released", Help: "Total amount of memory released by the entries after they have been emitted", Measurement: "Entries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBufferPushbackNanos = metric.Metadata{ + }) + metaChangefeedBufferPushbackNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_pushback_nanos", Help: "Total time spent waiting while the buffer was full", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedAllocatedMemory = metric.Metadata{ + }) + metaChangefeedAllocatedMemory = metric.InitMetadata(metric.Metadata{ Name: "changefeed.buffer_entries.allocated_mem", Help: "Current quota pool memory allocation", Measurement: "Bytes", Unit: metric.Unit_BYTES, Category: metric.Metadata_CHANGEFEEDS, - } + }) ) // Metrics is a metric.Struct for kvfeed metrics. 
@@ -140,13 +140,13 @@ func MakeMetrics(histogramWindow time.Duration) Metrics { return "resolved" } }() - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("changefeed.buffer_entries.%s", eventTypeName), Help: fmt.Sprintf("Number of %s elements added to the buffer", eventTypeName), Measurement: "Events", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } + }) } commonBufferMetrics := CommonBufferMetrics{ BufferEntriesMemAcquired: metric.NewCounter(metaChangefeedBufferMemAcquired), diff --git a/pkg/ccl/changefeedccl/metrics.go b/pkg/ccl/changefeedccl/metrics.go index 64d9f359b3c8..52f39790beae 100644 --- a/pkg/ccl/changefeedccl/metrics.go +++ b/pkg/ccl/changefeedccl/metrics.go @@ -755,14 +755,14 @@ func (w *wrappingCostController) timers() *timers.ScopedTimers { } var ( - metaChangefeedForwardedResolvedMessages = metric.Metadata{ + metaChangefeedForwardedResolvedMessages = metric.InitMetadata(metric.Metadata{ Name: "changefeed.forwarded_resolved_messages", Help: "Resolved timestamps forwarded from the change aggregator to the change frontier", Measurement: "Messages", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedErrorRetries = metric.Metadata{ + }) + metaChangefeedErrorRetries = metric.InitMetadata(metric.Metadata{ Name: "changefeed.error_retries", Help: "Total retryable errors encountered by all changefeeds", Measurement: "Errors", @@ -777,8 +777,8 @@ var ( minutes. But if there is no rolling upgrade in process or other cluster maintenance, and the error rate is high, investigate the changefeed job. `), - } - metaChangefeedFailures = metric.Metadata{ + }) + metaChangefeedFailures = metric.InitMetadata(metric.Metadata{ Name: "changefeed.failures", Help: "Total number of changefeed jobs which have failed", Measurement: "Errors", @@ -790,91 +790,91 @@ var ( system will not try to restart. Any increase in this counter should be investigated. 
An alert on this metric is recommended. `), - } + }) - metaEventQueueTime = metric.Metadata{ + metaEventQueueTime = metric.InitMetadata(metric.Metadata{ Name: "changefeed.queue_time_nanos", Help: "Time KV event spent waiting to be processed", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } + }) - metaChangefeedCheckpointHistNanos = metric.Metadata{ + metaChangefeedCheckpointHistNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint_hist_nanos", Help: "Time spent checkpointing changefeed progress", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } + }) - metaChangefeedFrontierUpdates = metric.Metadata{ + metaChangefeedFrontierUpdates = metric.InitMetadata(metric.Metadata{ Name: "changefeed.frontier_updates", Help: "Number of change frontier updates across all feeds", Measurement: "Updates", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedEventConsumerFlushNanos = metric.Metadata{ + }) + metaChangefeedEventConsumerFlushNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.nprocs_flush_nanos", Help: "Total time spent idle waiting for the parallel consumer to flush", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedEventConsumerConsumeNanos = metric.Metadata{ + }) + metaChangefeedEventConsumerConsumeNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.nprocs_consume_event_nanos", Help: "Total time spent waiting to add an event to the parallel consumer", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedEventConsumerInFlightEvents = metric.Metadata{ + }) + metaChangefeedEventConsumerInFlightEvents = metric.InitMetadata(metric.Metadata{ Name: "changefeed.nprocs_in_flight_count", Help: "Number of buffered events in the parallel consumer", Measurement: "Count of 
Events", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedTableBytes = metric.Metadata{ + }) + metaChangefeedTableBytes = metric.InitMetadata(metric.Metadata{ Name: "changefeed.usage.table_bytes", Help: "Aggregated number of bytes of data per table watched by changefeeds", Measurement: "Storage", Unit: metric.Unit_BYTES, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedUsageErrorCount = metric.Metadata{ + }) + metaChangefeedUsageErrorCount = metric.InitMetadata(metric.Metadata{ Name: "changefeed.usage.error_count", Help: "Count of errors encountered while generating usage metrics for changefeeds", Measurement: "Errors", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedUsageQueryDuration = metric.Metadata{ + }) + metaChangefeedUsageQueryDuration = metric.InitMetadata(metric.Metadata{ Name: "changefeed.usage.query_duration", Help: "Time taken by the queries used to generate usage metrics for changefeeds", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaNetworkBytesIn = metric.Metadata{ + }) + metaNetworkBytesIn = metric.InitMetadata(metric.Metadata{ Name: "changefeed.network.bytes_in", Help: "The number of bytes received from the network by changefeeds", Measurement: "Bytes", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaNetworkBytesOut = metric.Metadata{ + }) + metaNetworkBytesOut = metric.InitMetadata(metric.Metadata{ Name: "changefeed.network.bytes_out", Help: "The number of bytes sent over the network by changefeeds", Measurement: "Bytes", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } + }) ) func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *AggMetrics { - metaChangefeedEmittedMessages := metric.Metadata{ + metaChangefeedEmittedMessages := metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_messages", Help: "Messages emitted by all feeds", 
Measurement: "Messages", @@ -886,15 +886,15 @@ layers: changefeeds. This metric characterizes the rate of changes being streamed from the CockroachDB cluster. `), - } - metaChangefeedEmittedBatchSizes := metric.Metadata{ + }) + metaChangefeedEmittedBatchSizes := metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_batch_sizes", Help: "Size of batches emitted by all feeds", Measurement: "Number of Messages in Batch", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedFilteredMessages := metric.Metadata{ + }) + metaChangefeedFilteredMessages := metric.InitMetadata(metric.Metadata{ Name: "changefeed.filtered_messages", Help: crstrings.UnwrapText(` Messages filtered out by all feeds. This count does not include the number @@ -903,8 +903,8 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: "Messages", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedEmittedBytes := metric.Metadata{ + }) + metaChangefeedEmittedBytes := metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_bytes", Help: "Bytes emitted by all feeds", Measurement: "Bytes", @@ -916,8 +916,8 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag changefeeds. This metric characterizes the throughput bytes being streamed from the CockroachDB cluster.
`), - } - metaChangefeedFlushedBytes := metric.Metadata{ + }) + metaChangefeedFlushedBytes := metric.InitMetadata(metric.Metadata{ Name: "changefeed.flushed_bytes", Help: crstrings.UnwrapText(` Bytes emitted by all feeds; maybe different from changefeed.emitted_bytes @@ -926,36 +926,36 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: "Bytes", Unit: metric.Unit_BYTES, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedFlushes := metric.Metadata{ + }) + metaChangefeedFlushes := metric.InitMetadata(metric.Metadata{ Name: "changefeed.flushes", Help: "Total flushes across all feeds.", Measurement: "Flushes", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaSizeBasedFlushes := metric.Metadata{ + }) + metaSizeBasedFlushes := metric.InitMetadata(metric.Metadata{ Name: "changefeed.size_based_flushes", Help: "Total size based flushes across all feeds.", Measurement: "Flushes", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBatchHistNanos := metric.Metadata{ + }) + metaChangefeedBatchHistNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.sink_batch_hist_nanos", Help: "Time spent batched in the sink buffer before being flushed and acknowledged.", Measurement: "Changefeeds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedFlushHistNanos := metric.Metadata{ + }) + metaChangefeedFlushHistNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.flush_hist_nanos", Help: "Time spent flushing messages across all changefeeds", Measurement: "Changefeeds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaCommitLatency := metric.Metadata{ + }) + metaCommitLatency := metric.InitMetadata(metric.Metadata{ Name: "changefeed.commit_latency", Help: crstrings.UnwrapText(` Event commit latency: a difference between event MVCC timestamp and the @@ -972,8 +972,8 @@ func 
newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag changefeeds. This metric characterizes the end-to-end lag between a committed change and that change applied at the destination. `), - } - metaAdmitLatency := metric.Metadata{ + }) + metaAdmitLatency := metric.InitMetadata(metric.Metadata{ Name: "changefeed.admit_latency", Help: crstrings.UnwrapText(` Event admission latency: a difference between event MVCC timestamp @@ -985,23 +985,23 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBackfillCount := metric.Metadata{ + }) + metaChangefeedBackfillCount := metric.InitMetadata(metric.Metadata{ Name: "changefeed.backfill_count", Help: "Number of changefeeds currently executing backfill", Measurement: "Count", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedBackfillPendingRanges := metric.Metadata{ + }) + metaChangefeedBackfillPendingRanges := metric.InitMetadata(metric.Metadata{ Name: "changefeed.backfill_pending_ranges", Help: "Number of ranges in an ongoing backfill that are yet to be fully emitted", Measurement: "Count", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedRunning := metric.Metadata{ + }) + metaChangefeedRunning := metric.InitMetadata(metric.Metadata{ Name: "changefeed.running", Help: "Number of currently running changefeeds, including sinkless", Measurement: "Changefeeds", @@ -1009,43 +1009,43 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_CHANGEFEEDS, HowToUse: `This metric tracks the total number of all running changefeeds.`, - } - metaMessageSize := metric.Metadata{ + }) + metaMessageSize := metric.InitMetadata(metric.Metadata{ Name: "changefeed.message_size_hist", 
Help: "Message size histogram", Measurement: "Bytes", Unit: metric.Unit_BYTES, Category: metric.Metadata_CHANGEFEEDS, - } - metaBatchReductionCount := metric.Metadata{ + }) + metaBatchReductionCount := metric.InitMetadata(metric.Metadata{ Name: "changefeed.batch_reduction_count", Help: "Number of times a changefeed aggregator node attempted to reduce the size of message batches it emitted to the sink", Measurement: "Batch Size Reductions", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaInternalRetryMessageCount := metric.Metadata{ + }) + metaInternalRetryMessageCount := metric.InitMetadata(metric.Metadata{ Name: "changefeed.internal_retry_message_count", Help: "Number of messages for which an attempt to retry them within an aggregator node was made.", Measurement: "Messages", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaSchemaRegistryRetriesCount := metric.Metadata{ + }) + metaSchemaRegistryRetriesCount := metric.InitMetadata(metric.Metadata{ Name: "changefeed.schema_registry.retry_count", Help: "Number of retries encountered when sending requests to the schema registry.", Measurement: "Retries", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaSchemaRegistryRegistrations := metric.Metadata{ + }) + metaSchemaRegistryRegistrations := metric.InitMetadata(metric.Metadata{ Name: "changefeed.schema_registry.registrations", Help: "Number of registration attempts with the schema registry.", Measurement: "Registrations", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedParallelIOQueueNanos := metric.Metadata{ + }) + metaChangefeedParallelIOQueueNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.parallel_io_queue_nanos", Help: crstrings.UnwrapText(` Time that outgoing requests to the sink spend waiting in a queue due to @@ -1054,15 +1054,15 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: 
"Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedParallelIOPendingRows := metric.Metadata{ + }) + metaChangefeedParallelIOPendingRows := metric.InitMetadata(metric.Metadata{ Name: "changefeed.parallel_io_pending_rows", Help: "Number of rows which are blocked from being sent due to conflicting in-flight keys.", Measurement: "Messages", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedParallelIOResultQueueNanos := metric.Metadata{ + }) + metaChangefeedParallelIOResultQueueNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.parallel_io_result_queue_nanos", Help: crstrings.UnwrapText(` Time that incoming results from the sink spend waiting in parallel io emitter @@ -1071,29 +1071,29 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedParallelIOInFlightKeys := metric.Metadata{ + }) + metaChangefeedParallelIOInFlightKeys := metric.InitMetadata(metric.Metadata{ Name: "changefeed.parallel_io_in_flight_keys", Help: "The number of keys currently in-flight which may contend with batches pending to be emitted", Measurement: "Keys", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedParallelIOWorkers := metric.Metadata{ + }) + metaChangefeedParallelIOWorkers := metric.InitMetadata(metric.Metadata{ Name: "changefeed.parallel_io_workers", Help: "The number of workers in the ParallelIO", Measurement: "Workers", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedSinkIOInflight := metric.Metadata{ + }) + metaChangefeedSinkIOInflight := metric.InitMetadata(metric.Metadata{ Name: "changefeed.sink_io_inflight", Help: "The number of keys currently inflight as IO requests being sent to the sink", Measurement: "Messages", Unit: metric.Unit_COUNT, Category: 
metric.Metadata_CHANGEFEEDS, - } - metaChangefeedSinkBackpressureNanos := metric.Metadata{ + }) + metaChangefeedSinkBackpressureNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.sink_backpressure_nanos", Help: crstrings.UnwrapText(` Time spent waiting for quota when emitting to the sink (back-pressure). @@ -1103,82 +1103,82 @@ func newAggregateMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) *Ag Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaAggregatorProgress := metric.Metadata{ + }) + metaAggregatorProgress := metric.InitMetadata(metric.Metadata{ Name: "changefeed.aggregator_progress", Help: "The earliest timestamp up to which any aggregator is guaranteed to have emitted all values for", Measurement: "Unix Timestamp Nanoseconds", Unit: metric.Unit_TIMESTAMP_NS, Category: metric.Metadata_CHANGEFEEDS, - } - metaCheckpointProgress := metric.Metadata{ + }) + metaCheckpointProgress := metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint_progress", Help: "The earliest timestamp of any changefeed's persisted checkpoint (values prior to this timestamp will never need to be re-emitted)", Measurement: "Unix Timestamp Nanoseconds", Unit: metric.Unit_TIMESTAMP_NS, Category: metric.Metadata_CHANGEFEEDS, - } - metaLaggingRanges := metric.Metadata{ + }) + metaLaggingRanges := metric.InitMetadata(metric.Metadata{ Name: "changefeed.lagging_ranges", Help: "The number of ranges considered to be lagging behind", Measurement: "Ranges", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaTotalRanges := metric.Metadata{ + }) + metaTotalRanges := metric.InitMetadata(metric.Metadata{ Name: "changefeed.total_ranges", Help: "The total number of ranges being watched by changefeed aggregators", Measurement: "Ranges", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaCloudstorageBufferedBytes := metric.Metadata{ + }) + metaCloudstorageBufferedBytes 
:= metric.InitMetadata(metric.Metadata{ Name: "changefeed.cloudstorage_buffered_bytes", Help: "The number of bytes buffered in cloudstorage sink files which have not been emitted yet", Measurement: "Bytes", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedKafkaThrottlingNanos := metric.Metadata{ + }) + metaChangefeedKafkaThrottlingNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.kafka_throttling_hist_nanos", Help: "Time spent in throttling due to exceeding kafka quota", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaSinkErrors := metric.Metadata{ + }) + metaSinkErrors := metric.InitMetadata(metric.Metadata{ Name: "changefeed.sink_errors", Help: "Number of changefeed errors caused by the sink", Measurement: "Count", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, - } + }) // TODO(dan): This was intended to be a measure of the minimum distance of // any changefeed ahead of its gc ttl threshold, but keeping that correct in // the face of changing zone configs is much harder, so this will have to do // for now. 
- metaChangefeedMaxBehindNanos := metric.Metadata{ + metaChangefeedMaxBehindNanos := metric.InitMetadata(metric.Metadata{ Name: "changefeed.max_behind_nanos", Help: "The most any changefeed's persisted checkpoint is behind the present", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedSpanProgressSkew := metric.Metadata{ + }) + metaChangefeedSpanProgressSkew := metric.InitMetadata(metric.Metadata{ Name: "changefeed.progress_skew.span", Help: "The time difference between the fastest and slowest span's resolved timestamp", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } - metaChangefeedTableProgressSkew := metric.Metadata{ + }) + metaChangefeedTableProgressSkew := metric.InitMetadata(metric.Metadata{ Name: "changefeed.progress_skew.table", Help: "The time difference between the fastest and slowest table's resolved timestamp", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, - } + }) functionalGaugeMinFn := func(childValues []int64) int64 { var min int64 @@ -1548,18 +1548,18 @@ func MakeMetrics(histogramWindow time.Duration, lookup *cidr.Lookup) metric.Stru } var ( - metaMemMaxBytes = metric.Metadata{ + metaMemMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.changefeed.max", Help: "Maximum memory usage across all changefeeds", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaMemCurBytes = metric.Metadata{ + }) + metaMemCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.changefeed.current", Help: "Current memory usage across all changefeeds", Measurement: "Memory", Unit: metric.Unit_BYTES, - } + }) ) // See pkg/sql/mem_metrics.go diff --git a/pkg/ccl/changefeedccl/schemafeed/metrics.go b/pkg/ccl/changefeedccl/schemafeed/metrics.go index 8f60c2c1070e..0b18031cc1ea 100644 --- a/pkg/ccl/changefeedccl/schemafeed/metrics.go +++ 
b/pkg/ccl/changefeedccl/schemafeed/metrics.go @@ -11,21 +11,21 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/metric" ) -var metaChangefeedTableMetadataNanos = metric.Metadata{ +var metaChangefeedTableMetadataNanos = metric.InitMetadata(metric.Metadata{ Name: "changefeed.schemafeed.table_metadata_nanos", Help: "Time blocked while verifying table metadata histories", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_CHANGEFEEDS, -} +}) -var metaChangefeedTableHistoryScans = metric.Metadata{ +var metaChangefeedTableHistoryScans = metric.InitMetadata(metric.Metadata{ Name: "changefeed.schemafeed.table_history_scans", Help: "The number of table history scans during polling", Measurement: "Counts", Unit: metric.Unit_COUNT, Category: metric.Metadata_CHANGEFEEDS, -} +}) // Metrics is a metric.Struct for schemafeed metrics. type Metrics struct { diff --git a/pkg/ccl/changefeedccl/timers/timers.go b/pkg/ccl/changefeedccl/timers/timers.go index a1cb2597c377..f78fda12c09d 100644 --- a/pkg/ccl/changefeedccl/timers/timers.go +++ b/pkg/ccl/changefeedccl/timers/timers.go @@ -43,7 +43,7 @@ func New(histogramWindow time.Duration) *Timers { histogramOptsFor := func(name, labeledName, labelName, desc string) metric.HistogramOptions { return metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: name, Help: desc, Unit: metric.Unit_NANOSECONDS, @@ -51,7 +51,7 @@ func New(histogramWindow time.Duration) *Timers { LabeledName: labeledName, StaticLabels: metric.MakeLabelPairs(metric.LabelName, labelName), Category: metric.Metadata_CHANGEFEEDS, - }, + }), Duration: histogramWindow, Buckets: prometheus.ExponentialBucketsRange(float64(1*time.Microsecond), float64(1*time.Hour), 60), Mode: metric.HistogramModePrometheus, diff --git a/pkg/ccl/changefeedccl/timers/timers_bench_test.go b/pkg/ccl/changefeedccl/timers/timers_bench_test.go index 959af598bd13..cb6f6dc090a3 100644 --- 
a/pkg/ccl/changefeedccl/timers/timers_bench_test.go +++ b/pkg/ccl/changefeedccl/timers/timers_bench_test.go @@ -16,12 +16,12 @@ import ( func BenchmarkTimerHandle(b *testing.B) { histOpts := metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "test.timer", Help: "Test timer", Unit: metric.Unit_NANOSECONDS, Measurement: "Latency", - }, + }), Duration: time.Hour, Buckets: prometheus.ExponentialBucketsRange(float64(1*time.Microsecond), float64(1*time.Hour), 60), Mode: metric.HistogramModePrometheus, diff --git a/pkg/ccl/multitenantccl/tenantcostclient/metrics.go b/pkg/ccl/multitenantccl/tenantcostclient/metrics.go index 85ad6ecc96f3..d5296bc39800 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/metrics.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/metrics.go @@ -17,116 +17,116 @@ import ( ) var ( - metaCurrentBlocked = metric.Metadata{ + metaCurrentBlocked = metric.InitMetadata(metric.Metadata{ Name: "tenant.cost_client.blocked_requests", Help: "Number of requests currently blocked by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } + }) // SQL usage related metrics. 
- metaTotalRU = metric.Metadata{ + metaTotalRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.request_units", Help: "RU consumption", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } - metaTotalKVRU = metric.Metadata{ + }) + metaTotalKVRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.kv_request_units", Help: "RU consumption attributable to KV", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } - metaTotalReadBatches = metric.Metadata{ + }) + metaTotalReadBatches = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.read_batches", Help: "Total number of KV read batches", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalReadRequests = metric.Metadata{ + }) + metaTotalReadRequests = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.read_requests", Help: "Total number of KV read requests", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalReadBytes = metric.Metadata{ + }) + metaTotalReadBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.read_bytes", Help: "Total number of bytes read from KV", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalWriteBatches = metric.Metadata{ + }) + metaTotalWriteBatches = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.write_batches", Help: "Total number of KV write batches", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalWriteRequests = metric.Metadata{ + }) + metaTotalWriteRequests = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.write_requests", Help: "Total number of KV write requests", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalWriteBytes = metric.Metadata{ + }) + metaTotalWriteBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.write_bytes", Help: "Total number of bytes written to KV", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalSQLPodsCPUSeconds = metric.Metadata{ + }) + metaTotalSQLPodsCPUSeconds = 
metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.sql_pods_cpu_seconds", Help: "Total amount of CPU used by SQL pods", Measurement: "CPU Seconds", Unit: metric.Unit_SECONDS, - } - metaTotalPGWireEgressBytes = metric.Metadata{ + }) + metaTotalPGWireEgressBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.pgwire_egress_bytes", Help: "Total number of bytes transferred from a SQL pod to the client", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalExternalIOIngressBytes = metric.Metadata{ + }) + metaTotalExternalIOIngressBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.external_io_ingress_bytes", Help: "Total number of bytes read from external services such as cloud storage providers", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalExternalIOEgressBytes = metric.Metadata{ + }) + metaTotalExternalIOEgressBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.external_io_egress_bytes", Help: "Total number of bytes written to external services such as cloud storage providers", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalCrossRegionNetworkRU = metric.Metadata{ + }) + metaTotalCrossRegionNetworkRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.cross_region_network_ru", Help: "Total number of RUs charged for cross-region network traffic", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } - metaTotalEstimatedKVCPUSeconds = metric.Metadata{ + }) + metaTotalEstimatedKVCPUSeconds = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.estimated_kv_cpu_seconds", Help: "Estimated amount of CPU consumed by a virtual cluster, in the KV layer", Measurement: "CPU Seconds", Unit: metric.Unit_SECONDS, - } - metaTotalEstimatedCPUSeconds = metric.Metadata{ + }) + metaTotalEstimatedCPUSeconds = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.estimated_cpu_seconds", Help: "Estimated amount of CPU consumed by a virtual cluster", 
Measurement: "CPU Seconds", Unit: metric.Unit_SECONDS, - } - metaTotalEstimatedReplicationBytes = metric.Metadata{ + }) + metaTotalEstimatedReplicationBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.estimated_replication_bytes", Help: "Total number of estimated bytes for KV replication traffic", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaProvisionedVcpus = metric.Metadata{ + }) + metaProvisionedVcpus = metric.InitMetadata(metric.Metadata{ Name: "tenant.sql_usage.provisioned_vcpus", Help: "Number of vcpus available to the virtual cluster", Measurement: "Count", Unit: metric.Unit_COUNT, - } + }) ) // metrics manage the metrics used by the tenant cost client. diff --git a/pkg/ccl/multitenantccl/tenantcostserver/metrics.go b/pkg/ccl/multitenantccl/tenantcostserver/metrics.go index 9791e494595b..bd7db4a16cd3 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/metrics.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/metrics.go @@ -54,84 +54,84 @@ var _ metric.Struct = (*Metrics)(nil) func (m *Metrics) MetricStruct() {} var ( - metaTotalRU = metric.Metadata{ + metaTotalRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.request_units", Help: "Total RU consumption", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } - metaTotalKVRU = metric.Metadata{ + }) + metaTotalKVRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.kv_request_units", Help: "RU consumption attributable to KV", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } - metaTotalReadBatches = metric.Metadata{ + }) + metaTotalReadBatches = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.read_batches", Help: "Total number of KV read batches", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalReadRequests = metric.Metadata{ + }) + metaTotalReadRequests = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.read_requests", Help: "Total number of KV read requests", Measurement: "Requests", 
Unit: metric.Unit_COUNT, - } - metaTotalReadBytes = metric.Metadata{ + }) + metaTotalReadBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.read_bytes", Help: "Total number of bytes read from KV", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalWriteBatches = metric.Metadata{ + }) + metaTotalWriteBatches = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.write_batches", Help: "Total number of KV write batches", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalWriteRequests = metric.Metadata{ + }) + metaTotalWriteRequests = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.write_requests", Help: "Total number of KV write requests", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaTotalWriteBytes = metric.Metadata{ + }) + metaTotalWriteBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.write_bytes", Help: "Total number of bytes written to KV", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalSQLPodsCPUSeconds = metric.Metadata{ + }) + metaTotalSQLPodsCPUSeconds = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.sql_pods_cpu_seconds", Help: "Total amount of CPU used by SQL pods", Measurement: "CPU Seconds", Unit: metric.Unit_SECONDS, - } - metaTotalPGWireEgressBytes = metric.Metadata{ + }) + metaTotalPGWireEgressBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.pgwire_egress_bytes", Help: "Total number of bytes transferred from a SQL pod to the client", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalExternalIOIngressBytes = metric.Metadata{ + }) + metaTotalExternalIOIngressBytes = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.external_io_ingress_bytes", Help: "Total number of bytes read from external services such as cloud storage providers", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalExternalIOEgressBytes = metric.Metadata{ + }) + metaTotalExternalIOEgressBytes = 
metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.external_io_egress_bytes", Help: "Total number of bytes written to external services such as cloud storage providers", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - metaTotalCrossRegionNetworkRU = metric.Metadata{ + }) + metaTotalCrossRegionNetworkRU = metric.InitMetadata(metric.Metadata{ Name: "tenant.consumption.cross_region_network_ru", Help: "Total number of RUs charged for cross-region network traffic", Measurement: "Request Units", Unit: metric.Unit_COUNT, - } + }) ) func (m *Metrics) init() { diff --git a/pkg/ccl/sqlproxyccl/acl/file_test.go b/pkg/ccl/sqlproxyccl/acl/file_test.go index 28a1bca91137..3dfaf2166d13 100644 --- a/pkg/ccl/sqlproxyccl/acl/file_test.go +++ b/pkg/ccl/sqlproxyccl/acl/file_test.go @@ -532,7 +532,7 @@ func TestParsingErrorHandling(t *testing.T) { tempDir := t.TempDir() t.Run("error on initial file parse", func(t *testing.T) { - errorCountMetric := metric.NewGauge(metric.Metadata{}) + errorCountMetric := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) filename := filepath.Join(tempDir, "error_file.idk") require.NoError(t, os.WriteFile(filename, []byte("not yaml"), 0777)) @@ -547,7 +547,7 @@ func TestParsingErrorHandling(t *testing.T) { }) t.Run("error after update", func(t *testing.T) { - errorCountMetric := metric.NewGauge(metric.Metadata{}) + errorCountMetric := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) // Create access controller and watcher with a valid file filename := filepath.Join(tempDir, "allowlist.yaml") diff --git a/pkg/ccl/sqlproxyccl/balancer/metrics.go b/pkg/ccl/sqlproxyccl/balancer/metrics.go index b9e27039c99b..08bc428667d0 100644 --- a/pkg/ccl/sqlproxyccl/balancer/metrics.go +++ b/pkg/ccl/sqlproxyccl/balancer/metrics.go @@ -21,24 +21,24 @@ func (Metrics) MetricStruct() {} var _ metric.Struct = Metrics{} var ( - metaRebalanceReqRunning = metric.Metadata{ + metaRebalanceReqRunning = metric.InitMetadata(metric.Metadata{ Name: 
"proxy.balancer.rebalance.running", Help: "Number of rebalance requests currently running", Measurement: "Rebalance Requests", Unit: metric.Unit_COUNT, - } - metaRebalanceReqQueued = metric.Metadata{ + }) + metaRebalanceReqQueued = metric.InitMetadata(metric.Metadata{ Name: "proxy.balancer.rebalance.queued", Help: "Number of rebalance requests currently queued", Measurement: "Rebalance Requests", Unit: metric.Unit_COUNT, - } - metaRebalanceReqTotal = metric.Metadata{ + }) + metaRebalanceReqTotal = metric.InitMetadata(metric.Metadata{ Name: "proxy.balancer.rebalance.total", Help: "Number of rebalance requests that were processed", Measurement: "Rebalance Requests", Unit: metric.Unit_COUNT, - } + }) ) // NewMetrics instantiates the metrics holder for balancer monitoring. diff --git a/pkg/ccl/sqlproxyccl/metrics.go b/pkg/ccl/sqlproxyccl/metrics.go index cfd08e8c5c61..42e2d7f40dcd 100644 --- a/pkg/ccl/sqlproxyccl/metrics.go +++ b/pkg/ccl/sqlproxyccl/metrics.go @@ -66,167 +66,167 @@ const ( ) var ( - metaCurConnCount = metric.Metadata{ + metaCurConnCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.conns", Help: "Number of connections being proxied", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - metaRoutingErrCount = metric.Metadata{ + }) + metaRoutingErrCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.routing", Help: "Number of errors encountered when attempting to route clients", Measurement: "Errors", Unit: metric.Unit_COUNT, - } - metaBackendDownCount = metric.Metadata{ + }) + metaBackendDownCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.backend_down", Help: "Number of errors encountered when connecting to backend servers", Measurement: "Errors", Unit: metric.Unit_COUNT, - } - metaBackendDisconnectCount = metric.Metadata{ + }) + metaBackendDisconnectCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.backend_disconnect", Help: "Number of disconnects initiated by proxied backends", Measurement: 
"Disconnects", Unit: metric.Unit_COUNT, - } - metaIdleDisconnectCount = metric.Metadata{ + }) + metaIdleDisconnectCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.idle_disconnect", Help: "Number of disconnects due to idle timeout", Measurement: "Idle Disconnects", Unit: metric.Unit_COUNT, - } - metaClientDisconnectCount = metric.Metadata{ + }) + metaClientDisconnectCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.client_disconnect", Help: "Number of disconnects initiated by clients", Measurement: "Client Disconnects", Unit: metric.Unit_COUNT, - } - metaAcceptedConnCount = metric.Metadata{ + }) + metaAcceptedConnCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.accepted_conns", Help: "Number of accepted connections", Measurement: "Accepted connections", Unit: metric.Unit_COUNT, - } - metaRefusedConnCount = metric.Metadata{ + }) + metaRefusedConnCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.err.refused_conn", Help: "Number of refused connections initiated by a given IP", Measurement: "Refused", Unit: metric.Unit_COUNT, - } - metaSuccessfulConnCount = metric.Metadata{ + }) + metaSuccessfulConnCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.successful_conns", Help: "Number of successful connections that were/are being proxied", Measurement: "Successful Connections", Unit: metric.Unit_COUNT, - } - metaAuthFailedCount = metric.Metadata{ + }) + metaAuthFailedCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.authentication_failures", Help: "Number of authentication failures", Measurement: "Authentication Failures", Unit: metric.Unit_COUNT, - } - metaExpiredClientConnCount = metric.Metadata{ + }) + metaExpiredClientConnCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.expired_client_conns", Help: "Number of expired client connections", Measurement: "Expired Client Connections", Unit: metric.Unit_COUNT, - } - metaDialTenantLatency = metric.Metadata{ + }) + metaDialTenantLatency = 
metric.InitMetadata(metric.Metadata{ Name: "proxy.dial_tenant.latency", Unit: metric.Unit_NANOSECONDS, Help: "Latency histogram for establishing a tcp connection to a tenant cluster.", Measurement: "Latency", - } - metaDialTenantRetries = metric.Metadata{ + }) + metaDialTenantRetries = metric.InitMetadata(metric.Metadata{ Name: "proxy.dial_tenant.retries", Unit: metric.Unit_COUNT, Help: "Number of retries dialing a tenant cluster.", Measurement: "Retries", - } + }) // Connection migration metrics. // // attempted = success + error_fatal + error_recoverable - metaConnMigrationSuccessCount = metric.Metadata{ + metaConnMigrationSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.conn_migration.success", Help: "Number of successful connection migrations", Measurement: "Connection Migrations", Unit: metric.Unit_COUNT, - } - metaConnMigrationErrorFatalCount = metric.Metadata{ + }) + metaConnMigrationErrorFatalCount = metric.InitMetadata(metric.Metadata{ // When connection migrations errored out, connections will be closed. Name: "proxy.conn_migration.error_fatal", Help: "Number of failed connection migrations which resulted in terminations", Measurement: "Connection Migrations", Unit: metric.Unit_COUNT, - } - metaConnMigrationErrorRecoverableCount = metric.Metadata{ + }) + metaConnMigrationErrorRecoverableCount = metric.InitMetadata(metric.Metadata{ // Connections are recoverable, so they won't be closed. 
Name: "proxy.conn_migration.error_recoverable", Help: "Number of failed connection migrations that were recoverable", Measurement: "Connection Migrations", Unit: metric.Unit_COUNT, - } - metaConnMigrationAttemptedCount = metric.Metadata{ + }) + metaConnMigrationAttemptedCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.conn_migration.attempted", Help: "Number of attempted connection migrations", Measurement: "Connection Migrations", Unit: metric.Unit_COUNT, - } - metaConnMigrationAttemptedLatency = metric.Metadata{ + }) + metaConnMigrationAttemptedLatency = metric.InitMetadata(metric.Metadata{ Name: "proxy.conn_migration.attempted.latency", Help: "Latency histogram for attempted connection migrations", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaConnMigrationTransferResponseMessageSize = metric.Metadata{ + }) + metaConnMigrationTransferResponseMessageSize = metric.InitMetadata(metric.Metadata{ Name: "proxy.conn_migration.transfer_response.message_size", Help: "Message size for the SHOW TRANSFER STATE response", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaQueryCancelReceivedPGWire = metric.Metadata{ + }) + metaQueryCancelReceivedPGWire = metric.InitMetadata(metric.Metadata{ Name: "proxy.query_cancel.received.pgwire", Help: "Number of query cancel requests this proxy received over pgwire", Measurement: "Query Cancel Requests", Unit: metric.Unit_COUNT, - } - metaQueryCancelReceivedHTTP = metric.Metadata{ + }) + metaQueryCancelReceivedHTTP = metric.InitMetadata(metric.Metadata{ Name: "proxy.query_cancel.received.http", Help: "Number of query cancel requests this proxy received over HTTP", Measurement: "Query Cancel Requests", Unit: metric.Unit_COUNT, - } - metaQueryCancelIgnored = metric.Metadata{ + }) + metaQueryCancelIgnored = metric.InitMetadata(metric.Metadata{ Name: "proxy.query_cancel.ignored", Help: "Number of query cancel requests this proxy ignored", Measurement: "Query Cancel Requests", Unit: metric.Unit_COUNT, - } - 
metaQueryCancelForwarded = metric.Metadata{ + }) + metaQueryCancelForwarded = metric.InitMetadata(metric.Metadata{ Name: "proxy.query_cancel.forwarded", Help: "Number of query cancel requests this proxy forwarded to another proxy", Measurement: "Query Cancel Requests", Unit: metric.Unit_COUNT, - } - metaQueryCancelSuccessful = metric.Metadata{ + }) + metaQueryCancelSuccessful = metric.InitMetadata(metric.Metadata{ Name: "proxy.query_cancel.successful", Help: "Number of query cancel requests this proxy forwarded to the tenant", Measurement: "Query Cancel Requests", Unit: metric.Unit_COUNT, - } - metaAccessControlFileErrorCount = metric.Metadata{ + }) + metaAccessControlFileErrorCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.access_control.errors", Help: "Numbers of access control list files that are currently having errors", Measurement: "Access Control File Errors", Unit: metric.Unit_COUNT, - } - metaRoutingMethodCount = metric.Metadata{ + }) + metaRoutingMethodCount = metric.InitMetadata(metric.Metadata{ Name: "proxy.sql.routing_method_count", Help: "Number of occurrences of each proxy routing method", Measurement: "Number of occurrences", Unit: metric.Unit_COUNT, - } + }) ) // makeProxyMetrics instantiates the metrics holder for proxy monitoring. 
diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel index 74c9772e4268..b2368a722db3 100644 --- a/pkg/cli/BUILD.bazel +++ b/pkg/cli/BUILD.bazel @@ -152,8 +152,6 @@ go_library( "//pkg/geo/geos", "//pkg/gossip", "//pkg/internal/codeowners", - "//pkg/internal/metricscan", - "//pkg/internal/reporoot", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", @@ -247,6 +245,7 @@ go_library( "//pkg/util/log/logflags", "//pkg/util/log/logpb", "//pkg/util/log/severity", + "//pkg/util/metric", "//pkg/util/netutil/addr", "//pkg/util/protoutil", "//pkg/util/retry", diff --git a/pkg/cli/gen.go b/pkg/cli/gen.go index fd7aca8e445e..e8a77040eb2f 100644 --- a/pkg/cli/gen.go +++ b/pkg/cli/gen.go @@ -21,8 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/cli/cliflags" "github.com/cockroachdb/cockroach/pkg/cli/clisqlexec" "github.com/cockroachdb/cockroach/pkg/internal/codeowners" - "github.com/cockroachdb/cockroach/pkg/internal/metricscan" - "github.com/cockroachdb/cockroach/pkg/internal/reporoot" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -31,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/ts/catalog" "github.com/cockroachdb/cockroach/pkg/upgrade/upgrades" + "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" slugify "github.com/mozillazg/go-slugify" @@ -69,7 +68,6 @@ type YAMLOutput struct { } var manPath string -var metricOwnersFile string var genManCmd = &cobra.Command{ Use: "man", @@ -387,59 +385,37 @@ func init() { "label to use in the output for the various setting classes") genMetricListCmd.Flags().Bool("essential", false, "only emit essential metrics") - genMetricListCmd.Flags().StringVar(&metricOwnersFile, "metric-owners", "", - "path to pre-computed metric owners YAML file (avoids AST scan + CODEOWNERS)") 
GenCmd.AddCommand(genCmds...) } -// loadMetricOwners fetches a mapping from metric name to owning team. -// When --metric-owners is provided (Bazel genrule), the pre-computed -// file is loaded directly. Otherwise, the mapping is built from an -// AST scan of Go sources combined with CODEOWNERS. -func loadMetricOwners() *metricscan.MetricOwners { - if metricOwnersFile != "" { - data, err := os.ReadFile(metricOwnersFile) - if err != nil { - fmt.Fprintf(os.Stderr, - "reading metric owners failed (owner will be omitted): %v\n", err) - return nil - } - mo, err := metricscan.LoadMetricOwners(data) - if err != nil { - fmt.Fprintf(os.Stderr, - "parsing metric owners failed (owner will be omitted): %v\n", err) - return nil - } - return mo - } - repoRoot := reporoot.Get() - if repoRoot == "" { - return nil - } - scanResult, scanErr := metricscan.Scan(repoRoot) - if scanErr != nil { - fmt.Fprintf(os.Stderr, - "metric source scan failed (owner will be omitted): %v\n", scanErr) - return nil - } - owners, ownersErr := codeowners.DefaultLoadCodeOwners() - if ownersErr != nil { +// loadCodeOwners loads the CODEOWNERS file for resolving metric ownership +// from source file paths embedded in metric.Metadata. +func loadCodeOwners() *codeowners.CodeOwners { + owners, err := codeowners.DefaultLoadCodeOwners() + if err != nil { fmt.Fprintf(os.Stderr, - "CODEOWNERS load failed (owner will be omitted): %v\n", ownersErr) + "CODEOWNERS load failed (owner will be omitted): %v\n", err) return nil } - return metricscan.BuildMetricOwners(scanResult, func(file string) string { - teams := owners.Match(file) - if len(teams) > 0 { - return string(teams[0].Name()) - } + return owners +} + +// resolveOwner returns the owning team for the given source file path +// using the CODEOWNERS rules. Returns an empty string if no owner is found. 
+func resolveOwner(owners *codeowners.CodeOwners, sourceFile string) string { + if owners == nil || sourceFile == "" { return "" - }) + } + teams := owners.Match(sourceFile) + if len(teams) > 0 { + return string(teams[0].Name()) + } + return "" } func generateMetricList(ctx context.Context, skipFiltering bool) (map[string]*Layer, error) { - metricOwners := loadMetricOwners() + owners := loadCodeOwners() sArgs := base.TestServerArgs{ Insecure: true, @@ -470,6 +446,20 @@ func generateMetricList(ctx context.Context, skipFiltering bool) (map[string]*La return nil, errors.AssertionFailedf("could not initialize server in time") } + // Build a map of metric name → source file from the server's metadata. + // The SourceFile field is set by metric.InitMetadata() at definition time. + metadataMap := make(map[string]metric.Metadata) + nodeMd, appMd, srvMd := srv.MetricsRecorder().GetMetricsMetadata(false /* combine */) + for k, v := range nodeMd { + metadataMap[k] = v + } + for k, v := range appMd { + metadataMap[k] = v + } + for k, v := range srvMd { + metadataMap[k] = v + } + var sections []catalog.ChartSection // Retrieve the chart catalog (metric list) for the system tenant over RPC. @@ -553,13 +543,12 @@ func generateMetricList(ctx context.Context, skipFiltering bool) (map[string]*La if visibility == "INTERNAL" { visibility = "" } - // Resolve the owning team for this metric. + // Resolve the owning team for this metric via its source file. 
var owner string - if metricOwners != nil { - exportedName := chart.Metrics[0].ExportedName - owner, _ = metricOwners.Resolve(exportedName) + if md, ok := metadataMap[chart.Metrics[0].Name]; ok { + owner = resolveOwner(owners, md.SourceFile) } - metric := MetricInfo{ + info := MetricInfo{ Name: chart.Metrics[0].Name, ExportedName: chart.Metrics[0].ExportedName, LabeledName: chart.Metrics[0].LabeledName, @@ -573,7 +562,7 @@ func generateMetricList(ctx context.Context, skipFiltering bool) (map[string]*La Visibility: visibility, Owner: owner, } - category.Metrics = append(category.Metrics, metric) + category.Metrics = append(category.Metrics, info) } layer.Categories = append(layer.Categories, category) diff --git a/pkg/cloud/metrics.go b/pkg/cloud/metrics.go index 5c802bf8367b..c15fecb52608 100644 --- a/pkg/cloud/metrics.go +++ b/pkg/cloud/metrics.go @@ -46,83 +46,83 @@ type Metrics struct { // MakeMetrics returns a new instance of Metrics. func MakeMetrics(cidrLookup *cidr.Lookup) metric.Struct { - cloudReaders := metric.Metadata{ + cloudReaders := metric.InitMetadata(metric.Metadata{ Name: "cloud.readers_opened", Help: "Readers opened by all cloud operations", Measurement: "Files", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - cloudReadBytes := metric.Metadata{ + }) + cloudReadBytes := metric.InitMetadata(metric.Metadata{ Name: "cloud.read_bytes", Help: "Bytes read from all cloud operations", Measurement: "Bytes", Unit: metric.Unit_BYTES, MetricType: io_prometheus_client.MetricType_COUNTER, - } - cloudWriters := metric.Metadata{ + }) + cloudWriters := metric.InitMetadata(metric.Metadata{ Name: "cloud.writers_opened", Help: "Writers opened by all cloud operations", Measurement: "files", Unit: metric.Unit_BYTES, MetricType: io_prometheus_client.MetricType_COUNTER, - } - cloudWriteBytes := metric.Metadata{ + }) + cloudWriteBytes := metric.InitMetadata(metric.Metadata{ Name: "cloud.write_bytes", Help: "Bytes written by all cloud 
operations", Measurement: "Bytes", Unit: metric.Unit_BYTES, MetricType: io_prometheus_client.MetricType_COUNTER, - } - listings := metric.Metadata{ + }) + listings := metric.InitMetadata(metric.Metadata{ Name: "cloud.listings", Help: "Listing operations by all cloud operations", Measurement: "Calls", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - listingResults := metric.Metadata{ + }) + listingResults := metric.InitMetadata(metric.Metadata{ Name: "cloud.listing_results", Help: "Listing results by all cloud operations", Measurement: "Results", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - connsOpened := metric.Metadata{ + }) + connsOpened := metric.InitMetadata(metric.Metadata{ Name: "cloud.conns_opened", Help: "HTTP connections opened by cloud operations", Measurement: "Connections", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - connsReused := metric.Metadata{ + }) + connsReused := metric.InitMetadata(metric.Metadata{ Name: "cloud.conns_reused", Help: "HTTP connections reused by cloud operations", Measurement: "Connections", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - tlsHandhakes := metric.Metadata{ + }) + tlsHandhakes := metric.InitMetadata(metric.Metadata{ Name: "cloud.tls_handshakes", Help: "TLS handshakes done by cloud operations", Measurement: "Handshakes", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - cloudOpenReaders := metric.Metadata{ + }) + cloudOpenReaders := metric.InitMetadata(metric.Metadata{ Name: "cloud.open_readers", Help: "Currently open readers for cloud IO", Measurement: "Readers", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } - cloudOpenWriters := metric.Metadata{ + }) + cloudOpenWriters := metric.InitMetadata(metric.Metadata{ Name: "cloud.open_writers", Help: "Currently open writers for cloud IO", Measurement: "Writers", 
Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } + }) return &Metrics{ CreatedReaders: metric.NewCounter(cloudReaders), OpenReaders: metric.NewGauge(cloudOpenReaders), diff --git a/pkg/clusterversion/setting.go b/pkg/clusterversion/setting.go index 4a63a728c134..f19c423415f2 100644 --- a/pkg/clusterversion/setting.go +++ b/pkg/clusterversion/setting.go @@ -269,12 +269,12 @@ var AutoUpgradeSystemClusterFromMeta1Leaseholder = settings.RegisterBoolSetting( true, ) -var metaPreserveDowngradeLastUpdated = metric.Metadata{ +var metaPreserveDowngradeLastUpdated = metric.InitMetadata(metric.Metadata{ Name: "cluster.preserve-downgrade-option.last-updated", Help: "Unix timestamp of last updated time for cluster.preserve_downgrade_option", Measurement: "Timestamp", Unit: metric.Unit_TIMESTAMP_SEC, -} +}) // RegisterOnVersionChangeCallback is a callback function that updates the // cluster.preserve-downgrade-option.last-updated when the diff --git a/pkg/cmd/dev/generate.go b/pkg/cmd/dev/generate.go index f63eefaa30bc..21b3936a9957 100644 --- a/pkg/cmd/dev/generate.go +++ b/pkg/cmd/dev/generate.go @@ -187,11 +187,7 @@ func (d *dev) generateBazel(cmd *cobra.Command) error { } func (d *dev) generateDocs(cmd *cobra.Command) error { - ctx := cmd.Context() - if err := d.generateMetricOwners(ctx); err != nil { - return err - } - return d.generateTarget(ctx, "//pkg/gen:docs") + return d.generateTarget(cmd.Context(), "//pkg/gen:docs") } func (d *dev) generateExecgen(cmd *cobra.Command) error { @@ -199,11 +195,7 @@ func (d *dev) generateExecgen(cmd *cobra.Command) error { } func (d *dev) generateGoAndDocs(cmd *cobra.Command) error { - ctx := cmd.Context() - if err := d.generateMetricOwners(ctx); err != nil { - return err - } - return d.generateTarget(ctx, "//pkg/gen") + return d.generateTarget(cmd.Context(), "//pkg/gen") } func (d *dev) generateGo(cmd *cobra.Command) error { @@ -224,20 +216,6 @@ func (d *dev) generateGoNoCgo(cmd *cobra.Command) error { 
return d.generateTarget(cmd.Context(), "//pkg/gen:code") } -func (d *dev) generateMetricOwners(ctx context.Context) error { - workspace, err := d.getWorkspace(ctx) - if err != nil { - return err - } - // Write directly to the metricscan package where it is embedded - // at build time via go:embed and referenced by the metrics genrule. - yamlPath := filepath.Join(workspace, "pkg", "internal", "metricscan", "metric_owners.yaml") - return d.exec.CommandContextInheritingStdStreams( - ctx, "bazel", "run", "//pkg/cmd/gen-metric-owners", "--", - fmt.Sprintf("-out=%s", yamlPath), - ) -} - func (d *dev) generateLogicTest(cmd *cobra.Command) error { ctx := cmd.Context() workspace, err := d.getWorkspace(ctx) diff --git a/pkg/cmd/dev/testdata/datadriven/generate b/pkg/cmd/dev/testdata/datadriven/generate index eac7a3c12199..103e14e09e1c 100644 --- a/pkg/cmd/dev/testdata/datadriven/generate +++ b/pkg/cmd/dev/testdata/datadriven/generate @@ -42,8 +42,6 @@ crdb-checkout/build/bazelutil/check.sh exec dev generate docs ---- -bazel info workspace --color=no -bazel run //pkg/cmd/gen-metric-owners -- -out=crdb-checkout/pkg/internal/metricscan/metric_owners.yaml bazel run //pkg/gen:docs export COCKROACH_BAZEL_CHECK_FAST=1 bazel info workspace --color=no @@ -52,8 +50,6 @@ crdb-checkout/build/bazelutil/check.sh exec dev gen go_nocgo docs ---- -bazel info workspace --color=no -bazel run //pkg/cmd/gen-metric-owners -- -out=crdb-checkout/pkg/internal/metricscan/metric_owners.yaml bazel run //pkg/gen export COCKROACH_BAZEL_CHECK_FAST=1 bazel info workspace --color=no diff --git a/pkg/cmd/gen-metric-owners/BUILD.bazel b/pkg/cmd/gen-metric-owners/BUILD.bazel deleted file mode 100644 index 00bcb50f1d77..000000000000 --- a/pkg/cmd/gen-metric-owners/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "gen-metric-owners_lib", - srcs = ["main.go"], - importpath = 
"github.com/cockroachdb/cockroach/pkg/cmd/gen-metric-owners", - visibility = ["//visibility:private"], - deps = [ - "//pkg/internal/codeowners", - "//pkg/internal/metricscan", - "//pkg/internal/reporoot", - "@in_gopkg_yaml_v2//:yaml_v2", - ], -) - -go_binary( - name = "gen-metric-owners", - embed = [":gen-metric-owners_lib"], - visibility = ["//visibility:public"], -) diff --git a/pkg/cmd/gen-metric-owners/main.go b/pkg/cmd/gen-metric-owners/main.go deleted file mode 100644 index 528a21fa2a53..000000000000 --- a/pkg/cmd/gen-metric-owners/main.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2026 The Cockroach Authors. -// -// Use of this software is governed by the CockroachDB Software License -// included in the /LICENSE file. - -// gen-metric-owners scans Go source files for metric.Metadata -// definitions, resolves each to its owning team via CODEOWNERS, and -// writes a metric_owners.yaml consumed by `cockroach gen metric-list -// --metric-owners`. -package main - -import ( - "flag" - "fmt" - "os" - - "github.com/cockroachdb/cockroach/pkg/internal/codeowners" - "github.com/cockroachdb/cockroach/pkg/internal/metricscan" - "github.com/cockroachdb/cockroach/pkg/internal/reporoot" - "gopkg.in/yaml.v2" -) - -var out = flag.String("out", "", "path to write the metric owners file") - -func main() { - flag.Parse() - if *out == "" { - fmt.Fprintln(os.Stderr, "usage: gen-metric-owners -out=PATH") - os.Exit(1) - } - // Prefer BUILD_WORKSPACE_DIRECTORY (set by `bazel run`) over - // reporoot.Get(), because the latter resolves to the Bazel - // execroot which does not contain the Go source files. 
- root := os.Getenv("BUILD_WORKSPACE_DIRECTORY") - if root == "" { - root = reporoot.Get() - } - if root == "" { - fmt.Fprintln(os.Stderr, "could not determine repo root") - os.Exit(1) - } - result, err := metricscan.Scan(root) - if err != nil { - fmt.Fprintf(os.Stderr, "metricscan: %v\n", err) - os.Exit(1) - } - owners, err := codeowners.DefaultLoadCodeOwners() - if err != nil { - fmt.Fprintf(os.Stderr, "loading CODEOWNERS: %v\n", err) - os.Exit(1) - } - mo := metricscan.BuildMetricOwners(result, func(file string) string { - teams := owners.Match(file) - if len(teams) > 0 { - return string(teams[0].Name()) - } - return "" - }) - data, err := yaml.Marshal(mo) - if err != nil { - fmt.Fprintf(os.Stderr, "marshaling YAML: %v\n", err) - os.Exit(1) - } - if err := os.WriteFile(*out, data, 0644); err != nil { - fmt.Fprintf(os.Stderr, "writing output: %v\n", err) - os.Exit(1) - } -} diff --git a/pkg/cmd/kv/datadoggen/metrics.go b/pkg/cmd/kv/datadoggen/metrics.go index fe3cf6a05cbd..c508a1b74e4e 100644 --- a/pkg/cmd/kv/datadoggen/metrics.go +++ b/pkg/cmd/kv/datadoggen/metrics.go @@ -714,10 +714,13 @@ func loadMetricsFromGoFile(goFilePath string) (map[string]YAMLMetric, error) { content := string(data) metrics := make(map[string]YAMLMetric) - // Pattern to match metric.Metadata blocks - // Matches: variableName = metric.Metadata{ ... } - // Uses (?s) for DOTALL mode so . matches newlines - metadataPattern := regexp.MustCompile(`(?s)(\w+)\s*=\s*metric\.Metadata\s*\{([^}]+)\}`) + // Pattern to match metric.Metadata blocks in all supported forms: + // variableName = metric.Metadata{ ... } + // variableName = metric.InitMetadata(metric.Metadata{ ... }) + // Uses (?s) for DOTALL mode so . matches newlines. 
+ metadataPattern := regexp.MustCompile( + `(?s)(\w+)\s*=\s*metric\.(?:InitMetadata\(metric\.)?Metadata\s*\{([^}]+)\}`, + ) matches := metadataPattern.FindAllStringSubmatch(content, -1) for _, match := range matches { diff --git a/pkg/cmd/kv/datadoggen/metrics_test.go b/pkg/cmd/kv/datadoggen/metrics_test.go index 85afc1f875a3..dd8df81d2480 100644 --- a/pkg/cmd/kv/datadoggen/metrics_test.go +++ b/pkg/cmd/kv/datadoggen/metrics_test.go @@ -575,26 +575,26 @@ func TestLoadMetricsFromGoFile(t *testing.T) { import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( - metaReplicaCount = metric.Metadata{ + metaReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "replicas", Help: "Number of replicas", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) - metaLatency = metric.Metadata{ + metaLatency = metric.InitMetadata(metric.Metadata{ Name: "request.latency", Help: "Request latency in nanoseconds", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } + }) - metaQueueSize = metric.Metadata{ + metaQueueSize = metric.InitMetadata(metric.Metadata{ Name: "queue.size", Help: "Current queue size", Measurement: "Queue Size", Unit: metric.Unit_COUNT, - } + }) ) // Usage in struct diff --git a/pkg/crosscluster/logical/metrics.go b/pkg/crosscluster/logical/metrics.go index 96a8cc6f5fa2..3e45f7e4c126 100644 --- a/pkg/crosscluster/logical/metrics.go +++ b/pkg/crosscluster/logical/metrics.go @@ -14,7 +14,7 @@ import ( var ( // Top-line metrics. - metaAppliedRowUpdates = metric.Metadata{ + metaAppliedRowUpdates = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_ingested", Help: "Events ingested by all replication jobs", Measurement: "Events", @@ -22,8 +22,8 @@ var ( Category: metric.Metadata_LOGICAL_DATA_REPLICATION, Unit: metric.Unit_COUNT, HowToUse: "track events (e.g. 
updates, deletes, inserts) ingested", - } - metaDLQedRowUpdates = metric.Metadata{ + }) + metaDLQedRowUpdates = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_dlqed", Help: "Row update events sent to DLQ", Measurement: "Failures", @@ -31,8 +31,8 @@ var ( Category: metric.Metadata_LOGICAL_DATA_REPLICATION, Unit: metric.Unit_COUNT, HowToUse: "track events sent to the dead letter queue", - } - metaReceivedLogicalBytes = metric.Metadata{ + }) + metaReceivedLogicalBytes = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.logical_bytes", Help: "Logical bytes (sum of keys + values) received by all replication jobs", Visibility: metric.Metadata_ESSENTIAL, @@ -40,8 +40,8 @@ var ( Measurement: "Bytes", Unit: metric.Unit_BYTES, HowToUse: "track logical data replication throughput", - } - metaCommitToCommitLatency = metric.Metadata{ + }) + metaCommitToCommitLatency = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.commit_latency", Help: crstrings.UnwrapText(` Event commit latency: a difference between event MVCC timestamp @@ -53,8 +53,8 @@ var ( Category: metric.Metadata_LOGICAL_DATA_REPLICATION, Unit: metric.Unit_NANOSECONDS, HowToUse: "track the latency of of applying events from source to destination", - } - metaReplicatedTimeSeconds = metric.Metadata{ + }) + metaReplicatedTimeSeconds = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.replicated_time_seconds", Help: "The replicated time of the logical replication stream in seconds since the unix epoch.", Measurement: "Seconds", @@ -62,28 +62,28 @@ var ( Category: metric.Metadata_LOGICAL_DATA_REPLICATION, Unit: metric.Unit_SECONDS, HowToUse: "Track replication lag via current time - logical_replication.replicated_time_seconds", - } + }) // User-visible health and ops metrics. 
- metaRetryQueueBytes = metric.Metadata{ + metaRetryQueueBytes = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.retry_queue_bytes", Help: "Logical bytes (sum of keys+values) in the retry queue", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRetryQueueEvents = metric.Metadata{ + }) + metaRetryQueueEvents = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.retry_queue_events", Help: "Row update events in the retry queue", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaApplyBatchNanosHist = metric.Metadata{ + }) + metaApplyBatchNanosHist = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.batch_hist_nanos", Help: "Time spent per row flushing a batch", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaInitialApplySuccess = metric.Metadata{ + }) + metaInitialApplySuccess = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_initial_success", Help: "Successful applications of an incoming row update", Measurement: "Successes", @@ -92,8 +92,8 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "initial_success", ), - } - metaInitialApplyFailures = metric.Metadata{ + }) + metaInitialApplyFailures = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_initial_failure", Help: "Failed attempts to apply an incoming row update", Measurement: "Failures", @@ -102,8 +102,8 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "initial_failure", ), - } - metaRetriedApplySuccesses = metric.Metadata{ + }) + metaRetriedApplySuccesses = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_retry_success", Help: "Row update events applied after one or more retries", Measurement: "Successes", @@ -112,8 +112,8 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "retry_success", ), - } - metaRetriedApplyFailures = metric.Metadata{ + }) + metaRetriedApplyFailures = metric.InitMetadata(metric.Metadata{ Name: 
"logical_replication.events_retry_failure", Help: "Failed re-attempts to apply a row update", Measurement: "Failures", @@ -122,9 +122,9 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "retry_failure", ), - } + }) - metaDLQedDueToAge = metric.Metadata{ + metaDLQedDueToAge = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_dlqed_age", Help: "Row update events sent to DLQ due to reaching the maximum time allowed in the retry queue", Measurement: "Failures", @@ -133,8 +133,8 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "dlqed_age", ), - } - metaDLQedDueToQueueSpace = metric.Metadata{ + }) + metaDLQedDueToQueueSpace = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_dlqed_space", Help: "Row update events sent to DLQ due to capacity of the retry queue", Measurement: "Failures", @@ -143,8 +143,8 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "dlqed_space", ), - } - metaDLQedDueToErrType = metric.Metadata{ + }) + metaDLQedDueToErrType = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_dlqed_errtype", Help: "Row update events sent to DLQ due to an error not considered retryable", Measurement: "Failures", @@ -153,77 +153,77 @@ var ( StaticLabels: metric.MakeLabelPairs( metric.LabelType, "dlqed_errtype", ), - } + }) // Internal metrics. 
- metaCheckpointEvents = metric.Metadata{ + metaCheckpointEvents = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.checkpoint_events_ingested", Help: "Checkpoint events ingested by all replication jobs", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaDistSQLReplanCount = metric.Metadata{ + }) + metaDistSQLReplanCount = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.replan_count", Help: "Total number of dist sql replanning events", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaKVUpdateTooOld = metric.Metadata{ + }) + metaKVUpdateTooOld = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.kv.update_too_old", Help: "Total number of updates that were not applied because they were too old", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaKVValueRefreshes = metric.Metadata{ + }) + metaKVValueRefreshes = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.kv.value_refreshes", Help: "Total number of batches that refreshed the previous value", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaScanningRanges = metric.Metadata{ + }) + metaScanningRanges = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.scanning_ranges", Help: "Source side ranges undergoing an initial scan (inaccurate with multiple LDR jobs)", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaCatchupRanges = metric.Metadata{ + }) + metaCatchupRanges = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.catchup_ranges", Help: "Source side ranges undergoing catch up scans (inaccurate with multiple LDR jobs)", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } + }) // Labeled metrics. 
- metaLabeledReplicatedTime = metric.Metadata{ + metaLabeledReplicatedTime = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.replicated_time_by_label", Help: "Replicated time of the logical replication stream by label", Measurement: "Seconds", Unit: metric.Unit_SECONDS, - } - metaLabeledEventsIngetsted = metric.Metadata{ + }) + metaLabeledEventsIngetsted = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_ingested_by_label", Help: "Events ingested by all replication jobs by label", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaLabeledEventsDLQed = metric.Metadata{ + }) + metaLabeledEventsDLQed = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.events_dlqed_by_label", Help: "Row update events sent to DLQ by label", Measurement: "Failures", Unit: metric.Unit_COUNT, - } - metaLabeledScanningRanges = metric.Metadata{ + }) + metaLabeledScanningRanges = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.scanning_ranges_by_label", Help: "Source side ranges undergoing an initial scan", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaLabeledCatchupRanges = metric.Metadata{ + }) + metaLabeledCatchupRanges = metric.InitMetadata(metric.Metadata{ Name: "logical_replication.catchup_ranges_by_label", Help: "Source side ranges undergoing catch up scans", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } + }) ) // Metrics are for production monitoring of logical replication jobs. 
diff --git a/pkg/crosscluster/logical/purgatory_test.go b/pkg/crosscluster/logical/purgatory_test.go index f5be86dfe7f3..aabc920cac08 100644 --- a/pkg/crosscluster/logical/purgatory_test.go +++ b/pkg/crosscluster/logical/purgatory_test.go @@ -36,8 +36,8 @@ func TestPurgatory(t *testing.T) { byteLimit: func() int64 { return 5 << 20 }, delay: func() time.Duration { return 0 }, deadline: func() time.Duration { return 0 }, - bytesGauge: metric.NewGauge(metric.Metadata{}), - eventsGauge: metric.NewGauge(metric.Metadata{}), + bytesGauge: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), + eventsGauge: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), flush: func( _ context.Context, ev []streampb.StreamEvent_KV, _ bool, _ retryEligibility, ) ([]streampb.StreamEvent_KV, int64, error) { diff --git a/pkg/crosscluster/physical/metrics.go b/pkg/crosscluster/physical/metrics.go index 6b21595013a3..81da68726a03 100644 --- a/pkg/crosscluster/physical/metrics.go +++ b/pkg/crosscluster/physical/metrics.go @@ -19,19 +19,19 @@ const ( ) var ( - metaReplicationEventsIngested = metric.Metadata{ + metaReplicationEventsIngested = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.events_ingested", Help: "Events ingested by all replication jobs", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaReplicationResolvedEventsIngested = metric.Metadata{ + }) + metaReplicationResolvedEventsIngested = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.resolved_events_ingested", Help: "Resolved events ingested by all replication jobs", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaReplicationIngestedBytes = metric.Metadata{ + }) + metaReplicationIngestedBytes = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.logical_bytes", Help: "Logical bytes (sum of keys + values) ingested by all replication jobs", Measurement: "Bytes", @@ -39,43 +39,43 @@ var ( Category: metric.Metadata_CROSS_CLUSTER_REPLICATION, Unit: 
metric.Unit_BYTES, HowToUse: "Track PCR throughput", - } - metaReplicationFlushes = metric.Metadata{ + }) + metaReplicationFlushes = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.flushes", Help: "Total flushes across all replication jobs", Measurement: "Flushes", Unit: metric.Unit_COUNT, - } + }) - metaReplicationFlushHistNanos = metric.Metadata{ + metaReplicationFlushHistNanos = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.flush_hist_nanos", Help: "Time spent flushing messages across all replication streams", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaReplicationCommitLatency = metric.Metadata{ + }) + metaReplicationCommitLatency = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.commit_latency", Help: "Event commit latency: a difference between event MVCC timestamp " + "and the time it was flushed into disk. If we batch events, then the difference " + "between the oldest event in the batch and flush is recorded", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaReplicationAdmitLatency = metric.Metadata{ + }) + metaReplicationAdmitLatency = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.admit_latency", Help: "Event admission latency: a difference between event MVCC timestamp " + "and the time it was admitted into ingestion processor", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaStreamsRunning = metric.Metadata{ + }) + metaStreamsRunning = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.running", Help: "Number of currently running replication streams", Measurement: "Replication Streams", Unit: metric.Unit_COUNT, - } + }) - metaReplicatedTimeSeconds = metric.Metadata{ + metaReplicatedTimeSeconds = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.replicated_time_seconds", Help: "The replicated time of the physical replication stream in seconds since the unix epoch.", Measurement: "Seconds", @@ 
-83,49 +83,49 @@ var ( Category: metric.Metadata_CROSS_CLUSTER_REPLICATION, Unit: metric.Unit_SECONDS, HowToUse: "Track replication lag via current time - physical_replication.replicated_time_seconds", - } + }) // This metric would be 0 until cutover begins, and then it will be updated to // the total number of ranges that need to be reverted, and then gradually go // down to 0 again. NB: that the number of ranges is the total number of // ranges left to be reverted, but some may not have writes and therefore the // revert will be a no-op for those ranges. - metaReplicationCutoverProgress = metric.Metadata{ + metaReplicationCutoverProgress = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.failover_progress", Help: "The number of ranges left to revert in order to complete an inflight cutover", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSQLReplanCount = metric.Metadata{ + }) + metaDistSQLReplanCount = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.distsql_replan_count", Help: "Total number of dist sql replanning events", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) - metaScanningRanges = metric.Metadata{ + metaScanningRanges = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.scanning_ranges", Help: "Source side ranges undergoing an initial scan", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaCatchupRanges = metric.Metadata{ + }) + metaCatchupRanges = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.catchup_ranges", Help: "Source side ranges undergoing catch up scans", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaReceiveWaitNanos = metric.Metadata{ + }) + metaReceiveWaitNanos = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.receive_wait_nanos", Help: "Cumulative time spent waiting to receive events from producer; use rate() to compare against flush_wait_nanos", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - 
metaFlushWaitNanos = metric.Metadata{ + }) + metaFlushWaitNanos = metric.InitMetadata(metric.Metadata{ Name: "physical_replication.flush_wait_nanos", Help: "Cumulative time spent waiting to send buffer to flush loop; use rate() to compare against receive_wait_nanos", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) ) // Metrics are for production monitoring of stream ingestion jobs. diff --git a/pkg/featureflag/feature_flags.go b/pkg/featureflag/feature_flags.go index 75dbd1212320..f2307a99dbc9 100644 --- a/pkg/featureflag/feature_flags.go +++ b/pkg/featureflag/feature_flags.go @@ -68,12 +68,12 @@ func CheckEnabled( // metaFeatureDenialMetric is a metric counting the statements denied by a // feature flag. -var metaFeatureDenialMetric = metric.Metadata{ +var metaFeatureDenialMetric = metric.InitMetadata(metric.Metadata{ Name: "sql.feature_flag_denial", Help: "Counter of the number of statements denied by a feature flag", Measurement: "Statements", Unit: metric.Unit_COUNT, -} +}) // DenialMetrics is a struct corresponding to any metrics related to feature // flag denials. Future metrics related to feature flags should be added to diff --git a/pkg/gossip/gossip.go b/pkg/gossip/gossip.go index 3620171803c8..fe8830290f1a 100644 --- a/pkg/gossip/gossip.go +++ b/pkg/gossip/gossip.go @@ -129,84 +129,84 @@ const ( // Gossip metrics counter names. 
var ( - MetaConnectionsIncomingGauge = metric.Metadata{ + MetaConnectionsIncomingGauge = metric.InitMetadata(metric.Metadata{ Name: "gossip.connections.incoming", Help: "Number of active incoming gossip connections", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaConnectionsOutgoingGauge = metric.Metadata{ + }) + MetaConnectionsOutgoingGauge = metric.InitMetadata(metric.Metadata{ Name: "gossip.connections.outgoing", Help: "Number of active outgoing gossip connections", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaConnectionsRefused = metric.Metadata{ + }) + MetaConnectionsRefused = metric.InitMetadata(metric.Metadata{ Name: "gossip.connections.refused", Help: "Number of refused incoming gossip connections", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaMessagesSent = metric.Metadata{ + }) + MetaMessagesSent = metric.InitMetadata(metric.Metadata{ Name: "gossip.messages.sent", Help: "Number of sent gossip messages", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - MetaMessagesReceived = metric.Metadata{ + }) + MetaMessagesReceived = metric.InitMetadata(metric.Metadata{ Name: "gossip.messages.received", Help: "Number of received gossip messages", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - MetaInfosSent = metric.Metadata{ + }) + MetaInfosSent = metric.InitMetadata(metric.Metadata{ Name: "gossip.infos.sent", Help: "Number of sent gossip Info objects", Measurement: "Infos", Unit: metric.Unit_COUNT, - } - MetaInfosReceived = metric.Metadata{ + }) + MetaInfosReceived = metric.InitMetadata(metric.Metadata{ Name: "gossip.infos.received", Help: "Number of received gossip Info objects", Measurement: "Infos", Unit: metric.Unit_COUNT, - } - MetaBytesSent = metric.Metadata{ + }) + MetaBytesSent = metric.InitMetadata(metric.Metadata{ Name: "gossip.bytes.sent", Help: "Number of sent gossip bytes", Measurement: "Gossip Bytes", Unit: metric.Unit_BYTES, - } - MetaBytesReceived = metric.Metadata{ + }) + 
MetaBytesReceived = metric.InitMetadata(metric.Metadata{ Name: "gossip.bytes.received", Help: "Number of received gossip bytes", Measurement: "Gossip Bytes", Unit: metric.Unit_BYTES, - } - MetaCallbacksProcessed = metric.Metadata{ + }) + MetaCallbacksProcessed = metric.InitMetadata(metric.Metadata{ Name: "gossip.callbacks.processed", Help: "Number of gossip callbacks processed", Measurement: "Callbacks", Unit: metric.Unit_COUNT, - } - MetaCallbacksPending = metric.Metadata{ + }) + MetaCallbacksPending = metric.InitMetadata(metric.Metadata{ Name: "gossip.callbacks.pending", Help: "Number of gossip callbacks waiting to be processed", Measurement: "Callbacks", Unit: metric.Unit_COUNT, - } - MetaCallbacksProcessingDuration = metric.Metadata{ + }) + MetaCallbacksProcessingDuration = metric.InitMetadata(metric.Metadata{ Name: "gossip.callbacks.processing_duration", Help: "Duration of gossip callback processing", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } - MetaCallbacksPendingDuration = metric.Metadata{ + }) + MetaCallbacksPendingDuration = metric.InitMetadata(metric.Metadata{ Name: "gossip.callbacks.pending_duration", Help: "Duration of gossip callback queueing to be processed", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } + }) ) // KeyNotPresentError is returned by gossip when queried for a key that doesn't diff --git a/pkg/gossip/infostore_test.go b/pkg/gossip/infostore_test.go index 969a0806c00f..04f842b04f3f 100644 --- a/pkg/gossip/infostore_test.go +++ b/pkg/gossip/infostore_test.go @@ -432,7 +432,7 @@ func TestLeastUseful(t *testing.T) { is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) - set := makeNodeSet(3, metric.NewGauge(metric.Metadata{Name: ""})) + set := makeNodeSet(3, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } diff --git a/pkg/gossip/node_set.go b/pkg/gossip/node_set.go index 
10b5f4f0c99b..7f0dd2d36a12 100644 --- a/pkg/gossip/node_set.go +++ b/pkg/gossip/node_set.go @@ -56,7 +56,7 @@ func (as nodeSet) asSlice() []roachpb.NodeID { // gauge object from the parent. func (as nodeSet) filter(filterFn func(node roachpb.NodeID) bool) nodeSet { avail := makeNodeSet(as.maxSize, - metric.NewGauge(metric.Metadata{Name: "TODO(marc)", Help: "TODO(marc)"})) + metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "TODO(marc)", Help: "TODO(marc)"}))) for node := range as.nodes { if filterFn(node) { avail.addNode(node) diff --git a/pkg/gossip/node_set_test.go b/pkg/gossip/node_set_test.go index cf854fbe40ed..c95305082a9a 100644 --- a/pkg/gossip/node_set_test.go +++ b/pkg/gossip/node_set_test.go @@ -15,7 +15,7 @@ import ( func TestNodeSetMaxSize(t *testing.T) { defer leaktest.AfterTest(t)() - nodes := makeNodeSet(1, metric.NewGauge(metric.Metadata{Name: ""})) + nodes := makeNodeSet(1, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) if !nodes.hasSpace() { t.Error("set should have space") } @@ -27,7 +27,7 @@ func TestNodeSetMaxSize(t *testing.T) { func TestNodeSetHasNode(t *testing.T) { defer leaktest.AfterTest(t)() - nodes := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""})) + nodes := makeNodeSet(2, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) node := roachpb.NodeID(1) if nodes.hasNode(node) { t.Error("node wasn't added and should not be valid") @@ -41,7 +41,7 @@ func TestNodeSetHasNode(t *testing.T) { func TestNodeSetAddAndRemoveNode(t *testing.T) { defer leaktest.AfterTest(t)() - nodes := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""})) + nodes := makeNodeSet(2, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes.addNode(node0) @@ -61,13 +61,13 @@ func TestNodeSetAddAndRemoveNode(t *testing.T) { func TestNodeSetFilter(t *testing.T) { defer leaktest.AfterTest(t)() - nodes1 := makeNodeSet(2, 
metric.NewGauge(metric.Metadata{Name: ""})) + nodes1 := makeNodeSet(2, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes1.addNode(node0) nodes1.addNode(node1) - nodes2 := makeNodeSet(1, metric.NewGauge(metric.Metadata{Name: ""})) + nodes2 := makeNodeSet(1, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) nodes2.addNode(node1) filtered := nodes1.filter(func(a roachpb.NodeID) bool { @@ -80,7 +80,7 @@ func TestNodeSetFilter(t *testing.T) { func TestNodeSetAsSlice(t *testing.T) { defer leaktest.AfterTest(t)() - nodes := makeNodeSet(2, metric.NewGauge(metric.Metadata{Name: ""})) + nodes := makeNodeSet(2, metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: ""}))) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes.addNode(node0) diff --git a/pkg/internal/metricscan/BUILD.bazel b/pkg/internal/metricscan/BUILD.bazel index a4a5611d2892..84de380f87c5 100644 --- a/pkg/internal/metricscan/BUILD.bazel +++ b/pkg/internal/metricscan/BUILD.bazel @@ -1,18 +1,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -exports_files( - ["metric_owners.yaml"], - visibility = ["//docs/generated/metrics:__pkg__"], -) - go_library( name = "metricscan", srcs = [ - "metric_owners_generated.go", # keep "owners.go", "scan.go", ], - embedsrcs = ["metric_owners.yaml"], # keep importpath = "github.com/cockroachdb/cockroach/pkg/internal/metricscan", visibility = ["//visibility:public"], deps = ["//pkg/util/yamlutil"], diff --git a/pkg/internal/metricscan/metric_owners.yaml b/pkg/internal/metricscan/metric_owners.yaml deleted file mode 100644 index a962031b6b47..000000000000 --- a/pkg/internal/metricscan/metric_owners.yaml +++ /dev/null @@ -1,1396 +0,0 @@ -owners: - ^admission_admitted_: cockroachdb/admission-control - ^admission_errored_: cockroachdb/admission-control - ^admission_granter_used_slots_: cockroachdb/admission-control - ^admission_requested_: 
cockroachdb/admission-control - ^admission_wait_durations_: cockroachdb/admission-control - ^admission_wait_queue_length_: cockroachdb/admission-control - ^kv_rangefeed_mem_: cockroachdb/kv - ^sql_mem_: cockroachdb/sql-queries - _current: cockroachdb/sql-queries - _max: cockroachdb/sql-queries - _session_current: cockroachdb/sql-queries - _session_max: cockroachdb/sql-queries - _session_prepared_current: cockroachdb/sql-queries - _session_prepared_max: cockroachdb/sql-queries - _txn_current: cockroachdb/sql-queries - _txn_max: cockroachdb/sql-queries - TODO_marc_: cockroachdb/kv - abortspanbytes: cockroachdb/kv - addsstable_applications: cockroachdb/kv - addsstable_aswrites: cockroachdb/kv - addsstable_copies: cockroachdb/kv - addsstable_delay_total: cockroachdb/kv - addsstable_proposals: cockroachdb/kv - admission.cpu_time_tokens.exhausted_duration_nanos.%s.%s: cockroachdb/admission-control - admission.cpu_time_tokens.per_tenant.admitted_count.%s: cockroachdb/admission-control - admission.cpu_time_tokens.per_tenant.tokens_returned.%s: cockroachdb/admission-control - admission.cpu_time_tokens.per_tenant.tokens_used.%s: cockroachdb/admission-control - admission.cpu_time_tokens.per_tenant.wait_time_nanos.%s: cockroachdb/admission-control - admission.cpu_time_tokens.refill.added.%s.%s: cockroachdb/admission-control - admission.cpu_time_tokens.refill.removed.%s.%s: cockroachdb/admission-control - admission_admitted_snapshot_bytes: cockroachdb/admission-control - admission_cpu_time_tokens_multiplier: cockroachdb/admission-control - admission_cpu_time_tokens_usage_consumed: cockroachdb/admission-control - admission_cpu_time_tokens_usage_returned: cockroachdb/admission-control - admission_elastic_cpu_acquired_nanos: cockroachdb/admission-control - admission_elastic_cpu_available_nanos: cockroachdb/admission-control - admission_elastic_cpu_bypassed_utilization: cockroachdb/admission-control - admission_elastic_cpu_max_available_nanos: cockroachdb/admission-control - 
admission_elastic_cpu_nanos_exhausted_duration: cockroachdb/admission-control - admission_elastic_cpu_over_limit_durations: cockroachdb/admission-control - admission_elastic_cpu_pre_work_nanos: cockroachdb/admission-control - admission_elastic_cpu_returned_nanos: cockroachdb/admission-control - admission_elastic_cpu_utilization: cockroachdb/admission-control - admission_elastic_cpu_utilization_limit: cockroachdb/admission-control - admission_elastic_cpu_yield_delay_nanos: cockroachdb/admission-control - admission_granter_cpu_load_long_period_duration_kv: cockroachdb/admission-control - admission_granter_cpu_load_short_period_duration_kv: cockroachdb/admission-control - admission_granter_disk_write_byte_tokens_exhausted_duration_kv: cockroachdb/admission-control - admission_granter_elastic_io_tokens_available_kv: cockroachdb/admission-control - admission_granter_elastic_io_tokens_exhausted_duration_kv: cockroachdb/admission-control - admission_granter_io_tokens_available_kv: cockroachdb/admission-control - admission_granter_io_tokens_bypassed_kv: cockroachdb/admission-control - admission_granter_io_tokens_exhausted_duration_kv: cockroachdb/admission-control - admission_granter_io_tokens_returned_kv: cockroachdb/admission-control - admission_granter_io_tokens_taken_kv: cockroachdb/admission-control - admission_granter_slot_adjuster_decrements_kv: cockroachdb/admission-control - admission_granter_slot_adjuster_increments_kv: cockroachdb/admission-control - admission_granter_slots_exhausted_duration_kv: cockroachdb/admission-control - admission_granter_total_slots_kv: cockroachdb/admission-control - admission_io_overload: cockroachdb/kv - admission_l0_compacted_bytes_kv: cockroachdb/admission-control - admission_l0_tokens_produced_kv: cockroachdb/admission-control - admission_raft_paused_replicas: cockroachdb/kv - admission_raft_paused_replicas_dropped_msgs: cockroachdb/kv - admission_scheduler_latency_listener_p99_nanos: cockroachdb/admission-control - 
admission_wait_durations_snapshot_ingest: cockroachdb/admission-control - ash_sampler_take_sample_latency: cockroachdb/obs-prs - ash_samples_collected: cockroachdb/obs-prs - ash_work_states_active: cockroachdb/obs-prs - auth_cert_conn_latency: cockroachdb/sql-foundations - auth_cert_san_conn_success: cockroachdb/sql-foundations - auth_cert_san_conn_total: cockroachdb/sql-foundations - auth_gss_conn_latency: cockroachdb/sql-foundations - auth_jwt_conn_latency: cockroachdb/sql-foundations - auth_ldap_conn_latency: cockroachdb/sql-foundations - auth_ldap_conn_latency_internal: cockroachdb/sql-foundations - auth_password_conn_latency: cockroachdb/sql-foundations - auth_scram_conn_latency: cockroachdb/sql-foundations - backup_last_failed_time_kms_inaccessible: cockroachdb/disaster-recovery - batch_requests_bytes: cockroachdb/kv - batch_requests_cross_region_bytes: cockroachdb/kv - batch_requests_cross_zone_bytes: cockroachdb/kv - batch_responses_bytes: cockroachdb/kv - batch_responses_cross_region_bytes: cockroachdb/kv - batch_responses_cross_zone_bytes: cockroachdb/kv - build_timestamp: cockroachdb/obs-prs - capacity: cockroachdb/kv - capacity_available: cockroachdb/kv - capacity_reserved: cockroachdb/kv - capacity_used: cockroachdb/kv - changefeed.%s.messages_pushback_nanos: cockroachdb/cdc - changefeed.buffer_entries.%s: cockroachdb/cdc - changefeed_admit_latency: cockroachdb/cdc - changefeed_aggregator_progress: cockroachdb/cdc - changefeed_backfill_count: cockroachdb/cdc - changefeed_backfill_pending_ranges: cockroachdb/cdc - changefeed_batch_reduction_count: cockroachdb/cdc - changefeed_buffer_entries_allocated_mem: cockroachdb/cdc - changefeed_buffer_entries_in: cockroachdb/cdc - changefeed_buffer_entries_mem_acquired: cockroachdb/cdc - changefeed_buffer_entries_mem_released: cockroachdb/cdc - changefeed_buffer_entries_out: cockroachdb/cdc - changefeed_buffer_entries_released: cockroachdb/cdc - changefeed_buffer_pushback_nanos: cockroachdb/cdc - 
changefeed_checkpoint_create_nanos: cockroachdb/cdc - changefeed_checkpoint_hist_nanos: cockroachdb/cdc - changefeed_checkpoint_progress: cockroachdb/cdc - changefeed_checkpoint_span_count: cockroachdb/cdc - changefeed_checkpoint_timestamp_count: cockroachdb/cdc - changefeed_checkpoint_total_bytes: cockroachdb/cdc - changefeed_cloudstorage_buffered_bytes: cockroachdb/cdc - changefeed_commit_latency: cockroachdb/cdc - changefeed_emitted_batch_sizes: cockroachdb/cdc - changefeed_emitted_bytes: cockroachdb/cdc - changefeed_emitted_messages: cockroachdb/cdc - changefeed_error_retries: cockroachdb/cdc - changefeed_failures: cockroachdb/cdc - changefeed_filtered_messages: cockroachdb/cdc - changefeed_flush_hist_nanos: cockroachdb/cdc - changefeed_flushed_bytes: cockroachdb/cdc - changefeed_flushes: cockroachdb/cdc - changefeed_forwarded_resolved_messages: cockroachdb/cdc - changefeed_frontier_updates: cockroachdb/cdc - changefeed_internal_retry_message_count: cockroachdb/cdc - changefeed_kafka_throttling_hist_nanos: cockroachdb/cdc - changefeed_lagging_ranges: cockroachdb/cdc - changefeed_max_behind_nanos: cockroachdb/cdc - changefeed_message_size_hist: cockroachdb/cdc - changefeed_network_bytes_in: cockroachdb/cdc - changefeed_network_bytes_out: cockroachdb/cdc - changefeed_nprocs_consume_event_nanos: cockroachdb/cdc - changefeed_nprocs_flush_nanos: cockroachdb/cdc - changefeed_nprocs_in_flight_count: cockroachdb/cdc - changefeed_parallel_io_in_flight_keys: cockroachdb/cdc - changefeed_parallel_io_pending_rows: cockroachdb/cdc - changefeed_parallel_io_queue_nanos: cockroachdb/cdc - changefeed_parallel_io_result_queue_nanos: cockroachdb/cdc - changefeed_parallel_io_workers: cockroachdb/cdc - changefeed_progress_skew_span: cockroachdb/cdc - changefeed_progress_skew_table: cockroachdb/cdc - changefeed_queue_time_nanos: cockroachdb/cdc - changefeed_running: cockroachdb/cdc - changefeed_schema_registry_registrations: cockroachdb/cdc - changefeed_schema_registry_retry_count: 
cockroachdb/cdc - changefeed_schemafeed_table_history_scans: cockroachdb/cdc - changefeed_schemafeed_table_metadata_nanos: cockroachdb/cdc - changefeed_sink_backpressure_nanos: cockroachdb/cdc - changefeed_sink_batch_hist_nanos: cockroachdb/cdc - changefeed_sink_errors: cockroachdb/cdc - changefeed_sink_io_inflight: cockroachdb/cdc - changefeed_size_based_flushes: cockroachdb/cdc - changefeed_stage_checkpoint_job_progress_latency: cockroachdb/cdc - changefeed_stage_downstream_client_send_latency: cockroachdb/cdc - changefeed_stage_emit_row_latency: cockroachdb/cdc - changefeed_stage_encode_latency: cockroachdb/cdc - changefeed_stage_frontier_persistence_latency: cockroachdb/cdc - changefeed_stage_kv_feed_buffer_latency: cockroachdb/cdc - changefeed_stage_kv_feed_wait_for_table_event_latency: cockroachdb/cdc - changefeed_stage_pts_create_latency: cockroachdb/cdc - changefeed_stage_pts_manage_error_latency: cockroachdb/cdc - changefeed_stage_pts_manage_latency: cockroachdb/cdc - changefeed_stage_rangefeed_buffer_checkpoint_latency: cockroachdb/cdc - changefeed_stage_rangefeed_buffer_value_latency: cockroachdb/cdc - changefeed_total_ranges: cockroachdb/cdc - changefeed_usage_error_count: cockroachdb/cdc - changefeed_usage_query_duration: cockroachdb/cdc - changefeed_usage_table_bytes: cockroachdb/cdc - clock_offset_meannanos: cockroachdb/kv - clock_offset_medianabsdevnanos: cockroachdb/kv - clock_offset_mediannanos: cockroachdb/kv - clock_offset_stddevnanos: cockroachdb/kv - cloud_conns_opened: cockroachdb/disaster-recovery - cloud_conns_reused: cockroachdb/disaster-recovery - cloud_listing_results: cockroachdb/disaster-recovery - cloud_listings: cockroachdb/disaster-recovery - cloud_open_readers: cockroachdb/disaster-recovery - cloud_open_writers: cockroachdb/disaster-recovery - cloud_read_bytes: cockroachdb/disaster-recovery - cloud_readers_opened: cockroachdb/disaster-recovery - cloud_tls_handshakes: cockroachdb/disaster-recovery - cloud_write_bytes: 
cockroachdb/disaster-recovery - cloud_writers_opened: cockroachdb/disaster-recovery - cluster_preserve_downgrade_option_last_updated: cockroachdb/kv - distsender.rangefeed.retry.%s: cockroachdb/kv - distsender.rpc.%s.sent: cockroachdb/kv - distsender.rpc.err.%s: cockroachdb/kv - distsender_batch_requests_cross_region_bytes: cockroachdb/kv - distsender_batch_requests_cross_zone_bytes: cockroachdb/kv - distsender_batch_requests_replica_addressed_bytes: cockroachdb/kv - distsender_batch_responses_cross_region_bytes: cockroachdb/kv - distsender_batch_responses_cross_zone_bytes: cockroachdb/kv - distsender_batch_responses_replica_addressed_bytes: cockroachdb/kv - distsender_batches: cockroachdb/kv - distsender_batches_async_in_progress: cockroachdb/kv - distsender_batches_async_sent: cockroachdb/kv - distsender_batches_async_throttled: cockroachdb/kv - distsender_batches_async_throttled_cumulative_duration_nanos: cockroachdb/kv - distsender_batches_partial: cockroachdb/kv - distsender_circuit_breaker_replicas_count: cockroachdb/kv - distsender_circuit_breaker_replicas_probes_failure: cockroachdb/kv - distsender_circuit_breaker_replicas_probes_running: cockroachdb/kv - distsender_circuit_breaker_replicas_probes_success: cockroachdb/kv - distsender_circuit_breaker_replicas_requests_cancelled: cockroachdb/kv - distsender_circuit_breaker_replicas_requests_rejected: cockroachdb/kv - distsender_circuit_breaker_replicas_tripped: cockroachdb/kv - distsender_circuit_breaker_replicas_tripped_events: cockroachdb/kv - distsender_errors_inleasetransferbackoffs: cockroachdb/kv - distsender_errors_notleaseholder: cockroachdb/kv - distsender_range_cache_leaseholder_randomized: cockroachdb/kv - distsender_rangefeed_catchup_ranges: cockroachdb/kv - distsender_rangefeed_catchup_ranges_waiting_client_side: cockroachdb/kv - distsender_rangefeed_error_catchup_ranges: cockroachdb/kv - distsender_rangefeed_local_ranges: cockroachdb/kv - distsender_rangefeed_restart_ranges: cockroachdb/kv - 
distsender_rangefeed_total_ranges: cockroachdb/kv - distsender_rangelookups: cockroachdb/kv - distsender_rpc_proxy_err: cockroachdb/kv - distsender_rpc_proxy_forward_err: cockroachdb/kv - distsender_rpc_proxy_forward_sent: cockroachdb/kv - distsender_rpc_proxy_sent: cockroachdb/kv - distsender_rpc_sent: cockroachdb/kv - distsender_rpc_sent_local: cockroachdb/kv - distsender_rpc_sent_nextreplicaerror: cockroachdb/kv - distsender_slow_replicarpcs: cockroachdb/kv - exec_error: cockroachdb/kv - exec_latency: cockroachdb/kv - exec_success: cockroachdb/kv - exportrequest_delay_total: cockroachdb/kv - follower_reads_success_count: cockroachdb/kv - gcbytesage: cockroachdb/kv - go_scheduler_latency: cockroachdb/admission-control - gossip_bytes_received: cockroachdb/kv - gossip_bytes_sent: cockroachdb/kv - gossip_callbacks_pending: cockroachdb/kv - gossip_callbacks_pending_duration: cockroachdb/kv - gossip_callbacks_processed: cockroachdb/kv - gossip_callbacks_processing_duration: cockroachdb/kv - gossip_connections_incoming: cockroachdb/kv - gossip_connections_outgoing: cockroachdb/kv - gossip_connections_refused: cockroachdb/kv - gossip_infos_received: cockroachdb/kv - gossip_infos_sent: cockroachdb/kv - gossip_messages_received: cockroachdb/kv - gossip_messages_sent: cockroachdb/kv - intentage: cockroachdb/kv - intentbytes: cockroachdb/kv - intentcount: cockroachdb/kv - intentresolver_async_throttled: cockroachdb/kv - intentresolver_finalized_txns_failed: cockroachdb/kv - intentresolver_intents_failed: cockroachdb/kv - intents_abort_attempts: cockroachdb/kv - intents_poison_attempts: cockroachdb/kv - intents_resolve_attempts: cockroachdb/kv - jobs: cockroachdb/jobs - jobs.%s.currently_idle: cockroachdb/jobs - jobs.%s.currently_paused: cockroachdb/jobs - jobs.%s.currently_running: cockroachdb/jobs - jobs.%s.expired_pts_records: cockroachdb/jobs - jobs.%s.fail_or_cancel_completed: cockroachdb/jobs - jobs.%s.fail_or_cancel_retry_error: cockroachdb/jobs - 
jobs.%s.protected_age_sec: cockroachdb/jobs - jobs.%s.protected_record_count: cockroachdb/jobs - jobs.%s.resume_completed: cockroachdb/jobs - jobs.%s.resume_failed: cockroachdb/jobs - jobs.%s.resume_retry_error: cockroachdb/jobs - jobs_adopt_iterations: cockroachdb/jobs - jobs_claimed_jobs: cockroachdb/jobs - jobs_expired_pts_records: cockroachdb/jobs - jobs_fail_or_cancel: cockroachdb/jobs - jobs_inspect_issues_found: cockroachdb/sql-queries - jobs_inspect_num_active_spans: cockroachdb/sql-queries - jobs_inspect_runs: cockroachdb/sql-queries - jobs_inspect_runs_with_issues: cockroachdb/sql-queries - jobs_inspect_spans_processed: cockroachdb/sql-queries - jobs_metrics_task_failed: cockroachdb/jobs - jobs_protected_age_sec: cockroachdb/jobs - jobs_protected_record_count: cockroachdb/jobs - jobs_resume: cockroachdb/jobs - jobs_resumed_claimed_jobs: cockroachdb/jobs - jobs_row_level_ttl_delete_duration: cockroachdb/sql-queries - jobs_row_level_ttl_num_active_spans: cockroachdb/sql-queries - jobs_row_level_ttl_num_delete_batch_retries: cockroachdb/sql-queries - jobs_row_level_ttl_rows_deleted: cockroachdb/sql-queries - jobs_row_level_ttl_rows_selected: cockroachdb/sql-queries - jobs_row_level_ttl_select_duration: cockroachdb/sql-queries - jobs_row_level_ttl_span_total_duration: cockroachdb/sql-queries - jobs_row_level_ttl_total_expired_rows: cockroachdb/sql-queries - jobs_row_level_ttl_total_rows: cockroachdb/sql-queries - jobs_running_non_idle: cockroachdb/jobs - keybytes: cockroachdb/kv - keycount: cockroachdb/kv - kv.closed_timestamp.policy.%s: cockroachdb/kv - kv.rangefeed.scheduler.%s.latency: cockroachdb/kv - kv.rangefeed.scheduler.%s.queue_size: cockroachdb/kv - kv_allocator_load_based_lease_transfers_cannot_find_better_candidate: cockroachdb/kv - kv_allocator_load_based_lease_transfers_delta_not_significant: cockroachdb/kv - kv_allocator_load_based_lease_transfers_existing_not_overfull: cockroachdb/kv - 
kv_allocator_load_based_lease_transfers_follow_the_workload: cockroachdb/kv - kv_allocator_load_based_lease_transfers_missing_stats_for_existing_stores: cockroachdb/kv - kv_allocator_load_based_lease_transfers_should_transfer: cockroachdb/kv - kv_allocator_load_based_replica_rebalancing_cannot_find_better_candidate: cockroachdb/kv - kv_allocator_load_based_replica_rebalancing_delta_not_significant: cockroachdb/kv - kv_allocator_load_based_replica_rebalancing_existing_not_overfull: cockroachdb/kv - kv_allocator_load_based_replica_rebalancing_missing_stats_for_existing_store: cockroachdb/kv - kv_allocator_load_based_replica_rebalancing_should_transfer: cockroachdb/kv - kv_closed_timestamp_max_behind_nanos: cockroachdb/kv - kv_closed_timestamp_policy_change: cockroachdb/kv - kv_closed_timestamp_policy_latency_info_missing: cockroachdb/kv - kv_concurrency_avg_lock_hold_duration_nanos: cockroachdb/kv - kv_concurrency_avg_lock_wait_duration_nanos: cockroachdb/kv - kv_concurrency_latch_conflict_wait_durations: cockroachdb/kv - kv_concurrency_lock_wait_queue_waiters: cockroachdb/kv - kv_concurrency_locks: cockroachdb/kv - kv_concurrency_locks_shed_due_to_memory_limit: cockroachdb/kv - kv_concurrency_locks_with_wait_queues: cockroachdb/kv - kv_concurrency_max_lock_hold_duration_nanos: cockroachdb/kv - kv_concurrency_max_lock_wait_duration_nanos: cockroachdb/kv - kv_concurrency_max_lock_wait_queue_waiters_for_lock: cockroachdb/kv - kv_concurrency_num_lock_shed_due_to_memory_limit_events: cockroachdb/kv - kv_concurrency_virtual_resolve_batch_errors: cockroachdb/kv - kv_concurrency_virtual_resolve_batches: cockroachdb/kv - kv_concurrency_virtual_resolve_condense: cockroachdb/kv - kv_concurrency_virtual_resolve_disabled: cockroachdb/kv - kv_concurrency_virtual_resolve_intent: cockroachdb/kv - kv_concurrency_virtual_resolve_intent_range: cockroachdb/kv - kv_loadsplitter_cleardirection: cockroachdb/kv - kv_loadsplitter_nosplitkey: cockroachdb/kv - kv_loadsplitter_popularkey: 
cockroachdb/kv - kv_prober_planning_attempts: cockroachdb/kv - kv_prober_planning_failures: cockroachdb/kv - kv_prober_read_attempts: cockroachdb/kv - kv_prober_read_failures: cockroachdb/kv - kv_prober_read_latency: cockroachdb/kv - kv_prober_write_attempts: cockroachdb/kv - kv_prober_write_failures: cockroachdb/kv - kv_prober_write_latency: cockroachdb/kv - kv_prober_write_quarantine_oldest_duration: cockroachdb/kv - kv_protectedts_get_record_failed: cockroachdb/kv - kv_protectedts_get_record_success: cockroachdb/kv - kv_protectedts_protect_failed: cockroachdb/kv - kv_protectedts_protect_success: cockroachdb/kv - kv_protectedts_reconciliation_errors: cockroachdb/kv - kv_protectedts_reconciliation_num_runs: cockroachdb/kv - kv_protectedts_reconciliation_records_processed: cockroachdb/kv - kv_protectedts_reconciliation_records_removed: cockroachdb/kv - kv_protectedts_release_failed: cockroachdb/kv - kv_protectedts_release_success: cockroachdb/kv - kv_protectedts_update_timestamp_failed: cockroachdb/kv - kv_protectedts_update_timestamp_success: cockroachdb/kv - kv_rangefeed_budget_allocation_blocked: cockroachdb/kv - kv_rangefeed_budget_allocation_failed: cockroachdb/kv - kv_rangefeed_buffered_registrations: cockroachdb/kv - kv_rangefeed_buffered_sender_queue_size: cockroachdb/kv - kv_rangefeed_catchup_scan_nanos: cockroachdb/kv - kv_rangefeed_closed_timestamp_max_behind_nanos: cockroachdb/kv - kv_rangefeed_closed_timestamp_slow_ranges: cockroachdb/kv - kv_rangefeed_closed_timestamp_slow_ranges_cancelled: cockroachdb/kv - kv_rangefeed_mux_stream_send_slow_events: cockroachdb/kv - kv_rangefeed_output_loop_unbuffered_registration_nanos: cockroachdb/kv - kv_rangefeed_processors: cockroachdb/kv - kv_rangefeed_registrations: cockroachdb/kv - kv_rangefeed_scheduled_processor_queue_timeout: cockroachdb/kv - kv_rangefeed_unbuffered_registrations: cockroachdb/kv - kv_replica_circuit_breaker_num_tripped_events: cockroachdb/kv - kv_replica_circuit_breaker_num_tripped_replicas: 
cockroachdb/kv - kv_replica_read_batch_evaluate_dropped_latches_before_eval: cockroachdb/kv - kv_replica_read_batch_evaluate_latency: cockroachdb/kv - kv_replica_read_batch_evaluate_without_interleaving_iter: cockroachdb/kv - kv_replica_write_batch_evaluate_latency: cockroachdb/kv - kv_split_estimated_stats: cockroachdb/kv - kv_split_total_bytes_estimates: cockroachdb/kv - kv_streamer_batches_in_progress: cockroachdb/sql-queries - kv_streamer_batches_sent: cockroachdb/sql-queries - kv_streamer_batches_throttled: cockroachdb/sql-queries - kv_streamer_operators_active: cockroachdb/sql-queries - kv_tenant_rate_limit_current_blocked: cockroachdb/kv - kv_tenant_rate_limit_num_tenants: cockroachdb/kv - kv_tenant_rate_limit_read_batches_admitted: cockroachdb/kv - kv_tenant_rate_limit_read_bytes_admitted: cockroachdb/kv - kv_tenant_rate_limit_read_requests_admitted: cockroachdb/kv - kv_tenant_rate_limit_write_batches_admitted: cockroachdb/kv - kv_tenant_rate_limit_write_bytes_admitted: cockroachdb/kv - kv_tenant_rate_limit_write_requests_admitted: cockroachdb/kv - kvflowcontrol.eval_wait.%s.duration: cockroachdb/admission-control - kvflowcontrol.eval_wait.%s.requests.admitted: cockroachdb/admission-control - kvflowcontrol.eval_wait.%s.requests.bypassed: cockroachdb/admission-control - kvflowcontrol.eval_wait.%s.requests.errored: cockroachdb/admission-control - kvflowcontrol.eval_wait.%s.requests.waiting: cockroachdb/admission-control - kvflowcontrol.streams.%s.%s.blocked_count: cockroachdb/admission-control - kvflowcontrol.streams.%s.%s.total_count: cockroachdb/admission-control - kvflowcontrol.tokens.%s.%s.available: cockroachdb/admission-control - kvflowcontrol.tokens.%s.%s.deducted: cockroachdb/admission-control - kvflowcontrol.tokens.%s.%s.returned: cockroachdb/admission-control - kvflowcontrol.tokens.%s.%s.returned.disconnect: cockroachdb/admission-control - kvflowcontrol.tokens.%s.%s.unaccounted: cockroachdb/admission-control - 
kvflowcontrol.tokens.send.%s.deducted.prevent_send_queue: cockroachdb/admission-control - kvflowcontrol_range_controller_count: cockroachdb/admission-control - kvflowcontrol_send_queue_bytes: cockroachdb/admission-control - kvflowcontrol_send_queue_count: cockroachdb/admission-control - kvflowcontrol_send_queue_prevent_count: cockroachdb/admission-control - kvflowcontrol_send_queue_scheduled_deducted_bytes: cockroachdb/admission-control - kvflowcontrol_send_queue_scheduled_force_flush: cockroachdb/admission-control - kvflowcontrol_tokens_send_elastic_deducted_force_flush_send_queue: cockroachdb/admission-control - leases_epoch: cockroachdb/kv - leases_error: cockroachdb/kv - leases_expiration: cockroachdb/kv - leases_leader: cockroachdb/kv - leases_liveness: cockroachdb/kv - leases_preferences_less_preferred: cockroachdb/kv - leases_preferences_violating: cockroachdb/kv - leases_read_summary_local_compression: cockroachdb/kv - leases_requests_latency: cockroachdb/kv - leases_success: cockroachdb/kv - leases_transfers_error: cockroachdb/kv - leases_transfers_locks_written: cockroachdb/kv - leases_transfers_success: cockroachdb/kv - livebytes: cockroachdb/kv - livecount: cockroachdb/kv - liveness_epochincrements: cockroachdb/kv - liveness_heartbeatfailures: cockroachdb/kv - liveness_heartbeatlatency: cockroachdb/kv - liveness_heartbeatsinflight: cockroachdb/kv - liveness_heartbeatsuccesses: cockroachdb/kv - liveness_livenodes: cockroachdb/kv - lockbytes: cockroachdb/kv - lockcount: cockroachdb/kv - log_buffered_messages_dropped: cockroachdb/obs-prs - log_fluent_sink_conn_attempts: cockroachdb/obs-prs - log_fluent_sink_conn_errors: cockroachdb/obs-prs - log_fluent_sink_write_attempts: cockroachdb/obs-prs - log_fluent_sink_write_errors: cockroachdb/obs-prs - log_messages_count: cockroachdb/obs-prs - log_otlp_sink_grpc_transparent_retries: cockroachdb/obs-prs - log_otlp_sink_write_attempts: cockroachdb/obs-prs - log_otlp_sink_write_errors: cockroachdb/obs-prs - 
logical_replication_batch_hist_nanos: cockroachdb/cdc - logical_replication_catchup_ranges: cockroachdb/cdc - logical_replication_catchup_ranges_by_label: cockroachdb/cdc - logical_replication_checkpoint_events_ingested: cockroachdb/cdc - logical_replication_commit_latency: cockroachdb/cdc - logical_replication_events: cockroachdb/cdc - logical_replication_events_dlqed: cockroachdb/cdc - logical_replication_events_dlqed_age: cockroachdb/cdc - logical_replication_events_dlqed_by_label: cockroachdb/cdc - logical_replication_events_dlqed_errtype: cockroachdb/cdc - logical_replication_events_dlqed_space: cockroachdb/cdc - logical_replication_events_ingested: cockroachdb/cdc - logical_replication_events_ingested_by_label: cockroachdb/cdc - logical_replication_events_initial_failure: cockroachdb/cdc - logical_replication_events_initial_success: cockroachdb/cdc - logical_replication_events_retry_failure: cockroachdb/cdc - logical_replication_events_retry_success: cockroachdb/cdc - logical_replication_kv_update_too_old: cockroachdb/cdc - logical_replication_kv_value_refreshes: cockroachdb/cdc - logical_replication_logical_bytes: cockroachdb/cdc - logical_replication_replan_count: cockroachdb/cdc - logical_replication_replicated_time_by_label: cockroachdb/cdc - logical_replication_replicated_time_seconds: cockroachdb/cdc - logical_replication_retry_queue_bytes: cockroachdb/cdc - logical_replication_retry_queue_events: cockroachdb/cdc - logical_replication_scanning_ranges: cockroachdb/cdc - logical_replication_scanning_ranges_by_label: cockroachdb/cdc - mma_change: cockroachdb/kv - mma_change_external_lease_failure: cockroachdb/kv - mma_change_external_lease_success: cockroachdb/kv - mma_change_external_replica_failure: cockroachdb/kv - mma_change_external_replica_success: cockroachdb/kv - mma_change_rebalance_lease_failure: cockroachdb/kv - mma_change_rebalance_lease_success: cockroachdb/kv - mma_change_rebalance_replica_failure: cockroachdb/kv - 
mma_change_rebalance_replica_success: cockroachdb/kv - mma_dropped: cockroachdb/kv - mma_external_registration_failure: cockroachdb/kv - mma_external_registration_success: cockroachdb/kv - mma_overloaded_store: cockroachdb/kv - mma_overloaded_store_lease_grace_failure: cockroachdb/kv - mma_overloaded_store_lease_grace_success: cockroachdb/kv - mma_overloaded_store_long_dur_failure: cockroachdb/kv - mma_overloaded_store_long_dur_success: cockroachdb/kv - mma_overloaded_store_medium_dur_failure: cockroachdb/kv - mma_overloaded_store_medium_dur_success: cockroachdb/kv - mma_overloaded_store_short_dur_failure: cockroachdb/kv - mma_overloaded_store_short_dur_success: cockroachdb/kv - mma_span_config_normalization: cockroachdb/kv - mma_span_config_normalization_error: cockroachdb/kv - mma_span_config_normalization_soft_error: cockroachdb/kv - mma_store_cpu_capacity: cockroachdb/kv - mma_store_cpu_load: cockroachdb/kv - mma_store_cpu_utilization: cockroachdb/kv - mma_store_disk_capacity: cockroachdb/kv - mma_store_disk_logical: cockroachdb/kv - mma_store_disk_utilization: cockroachdb/kv - mma_store_write_bandwidth: cockroachdb/kv - node_id: cockroachdb/obs-prs - obs_clustermetrics_flush_count: cockroachdb/obs-prs - obs_clustermetrics_flush_errors: cockroachdb/obs-prs - obs_clustermetrics_flush_latency: cockroachdb/obs-prs - obs_clustermetrics_flush_metrics_deleted: cockroachdb/obs-prs - obs_clustermetrics_flush_metrics_written: cockroachdb/obs-prs - obs_metric_export_child_count: cockroachdb/obs-prs - obs_metric_export_codeowner_metric_count: cockroachdb/obs-prs - obs_metric_export_line_count: cockroachdb/obs-prs - obs_metric_export_name_count: cockroachdb/obs-prs - obs_tablemetadata_update_job_duration: cockroachdb/obs-prs - obs_tablemetadata_update_job_errors: cockroachdb/obs-prs - obs_tablemetadata_update_job_runs: cockroachdb/obs-prs - obs_tablemetadata_update_job_table_updates: cockroachdb/obs-prs - physical_replication_admit_latency: cockroachdb/disaster-recovery - 
physical_replication_catchup_ranges: cockroachdb/disaster-recovery - physical_replication_commit_latency: cockroachdb/disaster-recovery - physical_replication_distsql_replan_count: cockroachdb/disaster-recovery - physical_replication_events_ingested: cockroachdb/disaster-recovery - physical_replication_failover_progress: cockroachdb/disaster-recovery - physical_replication_flush_hist_nanos: cockroachdb/disaster-recovery - physical_replication_flush_wait_nanos: cockroachdb/disaster-recovery - physical_replication_flushes: cockroachdb/disaster-recovery - physical_replication_logical_bytes: cockroachdb/disaster-recovery - physical_replication_receive_wait_nanos: cockroachdb/disaster-recovery - physical_replication_replicated_time_seconds: cockroachdb/disaster-recovery - physical_replication_resolved_events_ingested: cockroachdb/disaster-recovery - physical_replication_running: cockroachdb/disaster-recovery - physical_replication_scanning_ranges: cockroachdb/disaster-recovery - proxy_access_control_errors: cockroachdb/sqlproxy-prs - proxy_balancer_rebalance_queued: cockroachdb/sqlproxy-prs - proxy_balancer_rebalance_running: cockroachdb/sqlproxy-prs - proxy_balancer_rebalance_total: cockroachdb/sqlproxy-prs - proxy_conn_migration_attempted: cockroachdb/sqlproxy-prs - proxy_conn_migration_attempted_latency: cockroachdb/sqlproxy-prs - proxy_conn_migration_error_fatal: cockroachdb/sqlproxy-prs - proxy_conn_migration_error_recoverable: cockroachdb/sqlproxy-prs - proxy_conn_migration_success: cockroachdb/sqlproxy-prs - proxy_conn_migration_transfer_response_message_size: cockroachdb/sqlproxy-prs - proxy_dial_tenant_latency: cockroachdb/sqlproxy-prs - proxy_dial_tenant_retries: cockroachdb/sqlproxy-prs - proxy_err_backend_disconnect: cockroachdb/sqlproxy-prs - proxy_err_backend_down: cockroachdb/sqlproxy-prs - proxy_err_client_disconnect: cockroachdb/sqlproxy-prs - proxy_err_idle_disconnect: cockroachdb/sqlproxy-prs - proxy_err_refused_conn: cockroachdb/sqlproxy-prs - 
proxy_err_routing: cockroachdb/sqlproxy-prs - proxy_query_cancel_forwarded: cockroachdb/sqlproxy-prs - proxy_query_cancel_ignored: cockroachdb/sqlproxy-prs - proxy_query_cancel_received_http: cockroachdb/sqlproxy-prs - proxy_query_cancel_received_pgwire: cockroachdb/sqlproxy-prs - proxy_query_cancel_successful: cockroachdb/sqlproxy-prs - proxy_sql_accepted_conns: cockroachdb/sqlproxy-prs - proxy_sql_authentication_failures: cockroachdb/sqlproxy-prs - proxy_sql_conns: cockroachdb/sqlproxy-prs - proxy_sql_expired_client_conns: cockroachdb/sqlproxy-prs - proxy_sql_routing_method_count: cockroachdb/sqlproxy-prs - proxy_sql_successful_conns: cockroachdb/sqlproxy-prs - queue_consistency_pending: cockroachdb/kv - queue_consistency_process_failure: cockroachdb/kv - queue_consistency_process_success: cockroachdb/kv - queue_consistency_processingnanos: cockroachdb/kv - queue_gc_info_abortspanconsidered: cockroachdb/kv - queue_gc_info_abortspangcnum: cockroachdb/kv - queue_gc_info_abortspanscanned: cockroachdb/kv - queue_gc_info_clearrangefailed: cockroachdb/kv - queue_gc_info_clearrangesuccess: cockroachdb/kv - queue_gc_info_enqueuehighpriority: cockroachdb/kv - queue_gc_info_intentsconsidered: cockroachdb/kv - queue_gc_info_intenttxns: cockroachdb/kv - queue_gc_info_numkeysaffected: cockroachdb/kv - queue_gc_info_numrangekeysaffected: cockroachdb/kv - queue_gc_info_pushtxn: cockroachdb/kv - queue_gc_info_resolvefailed: cockroachdb/kv - queue_gc_info_resolvesuccess: cockroachdb/kv - queue_gc_info_resolvetotal: cockroachdb/kv - queue_gc_info_transactionresolvefailed: cockroachdb/kv - queue_gc_info_transactionspangcaborted: cockroachdb/kv - queue_gc_info_transactionspangccommitted: cockroachdb/kv - queue_gc_info_transactionspangcpending: cockroachdb/kv - queue_gc_info_transactionspangcprepared: cockroachdb/kv - queue_gc_info_transactionspangcstaging: cockroachdb/kv - queue_gc_info_transactionspanscanned: cockroachdb/kv - queue_gc_pending: cockroachdb/kv - 
queue_gc_process_failure: cockroachdb/kv - queue_gc_process_success: cockroachdb/kv - queue_gc_processingnanos: cockroachdb/kv - queue_lease_pending: cockroachdb/kv - queue_lease_process_failure: cockroachdb/kv - queue_lease_process_success: cockroachdb/kv - queue_lease_processingnanos: cockroachdb/kv - queue_lease_purgatory: cockroachdb/kv - queue_merge_pending: cockroachdb/kv - queue_merge_process_failure: cockroachdb/kv - queue_merge_process_success: cockroachdb/kv - queue_merge_processingnanos: cockroachdb/kv - queue_merge_purgatory: cockroachdb/kv - queue_raftlog_pending: cockroachdb/kv - queue_raftlog_process_failure: cockroachdb/kv - queue_raftlog_process_success: cockroachdb/kv - queue_raftlog_processingnanos: cockroachdb/kv - queue_raftsnapshot_pending: cockroachdb/kv - queue_raftsnapshot_process_failure: cockroachdb/kv - queue_raftsnapshot_process_success: cockroachdb/kv - queue_raftsnapshot_processingnanos: cockroachdb/kv - queue_replicagc_pending: cockroachdb/kv - queue_replicagc_process_failure: cockroachdb/kv - queue_replicagc_process_success: cockroachdb/kv - queue_replicagc_processingnanos: cockroachdb/kv - queue_replicagc_purgatory: cockroachdb/kv - queue_replicagc_removereplica: cockroachdb/kv - queue_replicate_addnonvoterreplica: cockroachdb/kv - queue_replicate_addreplica: cockroachdb/kv - queue_replicate_addreplica_error: cockroachdb/kv - queue_replicate_addreplica_success: cockroachdb/kv - queue_replicate_addvoterreplica: cockroachdb/kv - queue_replicate_enqueue_add: cockroachdb/kv - queue_replicate_enqueue_failedprecondition: cockroachdb/kv - queue_replicate_enqueue_noaction: cockroachdb/kv - queue_replicate_enqueue_unexpectederror: cockroachdb/kv - queue_replicate_nonvoterpromotions: cockroachdb/kv - queue_replicate_pending: cockroachdb/kv - queue_replicate_priority_inversion_requeue: cockroachdb/kv - queue_replicate_priority_inversion_total: cockroachdb/kv - queue_replicate_process_failure: cockroachdb/kv - queue_replicate_process_success: 
cockroachdb/kv - queue_replicate_processingnanos: cockroachdb/kv - queue_replicate_purgatory: cockroachdb/kv - queue_replicate_queue_full: cockroachdb/kv - queue_replicate_rebalancenonvoterreplica: cockroachdb/kv - queue_replicate_rebalancereplica: cockroachdb/kv - queue_replicate_rebalancevoterreplica: cockroachdb/kv - queue_replicate_removedeadnonvoterreplica: cockroachdb/kv - queue_replicate_removedeadreplica: cockroachdb/kv - queue_replicate_removedeadreplica_error: cockroachdb/kv - queue_replicate_removedeadreplica_success: cockroachdb/kv - queue_replicate_removedeadvoterreplica: cockroachdb/kv - queue_replicate_removedecommissioningnonvoterreplica: cockroachdb/kv - queue_replicate_removedecommissioningreplica: cockroachdb/kv - queue_replicate_removedecommissioningreplica_error: cockroachdb/kv - queue_replicate_removedecommissioningreplica_success: cockroachdb/kv - queue_replicate_removedecommissioningvoterreplica: cockroachdb/kv - queue_replicate_removelearnerreplica: cockroachdb/kv - queue_replicate_removenonvoterreplica: cockroachdb/kv - queue_replicate_removereplica: cockroachdb/kv - queue_replicate_removereplica_error: cockroachdb/kv - queue_replicate_removereplica_success: cockroachdb/kv - queue_replicate_removevoterreplica: cockroachdb/kv - queue_replicate_replacedeadreplica_error: cockroachdb/kv - queue_replicate_replacedeadreplica_success: cockroachdb/kv - queue_replicate_replacedecommissioningreplica_error: cockroachdb/kv - queue_replicate_replacedecommissioningreplica_success: cockroachdb/kv - queue_replicate_transferlease: cockroachdb/kv - queue_replicate_voterdemotions: cockroachdb/kv - queue_split_load_based: cockroachdb/kv - queue_split_pending: cockroachdb/kv - queue_split_process_failure: cockroachdb/kv - queue_split_process_success: cockroachdb/kv - queue_split_processingnanos: cockroachdb/kv - queue_split_purgatory: cockroachdb/kv - queue_split_size_based: cockroachdb/kv - queue_split_span_config_based: cockroachdb/kv - 
queue_tsmaintenance_pending: cockroachdb/kv - queue_tsmaintenance_process_failure: cockroachdb/kv - queue_tsmaintenance_process_success: cockroachdb/kv - queue_tsmaintenance_processingnanos: cockroachdb/kv - raft_commands_pending: cockroachdb/kv - raft_commands_proposed: cockroachdb/kv - raft_commands_reproposed_new_lai: cockroachdb/kv - raft_commands_reproposed_unchanged: cockroachdb/kv - raft_commandsapplied: cockroachdb/kv - raft_dropped: cockroachdb/kv - raft_dropped_leader: cockroachdb/kv - raft_entrycache_accesses: cockroachdb/kv - raft_entrycache_bytes: cockroachdb/kv - raft_entrycache_hits: cockroachdb/kv - raft_entrycache_read_bytes: cockroachdb/kv - raft_entrycache_size: cockroachdb/kv - raft_flows_entered_state_probe: cockroachdb/kv - raft_flows_entered_state_replicate: cockroachdb/kv - raft_flows_entered_state_snapshot: cockroachdb/kv - raft_flows_state_probe: cockroachdb/kv - raft_flows_state_replicate: cockroachdb/kv - raft_flows_state_snapshot: cockroachdb/kv - raft_fortification_resp_accepted: cockroachdb/kv - raft_fortification_resp_rejected: cockroachdb/kv - raft_fortification_skipped_no_support: cockroachdb/kv - raft_heartbeats_pending: cockroachdb/kv - raft_loaded_entries_bytes: cockroachdb/kv - raft_loaded_entries_reserved_bytes: cockroachdb/kv - raft_process_applycommitted_latency: cockroachdb/kv - raft_process_commandcommit_latency: cockroachdb/kv - raft_process_handleready_latency: cockroachdb/kv - raft_process_logcommit_latency: cockroachdb/kv - raft_process_tickingnanos: cockroachdb/kv - raft_process_workingnanos: cockroachdb/kv - raft_quota_pool_percent_used: cockroachdb/kv - raft_rcvd_app: cockroachdb/kv - raft_rcvd_appresp: cockroachdb/kv - raft_rcvd_bytes: cockroachdb/kv - raft_rcvd_cross_region_bytes: cockroachdb/kv - raft_rcvd_cross_zone_bytes: cockroachdb/kv - raft_rcvd_defortifyleader: cockroachdb/kv - raft_rcvd_dropped: cockroachdb/kv - raft_rcvd_dropped_bytes: cockroachdb/kv - raft_rcvd_fortifyleader: cockroachdb/kv - 
raft_rcvd_fortifyleaderresp: cockroachdb/kv - raft_rcvd_heartbeat: cockroachdb/kv - raft_rcvd_heartbeatresp: cockroachdb/kv - raft_rcvd_prevote: cockroachdb/kv - raft_rcvd_prevoteresp: cockroachdb/kv - raft_rcvd_prop: cockroachdb/kv - raft_rcvd_queued_bytes: cockroachdb/kv - raft_rcvd_snap: cockroachdb/kv - raft_rcvd_stepped_bytes: cockroachdb/kv - raft_rcvd_timeoutnow: cockroachdb/kv - raft_rcvd_transferleader: cockroachdb/kv - raft_rcvd_vote: cockroachdb/kv - raft_rcvd_voteresp: cockroachdb/kv - raft_replication_latency: cockroachdb/kv - raft_scheduler_latency: cockroachdb/kv - raft_sent_bytes: cockroachdb/kv - raft_sent_cross_region_bytes: cockroachdb/kv - raft_sent_cross_zone_bytes: cockroachdb/kv - raft_storage_error: cockroachdb/kv - raft_storage_read_bytes: cockroachdb/kv - raft_ticks: cockroachdb/kv - raft_timeoutcampaign: cockroachdb/kv - raft_transport_flow_token_dispatches_dropped: cockroachdb/kv - raft_transport_rcvd: cockroachdb/kv - raft_transport_reverse_rcvd: cockroachdb/kv - raft_transport_reverse_sent: cockroachdb/kv - raft_transport_send_queue_bytes: cockroachdb/kv - raft_transport_send_queue_size: cockroachdb/kv - raft_transport_sends_dropped: cockroachdb/kv - raft_transport_sent: cockroachdb/kv - raftlog_behind: cockroachdb/kv - raftlog_size_max: cockroachdb/kv - raftlog_size_total: cockroachdb/kv - raftlog_truncated: cockroachdb/kv - range_adds: cockroachdb/kv - range_merges: cockroachdb/kv - range_raftleaderremovals: cockroachdb/kv - range_raftleadertransfers: cockroachdb/kv - range_recoveries: cockroachdb/kv - range_removes: cockroachdb/kv - range_snapshots_applied_initial: cockroachdb/kv - range_snapshots_applied_non_voter: cockroachdb/kv - range_snapshots_applied_voter: cockroachdb/kv - range_snapshots_cross_region_rcvd_bytes: cockroachdb/kv - range_snapshots_cross_region_sent_bytes: cockroachdb/kv - range_snapshots_cross_zone_rcvd_bytes: cockroachdb/kv - range_snapshots_cross_zone_sent_bytes: cockroachdb/kv - 
range_snapshots_delegate_failures: cockroachdb/kv - range_snapshots_delegate_in_progress: cockroachdb/kv - range_snapshots_delegate_sent_bytes: cockroachdb/kv - range_snapshots_delegate_successes: cockroachdb/kv - range_snapshots_generated: cockroachdb/kv - range_snapshots_rcvd_bytes: cockroachdb/kv - range_snapshots_rebalancing_rcvd_bytes: cockroachdb/kv - range_snapshots_rebalancing_sent_bytes: cockroachdb/kv - range_snapshots_recovery_rcvd_bytes: cockroachdb/kv - range_snapshots_recovery_sent_bytes: cockroachdb/kv - range_snapshots_recv_failed: cockroachdb/kv - range_snapshots_recv_in_progress: cockroachdb/kv - range_snapshots_recv_queue: cockroachdb/kv - range_snapshots_recv_queue_bytes: cockroachdb/kv - range_snapshots_recv_total_in_progress: cockroachdb/kv - range_snapshots_recv_unusable: cockroachdb/kv - range_snapshots_send_in_progress: cockroachdb/kv - range_snapshots_send_queue: cockroachdb/kv - range_snapshots_send_queue_bytes: cockroachdb/kv - range_snapshots_send_total_in_progress: cockroachdb/kv - range_snapshots_sent_bytes: cockroachdb/kv - range_snapshots_unknown_rcvd_bytes: cockroachdb/kv - range_snapshots_unknown_sent_bytes: cockroachdb/kv - range_snapshots_upreplication_rcvd_bytes: cockroachdb/kv - range_snapshots_upreplication_sent_bytes: cockroachdb/kv - range_splits: cockroachdb/kv - rangekeybytes: cockroachdb/kv - rangekeycount: cockroachdb/kv - ranges: cockroachdb/kv - ranges_decommissioning: cockroachdb/kv - ranges_decommissioning_nudger_enqueue: cockroachdb/kv - ranges_decommissioning_nudger_enqueue_failure: cockroachdb/kv - ranges_decommissioning_nudger_enqueue_success: cockroachdb/kv - ranges_decommissioning_nudger_not_leaseholder_or_invalid_lease: cockroachdb/kv - ranges_decommissioning_nudger_process_failure: cockroachdb/kv - ranges_decommissioning_nudger_process_success: cockroachdb/kv - ranges_overreplicated: cockroachdb/kv - ranges_unavailable: cockroachdb/kv - ranges_underreplicated: cockroachdb/kv - rangevalbytes: cockroachdb/kv - 
rangevalcount: cockroachdb/kv - rebalancing_cpunanospersecond: cockroachdb/kv - rebalancing_lease_transfers: cockroachdb/kv - rebalancing_queriespersecond: cockroachdb/kv - rebalancing_range_rebalances: cockroachdb/kv - rebalancing_readbytespersecond: cockroachdb/kv - rebalancing_readspersecond: cockroachdb/kv - rebalancing_replicas_cpunanospersecond: cockroachdb/kv - rebalancing_replicas_queriespersecond: cockroachdb/kv - rebalancing_requestspersecond: cockroachdb/kv - rebalancing_state_imbalanced_overfull_options_exhausted: cockroachdb/kv - rebalancing_writebytespersecond: cockroachdb/kv - rebalancing_writespersecond: cockroachdb/kv - replicas: cockroachdb/kv - replicas_asleep: cockroachdb/kv - replicas_cpunanospersecond: cockroachdb/kv - replicas_leaders: cockroachdb/kv - replicas_leaders_invalid_lease: cockroachdb/kv - replicas_leaders_not_fortified: cockroachdb/kv - replicas_leaders_not_leaseholders: cockroachdb/kv - replicas_leaseholders: cockroachdb/kv - replicas_quiescent: cockroachdb/kv - replicas_reserved: cockroachdb/kv - replicas_uninitialized: cockroachdb/kv - requests_backpressure_split: cockroachdb/kv - requests_slow_distsender: cockroachdb/kv - requests_slow_latch: cockroachdb/kv - requests_slow_lease: cockroachdb/kv - requests_slow_raft: cockroachdb/kv - rocksdb_block_cache_hits: cockroachdb/kv - rocksdb_block_cache_misses: cockroachdb/kv - rocksdb_block_cache_usage: cockroachdb/kv - rocksdb_bloom_filter_prefix_checked: cockroachdb/kv - rocksdb_bloom_filter_prefix_useful: cockroachdb/kv - rocksdb_compacted_bytes_read: cockroachdb/kv - rocksdb_compacted_bytes_written: cockroachdb/kv - rocksdb_compactions: cockroachdb/kv - rocksdb_encryption_algorithm: cockroachdb/kv - rocksdb_estimated_pending_compaction: cockroachdb/kv - rocksdb_flushed_bytes: cockroachdb/kv - rocksdb_flushes: cockroachdb/kv - rocksdb_ingested_bytes: cockroachdb/kv - rocksdb_memtable_total_size: cockroachdb/kv - rocksdb_num_sstables: cockroachdb/kv - rocksdb_read_amplification: 
cockroachdb/kv - rocksdb_table_readers_mem_estimate: cockroachdb/kv - round_trip_default_class_latency: cockroachdb/kv - round_trip_latency: cockroachdb/kv - round_trip_raft_class_latency: cockroachdb/kv - round_trip_rangefeed_class_latency: cockroachdb/kv - round_trip_system_class_latency: cockroachdb/kv - rpc.method.%s.recv: cockroachdb/kv - rpc_batches_recv: cockroachdb/kv - rpc_client_bytes_egress: cockroachdb/kv - rpc_client_bytes_ingress: cockroachdb/kv - rpc_client_request_duration_nanos: cockroachdb/kv - rpc_client_requests_total: cockroachdb/kv - rpc_connection_avg_round_trip_latency: cockroachdb/kv - rpc_connection_connected: cockroachdb/kv - rpc_connection_failures: cockroachdb/kv - rpc_connection_healthy: cockroachdb/kv - rpc_connection_healthy_nanos: cockroachdb/kv - rpc_connection_heartbeats: cockroachdb/kv - rpc_connection_inactive: cockroachdb/kv - rpc_connection_tcp_rtt: cockroachdb/kv - rpc_connection_tcp_rtt_var: cockroachdb/kv - rpc_connection_unhealthy: cockroachdb/kv - rpc_connection_unhealthy_nanos: cockroachdb/kv - rpc_drpc_enabled: cockroachdb/kv - rpc_drpc_pool_hits_total: cockroachdb/kv - rpc_drpc_pool_miss_total: cockroachdb/kv - rpc_drpc_pool_size: cockroachdb/kv - rpc_drpc_tls_handshake_errors: cockroachdb/kv - rpc_server_request_duration_nanos: cockroachdb/kv - rpc_server_requests_total: cockroachdb/kv - rpc_streams_mux_rangefeed_active: cockroachdb/kv - rpc_streams_mux_rangefeed_recv: cockroachdb/kv - schedules: cockroachdb/jobs - schedules.%s.failed: cockroachdb/jobs - schedules.%s.protected_age_sec: cockroachdb/jobs - schedules.%s.protected_record_count: cockroachdb/jobs - schedules.%s.started: cockroachdb/jobs - schedules.%s.succeeded: cockroachdb/jobs - schedules_BACKUP_last_completed_time: cockroachdb/disaster-recovery - schedules_BACKUP_last_completed_time_by_virtual_cluster: cockroachdb/disaster-recovery - schedules_error: cockroachdb/jobs - schedules_malformed: cockroachdb/jobs - schedules_protected_age_sec: cockroachdb/jobs 
- schedules_protected_record_count: cockroachdb/jobs - schedules_round_jobs_started: cockroachdb/jobs - schedules_round_reschedule_skip: cockroachdb/jobs - schedules_round_reschedule_wait: cockroachdb/jobs - seconds_until_enterprise_license_expiry: cockroachdb/sql-foundations - seconds_until_license_expiry: cockroachdb/sql-foundations - security_certificate_expiration: cockroachdb/security-engineering - security_certificate_expiration_ca: cockroachdb/security-engineering - security_certificate_expiration_ca_client_tenant: cockroachdb/security-engineering - security_certificate_expiration_client: cockroachdb/security-engineering - security_certificate_expiration_client_ca: cockroachdb/security-engineering - security_certificate_expiration_client_tenant: cockroachdb/security-engineering - security_certificate_expiration_node: cockroachdb/security-engineering - security_certificate_expiration_node_client: cockroachdb/security-engineering - security_certificate_expiration_ui: cockroachdb/security-engineering - security_certificate_expiration_ui_ca: cockroachdb/security-engineering - security_certificate_ttl: cockroachdb/security-engineering - security_certificate_ttl_ca: cockroachdb/security-engineering - security_certificate_ttl_ca_client_tenant: cockroachdb/security-engineering - security_certificate_ttl_client: cockroachdb/security-engineering - security_certificate_ttl_client_ca: cockroachdb/security-engineering - security_certificate_ttl_client_tenant: cockroachdb/security-engineering - security_certificate_ttl_node: cockroachdb/security-engineering - security_certificate_ttl_node_client: cockroachdb/security-engineering - security_certificate_ttl_ui: cockroachdb/security-engineering - security_certificate_ttl_ui_ca: cockroachdb/security-engineering - server_http_request_duration_nanos: cockroachdb/unowned - spanconfig_kvsubscriber_oldest_protected_record_nanos: cockroachdb/kv - spanconfig_kvsubscriber_protected_record_count: cockroachdb/kv - 
spanconfig_kvsubscriber_update_behind_nanos: cockroachdb/kv - sql_aggregated_livebytes: cockroachdb/sql-queries - sql_bytesin: cockroachdb/sql-foundations - sql_bytesout: cockroachdb/sql-foundations - sql_call_stored_proc_count: cockroachdb/sql-queries - sql_call_stored_proc_started_count: cockroachdb/sql-queries - sql_conn_failures: cockroachdb/sql-foundations - sql_conn_latency: cockroachdb/sql-foundations - sql_conns: cockroachdb/sql-foundations - sql_conns_waiting_to_hash: cockroachdb/sql-foundations - sql_contention_resolver_failed_resolutions: cockroachdb/obs-prs - sql_contention_resolver_queue_size: cockroachdb/obs-prs - sql_contention_resolver_retries: cockroachdb/obs-prs - sql_contention_txn_id_cache_miss: cockroachdb/obs-prs - sql_contention_txn_id_cache_read: cockroachdb/obs-prs - sql_copy_count: cockroachdb/sql-queries - sql_copy_nonatomic_count: cockroachdb/sql-queries - sql_copy_nonatomic_started_count: cockroachdb/sql-queries - sql_copy_started_count: cockroachdb/sql-queries - sql_count: cockroachdb/sql-queries - sql_crud_query_count: cockroachdb/sql-queries - sql_crud_query_started_count: cockroachdb/sql-queries - sql_ddl_count: cockroachdb/sql-queries - sql_ddl_started_count: cockroachdb/sql-queries - sql_delete_count: cockroachdb/sql-queries - sql_delete_started_count: cockroachdb/sql-queries - sql_disk_distsql_current: cockroachdb/sql-queries - sql_disk_distsql_max: cockroachdb/sql-queries - sql_disk_distsql_spilled_bytes_read: cockroachdb/sql-queries - sql_disk_distsql_spilled_bytes_written: cockroachdb/sql-queries - sql_dist_merge_count: cockroachdb/sql-foundations - sql_dist_merge_first_iter_output_sst_count: cockroachdb/sql-foundations - sql_dist_merge_import_count: cockroachdb/sql-foundations - sql_dist_merge_index_count: cockroachdb/sql-foundations - sql_dist_merge_map_phase_sst_count: cockroachdb/sql-foundations - sql_dist_merge_rpc_memory_reserved_bytes: cockroachdb/sql-foundations - sql_distsql_contended_queries_count: 
cockroachdb/sql-queries - sql_distsql_cumulative_contention_nanos: cockroachdb/sql-queries - sql_distsql_dist_query_rerun_locally_count: cockroachdb/sql-queries - sql_distsql_dist_query_rerun_locally_failure_count: cockroachdb/sql-queries - sql_distsql_distributed_exec_count: cockroachdb/sql-queries - sql_distsql_exec_latency: cockroachdb/sql-queries - sql_distsql_flows_active: cockroachdb/sql-queries - sql_distsql_flows_total: cockroachdb/sql-queries - sql_distsql_parallel_runner_count: cockroachdb/sql-queries - sql_distsql_queries_active: cockroachdb/sql-queries - sql_distsql_queries_spilled: cockroachdb/sql-queries - sql_distsql_queries_total: cockroachdb/sql-queries - sql_distsql_select_count: cockroachdb/sql-queries - sql_distsql_select_distributed_exec_count: cockroachdb/sql-queries - sql_distsql_sequential_runner_count: cockroachdb/sql-queries - sql_distsql_service_latency: cockroachdb/sql-queries - sql_distsql_vec_openfds: cockroachdb/sql-queries - sql_exec_latency: cockroachdb/sql-queries - sql_exec_latency_consistent: cockroachdb/sql-queries - sql_exec_latency_detail: cockroachdb/sql-queries - sql_exec_latency_historical: cockroachdb/sql-queries - sql_failure_count: cockroachdb/sql-queries - sql_feature_flag_denial: cockroachdb/unowned - sql_full_scan_count: cockroachdb/sql-queries - sql_guardrails_full_scan_rejected_count: cockroachdb/sql-queries - sql_guardrails_max_row_size_err_count: cockroachdb/sql-queries - sql_guardrails_max_row_size_log_count: cockroachdb/sql-queries - sql_guardrails_transaction_rows_read_err_count: cockroachdb/sql-queries - sql_guardrails_transaction_rows_read_log_count: cockroachdb/sql-queries - sql_guardrails_transaction_rows_written_err_count: cockroachdb/sql-queries - sql_guardrails_transaction_rows_written_log_count: cockroachdb/sql-queries - sql_hydrated_schema_cache_hits: cockroachdb/sql-foundations - sql_hydrated_schema_cache_misses: cockroachdb/sql-foundations - sql_hydrated_table_cache_hits: cockroachdb/sql-foundations 
- sql_hydrated_table_cache_misses: cockroachdb/sql-foundations - sql_hydrated_type_cache_hits: cockroachdb/sql-foundations - sql_hydrated_type_cache_misses: cockroachdb/sql-foundations - sql_hydrated_udf_cache_hits: cockroachdb/sql-foundations - sql_hydrated_udf_cache_misses: cockroachdb/sql-foundations - sql_insert_count: cockroachdb/sql-queries - sql_insert_started_count: cockroachdb/sql-queries - sql_insights_anomaly_detection_evictions: cockroachdb/obs-prs - sql_insights_anomaly_detection_fingerprints: cockroachdb/obs-prs - sql_insights_anomaly_detection_memory: cockroachdb/obs-prs - sql_leases_active: cockroachdb/sql-foundations - sql_leases_expired: cockroachdb/sql-foundations - sql_leases_lease_cur_bytes_count: cockroachdb/sql-foundations - sql_leases_lease_max_bytes_hist: cockroachdb/sql-foundations - sql_leases_long_wait_for_initial_version: cockroachdb/sql-foundations - sql_leases_long_wait_for_no_version: cockroachdb/sql-foundations - sql_leases_long_wait_for_one_version: cockroachdb/sql-foundations - sql_leases_long_wait_for_two_version_invariant: cockroachdb/sql-foundations - sql_leases_waiting_to_expire: cockroachdb/sql-foundations - sql_mem_bulk_current: cockroachdb/disaster-recovery - sql_mem_bulk_max: cockroachdb/disaster-recovery - sql_mem_changefeed_current: cockroachdb/cdc - sql_mem_changefeed_max: cockroachdb/cdc - sql_mem_distsql_current: cockroachdb/sql-queries - sql_mem_distsql_max: cockroachdb/sql-queries - sql_misc_count: cockroachdb/sql-queries - sql_misc_started_count: cockroachdb/sql-queries - sql_new_conns: cockroachdb/sql-foundations - sql_optimizer_plan_cache_hits: cockroachdb/sql-queries - sql_optimizer_plan_cache_misses: cockroachdb/sql-queries - sql_pgwire_cancel_ignored: cockroachdb/sql-foundations - sql_pgwire_cancel_successful: cockroachdb/sql-foundations - sql_pgwire_cancel_total: cockroachdb/sql-foundations - sql_pgwire_pipeline_count: cockroachdb/sql-foundations - sql_pre_serve_bytesin: cockroachdb/sql-foundations - 
sql_pre_serve_bytesout: cockroachdb/sql-foundations - sql_pre_serve_conn_failures: cockroachdb/sql-foundations - sql_pre_serve_mem_cur: cockroachdb/sql-foundations - sql_pre_serve_mem_max: cockroachdb/sql-foundations - sql_pre_serve_new_conns: cockroachdb/sql-foundations - sql_query_count: cockroachdb/sql-queries - sql_query_started_count: cockroachdb/sql-queries - sql_query_unique_count: cockroachdb/sql-queries - sql_query_with_statement_hints_count: cockroachdb/sql-queries - sql_restart_savepoint_count: cockroachdb/sql-queries - sql_restart_savepoint_release_count: cockroachdb/sql-queries - sql_restart_savepoint_release_started_count: cockroachdb/sql-queries - sql_restart_savepoint_rollback_count: cockroachdb/sql-queries - sql_restart_savepoint_rollback_started_count: cockroachdb/sql-queries - sql_restart_savepoint_started_count: cockroachdb/sql-queries - sql_rls_policies_applied_count: cockroachdb/sql-queries - sql_routine_delete_count: cockroachdb/sql-queries - sql_routine_delete_started_count: cockroachdb/sql-queries - sql_routine_insert_count: cockroachdb/sql-queries - sql_routine_insert_started_count: cockroachdb/sql-queries - sql_routine_select_count: cockroachdb/sql-queries - sql_routine_select_started_count: cockroachdb/sql-queries - sql_routine_update_count: cockroachdb/sql-queries - sql_routine_update_started_count: cockroachdb/sql-queries - sql_savepoint_count: cockroachdb/sql-queries - sql_savepoint_release_count: cockroachdb/sql-queries - sql_savepoint_release_started_count: cockroachdb/sql-queries - sql_savepoint_rollback_count: cockroachdb/sql-queries - sql_savepoint_rollback_started_count: cockroachdb/sql-queries - sql_savepoint_started_count: cockroachdb/sql-queries - sql_schema_changer_object_count: cockroachdb/sql-foundations - sql_schema_invalid_objects: cockroachdb/sql-foundations - sql_select_count: cockroachdb/sql-queries - sql_select_started_count: cockroachdb/sql-queries - sql_service_latency: cockroachdb/sql-queries - 
sql_service_latency_consistent: cockroachdb/sql-queries - sql_service_latency_historical: cockroachdb/sql-queries - sql_started_count: cockroachdb/sql-queries - sql_statement_timeout_count: cockroachdb/sql-queries - sql_statements_active: cockroachdb/sql-queries - sql_statements_auto_retry_count: cockroachdb/sql-queries - sql_statements_bytes_read_count: cockroachdb/sql-queries - sql_statements_index_bytes_written_count: cockroachdb/sql-queries - sql_statements_index_rows_written_count: cockroachdb/sql-queries - sql_statements_rows_read_count: cockroachdb/sql-queries - sql_stats_activity_update_latency: cockroachdb/sql-queries - sql_stats_activity_updates_failed: cockroachdb/sql-queries - sql_stats_activity_updates_successful: cockroachdb/sql-queries - sql_stats_cleanup_rows_removed: cockroachdb/sql-queries - sql_stats_discarded_current: cockroachdb/sql-queries - sql_stats_flush_done_signals_ignored: cockroachdb/sql-queries - sql_stats_flush_fingerprint_count: cockroachdb/sql-queries - sql_stats_flush_latency: cockroachdb/sql-queries - sql_stats_flushes_failed: cockroachdb/sql-queries - sql_stats_flushes_successful: cockroachdb/sql-queries - sql_stats_ingester_num_processed: cockroachdb/obs-prs - sql_stats_ingester_queue_size: cockroachdb/obs-prs - sql_stats_mem_current: cockroachdb/sql-queries - sql_stats_mem_max: cockroachdb/sql-queries - sql_stats_reported_mem_current: cockroachdb/sql-queries - sql_stats_reported_mem_max: cockroachdb/sql-queries - sql_stats_txn_stats_collection_duration: cockroachdb/sql-queries - sql_temp_object_cleaner_active_cleaners: cockroachdb/sql-queries - sql_temp_object_cleaner_schemas_deletion_error: cockroachdb/sql-queries - sql_temp_object_cleaner_schemas_deletion_success: cockroachdb/sql-queries - sql_temp_object_cleaner_schemas_to_delete: cockroachdb/sql-queries - sql_transaction_timeout_count: cockroachdb/sql-queries - sql_txn_abort_count: cockroachdb/sql-queries - sql_txn_auto_retry_count: cockroachdb/sql-queries - 
sql_txn_begin_count: cockroachdb/sql-queries - sql_txn_begin_started_count: cockroachdb/sql-queries - sql_txn_commit_count: cockroachdb/sql-queries - sql_txn_commit_prepared_count: cockroachdb/sql-queries - sql_txn_commit_prepared_started_count: cockroachdb/sql-queries - sql_txn_commit_started_count: cockroachdb/sql-queries - sql_txn_contended_count: cockroachdb/sql-queries - sql_txn_latency: cockroachdb/sql-queries - sql_txn_prepare_count: cockroachdb/sql-queries - sql_txn_prepare_started_count: cockroachdb/sql-queries - sql_txn_rollback_count: cockroachdb/sql-queries - sql_txn_rollback_prepared_count: cockroachdb/sql-queries - sql_txn_rollback_prepared_started_count: cockroachdb/sql-queries - sql_txn_rollback_started_count: cockroachdb/sql-queries - sql_txn_upgraded_iso_level_count: cockroachdb/sql-queries - sql_txns_open: cockroachdb/sql-queries - sql_update_count: cockroachdb/sql-queries - sql_update_started_count: cockroachdb/sql-queries - sql_vecindex_pending_splits_merges: cockroachdb/sql-queries - sql_vecindex_successful_splits: cockroachdb/sql-queries - sqlliveness_is_alive_cache_hits: cockroachdb/server - sqlliveness_is_alive_cache_misses: cockroachdb/server - sqlliveness_sessions_deleted: cockroachdb/server - sqlliveness_sessions_deletion_runs: cockroachdb/server - sqlliveness_write_failures: cockroachdb/server - sqlliveness_write_successes: cockroachdb/server - stmt_svc_latency: cockroachdb/obs-prs - storage.category-%s.bytes-written: cockroachdb/kv - storage.iterator.category-%s.block-load.bytes: cockroachdb/kv - storage.iterator.category-%s.block-load.cached-bytes: cockroachdb/kv - storage.iterator.category-%s.block-load.latency-sum: cockroachdb/kv - storage.l%d-%s: cockroachdb/kv - storage_batch_commit_commit_wait_duration: cockroachdb/kv - storage_batch_commit_count: cockroachdb/kv - storage_batch_commit_duration: cockroachdb/kv - storage_batch_commit_l0_stall_duration: cockroachdb/kv - storage_batch_commit_mem_stall_duration: cockroachdb/kv - 
storage_batch_commit_sem_wait_duration: cockroachdb/kv - storage_batch_commit_wal_queue_wait_duration: cockroachdb/kv - storage_batch_commit_wal_rotation_duration: cockroachdb/kv - storage_block_load_active: cockroachdb/kv - storage_block_load_queued: cockroachdb/kv - storage_bytes_compressed: cockroachdb/kv - storage_bytes_compressed_l5_data: cockroachdb/kv - storage_bytes_compressed_l5_values: cockroachdb/kv - storage_bytes_compressed_l6_data: cockroachdb/kv - storage_bytes_compressed_l6_values: cockroachdb/kv - storage_bytes_decompressed: cockroachdb/kv - storage_bytes_decompressed_l5_data: cockroachdb/kv - storage_bytes_decompressed_l5_values: cockroachdb/kv - storage_bytes_decompressed_l6_data: cockroachdb/kv - storage_bytes_decompressed_l6_values: cockroachdb/kv - storage_checkpoints: cockroachdb/kv - storage_compactions_cancelled_bytes: cockroachdb/kv - storage_compactions_cancelled_count: cockroachdb/kv - storage_compactions_duration: cockroachdb/kv - storage_compactions_keys_pinned_bytes: cockroachdb/kv - storage_compactions_keys_pinned_count: cockroachdb/kv - storage_compression_cr: cockroachdb/kv - storage_compression_minlz_bytes: cockroachdb/kv - storage_compression_minlz_cr: cockroachdb/kv - storage_compression_none_bytes: cockroachdb/kv - storage_compression_snappy_bytes: cockroachdb/kv - storage_compression_snappy_cr: cockroachdb/kv - storage_compression_unknown_bytes: cockroachdb/kv - storage_compression_zstd_bytes: cockroachdb/kv - storage_compression_zstd_cr: cockroachdb/kv - storage_disk_io_time: cockroachdb/kv - storage_disk_iopsinprogress: cockroachdb/kv - storage_disk_read_bytes: cockroachdb/kv - storage_disk_read_count: cockroachdb/kv - storage_disk_read_max_bytespersecond: cockroachdb/kv - storage_disk_read_max_iops: cockroachdb/kv - storage_disk_read_time: cockroachdb/kv - storage_disk_slow: cockroachdb/kv - storage_disk_stalled: cockroachdb/kv - storage_disk_unhealthy_duration: cockroachdb/kv - storage_disk_weightedio_time: cockroachdb/kv 
- storage_disk_write_bytes: cockroachdb/kv - storage_disk_write_count: cockroachdb/kv - storage_disk_write_max_bytespersecond: cockroachdb/kv - storage_disk_write_max_iops: cockroachdb/kv - storage_disk_write_time: cockroachdb/kv - storage_flush_ingest_count: cockroachdb/kv - storage_flush_ingest_table_bytes: cockroachdb/kv - storage_flush_ingest_table_count: cockroachdb/kv - storage_flush_utilization: cockroachdb/kv - storage_ingest_count: cockroachdb/kv - storage_initial_stats_complete: cockroachdb/kv - storage_iterator_block_load_bytes: cockroachdb/kv - storage_iterator_block_load_cached_bytes: cockroachdb/kv - storage_iterator_block_load_read_duration: cockroachdb/kv - storage_iterator_external_seeks: cockroachdb/kv - storage_iterator_external_steps: cockroachdb/kv - storage_iterator_internal_seeks: cockroachdb/kv - storage_iterator_internal_steps: cockroachdb/kv - storage_keys_range_key_set_count: cockroachdb/kv - storage_keys_tombstone_count: cockroachdb/kv - storage_marked_for_compaction_files: cockroachdb/kv - storage_point_deletions_bytes: cockroachdb/kv - storage_range_deletions_bytes: cockroachdb/kv - storage_secondary_cache_count: cockroachdb/kv - storage_secondary_cache_evictions: cockroachdb/kv - storage_secondary_cache_reads_full_hit: cockroachdb/kv - storage_secondary_cache_reads_multi_block: cockroachdb/kv - storage_secondary_cache_reads_multi_shard: cockroachdb/kv - storage_secondary_cache_reads_no_hit: cockroachdb/kv - storage_secondary_cache_reads_partial_hit: cockroachdb/kv - storage_secondary_cache_reads_total: cockroachdb/kv - storage_secondary_cache_size: cockroachdb/kv - storage_secondary_cache_write_back_failures: cockroachdb/kv - storage_shared_storage_read: cockroachdb/kv - storage_shared_storage_write: cockroachdb/kv - storage_single_delete_ineffectual: cockroachdb/kv - storage_single_delete_invariant_violation: cockroachdb/kv - storage_sstable_remote_bytes: cockroachdb/kv - storage_sstable_remote_count: cockroachdb/kv - 
storage_sstable_zombie_bytes: cockroachdb/kv - storage_value_separation_blob_files_count: cockroachdb/kv - storage_value_separation_blob_files_size: cockroachdb/kv - storage_value_separation_value_bytes_referenced: cockroachdb/kv - storage_value_separation_value_bytes_unreferenced: cockroachdb/kv - storage_value_separation_value_retrieval_count: cockroachdb/kv - storage_wal_bytes_in: cockroachdb/kv - storage_wal_bytes_written: cockroachdb/kv - storage_wal_failover_primary_duration: cockroachdb/kv - storage_wal_failover_secondary_disk_available: cockroachdb/kv - storage_wal_failover_secondary_disk_capacity: cockroachdb/kv - storage_wal_failover_secondary_duration: cockroachdb/kv - storage_wal_failover_switch_count: cockroachdb/kv - storage_wal_failover_write_and_sync_latency: cockroachdb/kv - storage_wal_fsync_latency: cockroachdb/kv - storage_wal_secondary_file_op_latency: cockroachdb/kv - storage_write_amplification: cockroachdb/kv - storage_write_stall_nanos: cockroachdb/kv - storage_write_stalls: cockroachdb/kv - storeliveness_callbacks_processing_duration: cockroachdb/kv - storeliveness_heartbeat_failures: cockroachdb/kv - storeliveness_heartbeat_persist_duration: cockroachdb/kv - storeliveness_heartbeat_successes: cockroachdb/kv - storeliveness_message_handle_failures: cockroachdb/kv - storeliveness_message_handle_persist_duration: cockroachdb/kv - storeliveness_message_handle_successes: cockroachdb/kv - storeliveness_support_for_stores: cockroachdb/kv - storeliveness_support_from_stores: cockroachdb/kv - storeliveness_support_withdraw_failures: cockroachdb/kv - storeliveness_support_withdraw_persist_duration: cockroachdb/kv - storeliveness_support_withdraw_successes: cockroachdb/kv - storeliveness_transport_batches_received: cockroachdb/kv - storeliveness_transport_batches_sent: cockroachdb/kv - storeliveness_transport_receive_dropped: cockroachdb/kv - storeliveness_transport_receive_queue_bytes: cockroachdb/kv - storeliveness_transport_receive_queue_size: 
cockroachdb/kv - storeliveness_transport_received: cockroachdb/kv - storeliveness_transport_send_dropped: cockroachdb/kv - storeliveness_transport_send_queue_bytes: cockroachdb/kv - storeliveness_transport_send_queue_idle: cockroachdb/kv - storeliveness_transport_send_queue_size: cockroachdb/kv - storeliveness_transport_sent: cockroachdb/kv - subsume_locks_written: cockroachdb/kv - sys_cgo_allocbytes: cockroachdb/obs-prs - sys_cgo_totalbytes: cockroachdb/obs-prs - sys_cgocalls: cockroachdb/obs-prs - sys_cpu_combined_percent_normalized: cockroachdb/obs-prs - sys_cpu_host_combined_percent_normalized: cockroachdb/obs-prs - sys_cpu_now_ns: cockroachdb/obs-prs - sys_cpu_sys_ns: cockroachdb/obs-prs - sys_cpu_sys_percent: cockroachdb/obs-prs - sys_cpu_user_ns: cockroachdb/obs-prs - sys_cpu_user_percent: cockroachdb/obs-prs - sys_fd_open: cockroachdb/obs-prs - sys_fd_softlimit: cockroachdb/obs-prs - sys_gc_assist_enabled: cockroachdb/obs-prs - sys_gc_assist_ns: cockroachdb/obs-prs - sys_gc_count: cockroachdb/obs-prs - sys_gc_pause_ns: cockroachdb/obs-prs - sys_gc_pause_percent: cockroachdb/obs-prs - sys_gc_stop_ns: cockroachdb/obs-prs - sys_gc_total_ns: cockroachdb/obs-prs - sys_go_allocbytes: cockroachdb/obs-prs - sys_go_heap_allocbytes: cockroachdb/obs-prs - sys_go_heap_heapfragmentbytes: cockroachdb/obs-prs - sys_go_heap_heapreleasedbytes: cockroachdb/obs-prs - sys_go_heap_heapreservedbytes: cockroachdb/obs-prs - sys_go_heap_livebytes: cockroachdb/obs-prs - sys_go_heap_objects: cockroachdb/obs-prs - sys_go_limitbytes: cockroachdb/obs-prs - sys_go_pause_other_ns: cockroachdb/obs-prs - sys_go_stack_systembytes: cockroachdb/obs-prs - sys_go_stop_other_ns: cockroachdb/obs-prs - sys_go_totalbytes: cockroachdb/obs-prs - sys_goroutines: cockroachdb/obs-prs - sys_host_disk_io_time: cockroachdb/obs-prs - sys_host_disk_iopsinprogress: cockroachdb/obs-prs - sys_host_disk_read_bytes: cockroachdb/obs-prs - sys_host_disk_read_count: cockroachdb/obs-prs - sys_host_disk_read_time: 
cockroachdb/obs-prs - sys_host_disk_weightedio_time: cockroachdb/obs-prs - sys_host_disk_write_bytes: cockroachdb/obs-prs - sys_host_disk_write_count: cockroachdb/obs-prs - sys_host_disk_write_time: cockroachdb/obs-prs - sys_host_net_recv_bytes: cockroachdb/obs-prs - sys_host_net_recv_drop: cockroachdb/obs-prs - sys_host_net_recv_err: cockroachdb/obs-prs - sys_host_net_recv_packets: cockroachdb/obs-prs - sys_host_net_send_bytes: cockroachdb/obs-prs - sys_host_net_send_drop: cockroachdb/obs-prs - sys_host_net_send_err: cockroachdb/obs-prs - sys_host_net_send_packets: cockroachdb/obs-prs - sys_host_net_send_tcp_fast_retrans_segs: cockroachdb/obs-prs - sys_host_net_send_tcp_loss_probes: cockroachdb/obs-prs - sys_host_net_send_tcp_retrans_segs: cockroachdb/obs-prs - sys_host_net_send_tcp_slow_start_retrans: cockroachdb/obs-prs - sys_host_net_send_tcp_timeouts: cockroachdb/obs-prs - sys_rss: cockroachdb/obs-prs - sys_runnable_goroutines_per_cpu: cockroachdb/obs-prs - sys_totalmem: cockroachdb/obs-prs - sys_uptime: cockroachdb/obs-prs - sysbytes: cockroachdb/kv - syscount: cockroachdb/kv - tenant_consumption_cross_region_network_ru: cockroachdb/server - tenant_consumption_external_io_egress_bytes: cockroachdb/server - tenant_consumption_external_io_ingress_bytes: cockroachdb/server - tenant_consumption_kv_request_units: cockroachdb/server - tenant_consumption_pgwire_egress_bytes: cockroachdb/server - tenant_consumption_read_batches: cockroachdb/server - tenant_consumption_read_bytes: cockroachdb/server - tenant_consumption_read_requests: cockroachdb/server - tenant_consumption_request_units: cockroachdb/server - tenant_consumption_sql_pods_cpu_seconds: cockroachdb/server - tenant_consumption_write_batches: cockroachdb/server - tenant_consumption_write_bytes: cockroachdb/server - tenant_consumption_write_requests: cockroachdb/server - tenant_cost_client_blocked_requests: cockroachdb/server - tenant_sql_usage_cross_region_network_ru: cockroachdb/server - 
tenant_sql_usage_estimated_cpu_seconds: cockroachdb/server - tenant_sql_usage_estimated_kv_cpu_seconds: cockroachdb/server - tenant_sql_usage_estimated_replication_bytes: cockroachdb/server - tenant_sql_usage_external_io_egress_bytes: cockroachdb/server - tenant_sql_usage_external_io_ingress_bytes: cockroachdb/server - tenant_sql_usage_kv_request_units: cockroachdb/server - tenant_sql_usage_pgwire_egress_bytes: cockroachdb/server - tenant_sql_usage_provisioned_vcpus: cockroachdb/server - tenant_sql_usage_read_batches: cockroachdb/server - tenant_sql_usage_read_bytes: cockroachdb/server - tenant_sql_usage_read_requests: cockroachdb/server - tenant_sql_usage_request_units: cockroachdb/server - tenant_sql_usage_sql_pods_cpu_seconds: cockroachdb/server - tenant_sql_usage_write_batches: cockroachdb/server - tenant_sql_usage_write_bytes: cockroachdb/server - tenant_sql_usage_write_requests: cockroachdb/server - timeseries_write_bytes: cockroachdb/kv - timeseries_write_errors: cockroachdb/kv - timeseries_write_samples: cockroachdb/kv - totalbytes: cockroachdb/kv - tscache_skl_pages: cockroachdb/kv - tscache_skl_rotations: cockroachdb/kv - txn_aborts: cockroachdb/kv - txn_commit_waits: cockroachdb/kv - txn_commit_waits_before_commit_trigger: cockroachdb/kv - txn_commits: cockroachdb/kv - txn_commits_read_only: cockroachdb/kv - txn_commits1PC: cockroachdb/kv - txn_condensed_intent_spans: cockroachdb/kv - txn_condensed_intent_spans_gauge: cockroachdb/kv - txn_condensed_intent_spans_rejected: cockroachdb/kv - txn_count_limit_on_response: cockroachdb/kv - txn_count_limit_rejected: cockroachdb/kv - txn_durations: cockroachdb/kv - txn_inflight_locks_over_tracking_budget: cockroachdb/kv - txn_parallelcommits: cockroachdb/kv - txn_parallelcommits_auto_retries: cockroachdb/kv - txn_prepares: cockroachdb/kv - txn_refresh_auto_retries: cockroachdb/kv - txn_refresh_fail: cockroachdb/kv - txn_refresh_fail_with_condensed_spans: cockroachdb/kv - txn_refresh_memory_limit_exceeded: 
cockroachdb/kv - txn_refresh_success: cockroachdb/kv - txn_refresh_success_server_side: cockroachdb/kv - txn_restarts: cockroachdb/kv - txn_restarts_asyncwritefailure: cockroachdb/kv - txn_restarts_commitdeadlineexceeded: cockroachdb/kv - txn_restarts_exclusionviolation: cockroachdb/kv - txn_restarts_readwithinuncertainty: cockroachdb/kv - txn_restarts_serializable: cockroachdb/kv - txn_restarts_txnaborted: cockroachdb/kv - txn_restarts_txnpush: cockroachdb/kv - txn_restarts_unknown: cockroachdb/kv - txn_restarts_writetooold: cockroachdb/kv - txn_rollbacks_async_failed: cockroachdb/kv - txn_rollbacks_failed: cockroachdb/kv - txn_server_side_1PC_failure: cockroachdb/kv - txn_server_side_1PC_success: cockroachdb/kv - txn_server_side_retry_read_evaluation_failure: cockroachdb/kv - txn_server_side_retry_read_evaluation_success: cockroachdb/kv - txn_server_side_retry_uncertainty_interval_error_failure: cockroachdb/kv - txn_server_side_retry_uncertainty_interval_error_success: cockroachdb/kv - txn_server_side_retry_write_evaluation_failure: cockroachdb/kv - txn_server_side_retry_write_evaluation_success: cockroachdb/kv - txn_write_buffering_batches_fully_handled: cockroachdb/kv - txn_write_buffering_disabled_after_buffering: cockroachdb/kv - txn_write_buffering_memory_limit_exceeded: cockroachdb/kv - txn_write_buffering_num_enabled: cockroachdb/kv - txnrecovery_attempts_pending: cockroachdb/kv - txnrecovery_attempts_total: cockroachdb/kv - txnrecovery_failures: cockroachdb/kv - txnrecovery_successes_aborted: cockroachdb/kv - txnrecovery_successes_committed: cockroachdb/kv - txnrecovery_successes_pending: cockroachdb/kv - txnwaitqueue_deadlocks_total: cockroachdb/kv - txnwaitqueue_pushee_waiting: cockroachdb/kv - txnwaitqueue_pusher_slow: cockroachdb/kv - txnwaitqueue_pusher_wait_time: cockroachdb/kv - txnwaitqueue_pusher_waiting: cockroachdb/kv - txnwaitqueue_query_wait_time: cockroachdb/kv - txnwaitqueue_query_waiting: cockroachdb/kv - valbytes: cockroachdb/kv - 
valcount: cockroachdb/kv diff --git a/pkg/internal/metricscan/metric_owners_generated.go b/pkg/internal/metricscan/metric_owners_generated.go deleted file mode 100644 index f2f1a0f85a06..000000000000 --- a/pkg/internal/metricscan/metric_owners_generated.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2026 The Cockroach Authors. -// -// Use of this software is governed by the CockroachDB Software License -// included in the /LICENSE file. - -package metricscan - -import _ "embed" - -// metricOwnersData holds the metric_owners.yaml generated by -// gen-metric-owners, embedded at build time. The placeholder is -// overwritten when `./dev generate docs` (or equivalent) runs. -// -//go:embed metric_owners.yaml -var metricOwnersData []byte diff --git a/pkg/internal/metricscan/owners.go b/pkg/internal/metricscan/owners.go index 211fbf386434..dda7aa3fe7fa 100644 --- a/pkg/internal/metricscan/owners.go +++ b/pkg/internal/metricscan/owners.go @@ -124,14 +124,6 @@ func BuildMetricOwners(r *Result, resolveOwner func(file string) string) *Metric return mo } -// DefaultMetricOwners returns a MetricOwners loaded from the -// generated data embedded by gen-metric-owners. This makes the -// metric-to-team mapping available at runtime without reading an -// external file. -func DefaultMetricOwners() (*MetricOwners, error) { - return LoadMetricOwners(metricOwnersData) -} - // LoadMetricOwners deserializes a MetricOwners from YAML data and // classifies entries into exact matches, prefixes, and patterns. 
func LoadMetricOwners(data []byte) (*MetricOwners, error) { diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index 7fc7cd605968..d2459ca8c10f 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -2653,13 +2653,13 @@ func (fm fakeMetrics) MetricStruct() {} func makeFakeMetrics() fakeMetrics { return fakeMetrics{ - N: metric.NewCounter(metric.Metadata{ + N: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "fake.count", Help: "utterly fake metric", Measurement: "N", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), + })), } } func TestMetrics(t *testing.T) { diff --git a/pkg/jobs/metrics.go b/pkg/jobs/metrics.go index bd82ca4003d3..fa9d253c4fb8 100644 --- a/pkg/jobs/metrics.go +++ b/pkg/jobs/metrics.go @@ -88,7 +88,7 @@ func typeToString(jobType jobspb.Type) string { func makeMetaCurrentlyRunning(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - m := metric.Metadata{ + m := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.currently_running", typeStr), Help: fmt.Sprintf("Number of %s jobs currently running in Resume or OnFailOrCancel state", typeStr), @@ -100,7 +100,7 @@ func makeMetaCurrentlyRunning(jt jobspb.Type) metric.Metadata { metric.LabelType, typeStr, metric.LabelStatus, "currently_running", ), - } + }) switch jt { case jobspb.TypeCreateStats, jobspb.TypeAutoCreateStats, jobspb.TypeAutoCreatePartialStats: @@ -130,7 +130,7 @@ func makeMetaCurrentlyRunning(jt jobspb.Type) metric.Metadata { func makeMetaCurrentlyIdle(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.currently_idle", typeStr), Help: fmt.Sprintf("Number of %s jobs currently considered Idle and can be freely shut down", typeStr), @@ -142,12 +142,12 @@ func makeMetaCurrentlyIdle(jt jobspb.Type) metric.Metadata { metric.LabelType, typeStr, metric.LabelStatus, "currently_idle", ), - } + }) } func 
makeMetaCurrentlyPaused(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - m := metric.Metadata{ + m := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.currently_paused", typeStr), Help: fmt.Sprintf("Number of %s jobs currently considered Paused", typeStr), @@ -159,7 +159,7 @@ func makeMetaCurrentlyPaused(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "currently_paused", ), - } + }) switch jt { case jobspb.TypeAutoCreateStats, jobspb.TypeAutoCreatePartialStats: m.Visibility = metric.Metadata_ESSENTIAL @@ -187,7 +187,7 @@ func makeMetaCurrentlyPaused(jt jobspb.Type) metric.Metadata { func makeMetaResumeCompeted(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - m := metric.Metadata{ + m := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.resume_completed", typeStr), Help: fmt.Sprintf("Number of %s jobs which successfully resumed to completion", typeStr), @@ -199,7 +199,7 @@ func makeMetaResumeCompeted(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "completed", ), - } + }) switch jt { case jobspb.TypeRowLevelTTL: @@ -212,7 +212,7 @@ func makeMetaResumeCompeted(jt jobspb.Type) metric.Metadata { func makeMetaResumeRetryError(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.resume_retry_error", typeStr), Help: fmt.Sprintf("Number of %s jobs which failed with a retriable error", typeStr), @@ -224,12 +224,12 @@ func makeMetaResumeRetryError(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "retry_error", ), - } + }) } func makeMetaResumeFailed(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - m := metric.Metadata{ + m := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.resume_failed", typeStr), Help: fmt.Sprintf("Number of %s jobs which failed with a non-retriable error", 
typeStr), @@ -241,7 +241,7 @@ func makeMetaResumeFailed(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "failed", ), - } + }) switch jt { case jobspb.TypeAutoCreateStats, jobspb.TypeAutoCreatePartialStats: @@ -262,7 +262,7 @@ func makeMetaResumeFailed(jt jobspb.Type) metric.Metadata { func makeMetaFailOrCancelCompeted(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.fail_or_cancel_completed", typeStr), Help: fmt.Sprintf("Number of %s jobs which successfully completed "+ "their failure or cancelation process", @@ -275,12 +275,12 @@ func makeMetaFailOrCancelCompeted(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "completed", ), - } + }) } func makeMetaFailOrCancelRetryError(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.fail_or_cancel_retry_error", typeStr), Help: fmt.Sprintf("Number of %s jobs which failed with a retriable "+ "error on their failure or cancelation process", @@ -293,12 +293,12 @@ func makeMetaFailOrCancelRetryError(jt jobspb.Type) metric.Metadata { metric.LabelName, typeStr, metric.LabelStatus, "retry_error", ), - } + }) } func makeMetaProtectedCount(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.protected_record_count", typeStr), Help: fmt.Sprintf("Number of protected timestamp records held by %s jobs", typeStr), Measurement: "records", @@ -308,12 +308,12 @@ func makeMetaProtectedCount(jt jobspb.Type) metric.Metadata { StaticLabels: metric.MakeLabelPairs( metric.LabelType, typeStr, ), - } + }) } func makeMetaProtectedAge(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - m := metric.Metadata{ + m := metric.InitMetadata(metric.Metadata{ 
Name: fmt.Sprintf("jobs.%s.protected_age_sec", typeStr), Help: fmt.Sprintf("The age of the oldest PTS record protected by %s jobs", typeStr), Measurement: "seconds", @@ -323,7 +323,7 @@ func makeMetaProtectedAge(jt jobspb.Type) metric.Metadata { StaticLabels: metric.MakeLabelPairs( metric.LabelType, typeStr, ), - } + }) switch jt { case jobspb.TypeChangefeed: @@ -337,7 +337,7 @@ func makeMetaProtectedAge(jt jobspb.Type) metric.Metadata { func makeMetaExpiredPTS(jt jobspb.Type) metric.Metadata { typeStr := typeToString(jt) - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("jobs.%s.expired_pts_records", typeStr), Help: fmt.Sprintf("Number of expired protected timestamp records owned by %s jobs", typeStr), Measurement: "records", @@ -347,43 +347,43 @@ func makeMetaExpiredPTS(jt jobspb.Type) metric.Metadata { StaticLabels: metric.MakeLabelPairs( metric.LabelType, typeStr, ), - } + }) } var ( - metaAdoptIterations = metric.Metadata{ + metaAdoptIterations = metric.InitMetadata(metric.Metadata{ Name: "jobs.adopt_iterations", Help: "number of job-adopt iterations performed by the registry", Measurement: "iterations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } + }) - metaClaimedJobs = metric.Metadata{ + metaClaimedJobs = metric.InitMetadata(metric.Metadata{ Name: "jobs.claimed_jobs", Help: "number of jobs claimed in job-adopt iterations", Measurement: "jobs", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } + }) - metaResumedClaimedJobs = metric.Metadata{ + metaResumedClaimedJobs = metric.InitMetadata(metric.Metadata{ Name: "jobs.resumed_claimed_jobs", Help: "number of claimed-jobs resumed in job-adopt iterations", Measurement: "jobs", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } + }) // MetaRunningNonIdleJobs is the count of currently running jobs that are not // reporting as being idle. 
- MetaRunningNonIdleJobs = metric.Metadata{ + MetaRunningNonIdleJobs = metric.InitMetadata(metric.Metadata{ Name: "jobs.running_non_idle", Help: "number of running jobs that are not idle", Measurement: "jobs", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } + }) ) // MetricStruct implements the metric.Struct interface. diff --git a/pkg/jobs/metricspoller/poller.go b/pkg/jobs/metricspoller/poller.go index 9e6bcdc58f66..db6d190ffc86 100644 --- a/pkg/jobs/metricspoller/poller.go +++ b/pkg/jobs/metricspoller/poller.go @@ -113,13 +113,13 @@ func (m pollerMetrics) MetricStruct() {} func newPollerMetrics() metric.Struct { return pollerMetrics{ - NumErrors: metric.NewCounter(metric.Metadata{ + NumErrors: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "jobs.metrics.task_failed", Help: "Number of metrics poller tasks that failed", Measurement: "errors", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), + })), } } diff --git a/pkg/jobs/schedule_metrics.go b/pkg/jobs/schedule_metrics.go index 693d37ba119f..388d28f2c406 100644 --- a/pkg/jobs/schedule_metrics.go +++ b/pkg/jobs/schedule_metrics.go @@ -61,40 +61,40 @@ type SchedulerMetrics struct { // MakeSchedulerMetrics returns metrics for scheduled job daemon. 
func MakeSchedulerMetrics() SchedulerMetrics { return SchedulerMetrics{ - NumStarted: metric.NewGauge(metric.Metadata{ + NumStarted: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.round.jobs-started", Help: "The number of jobs started", Measurement: "Jobs", Unit: metric.Unit_COUNT, - }), + })), - RescheduleSkip: metric.NewGauge(metric.Metadata{ + RescheduleSkip: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.round.reschedule-skip", Help: "The number of schedules rescheduled due to SKIP policy", Measurement: "Schedules", Unit: metric.Unit_COUNT, - }), + })), - RescheduleWait: metric.NewGauge(metric.Metadata{ + RescheduleWait: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.round.reschedule-wait", Help: "The number of schedules rescheduled due to WAIT policy", Measurement: "Schedules", Unit: metric.Unit_COUNT, - }), + })), - NumErrSchedules: metric.NewGauge(metric.Metadata{ + NumErrSchedules: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.error", Help: "Number of schedules which did not execute successfully", Measurement: "Schedules", Unit: metric.Unit_COUNT, - }), + })), - NumMalformedSchedules: metric.NewGauge(metric.Metadata{ + NumMalformedSchedules: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "schedules.malformed", Help: "Number of malformed schedules", Measurement: "Schedules", Unit: metric.Unit_COUNT, - }), + })), } } @@ -106,7 +106,7 @@ var _ metric.Struct = &SchedulerMetrics{} // MakeExecutorMetrics creates metrics for scheduled job executor. 
func MakeExecutorMetrics(name string) ExecutorMetrics { m := ExecutorMetrics{ - NumStarted: metric.NewCounter(metric.Metadata{ + NumStarted: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("schedules.%s.started", name), Help: fmt.Sprintf("Number of %s jobs started", name), Measurement: "Jobs", @@ -116,9 +116,9 @@ func MakeExecutorMetrics(name string) ExecutorMetrics { metric.LabelName, name, metric.LabelStatus, "started", ), - }), + })), - NumSucceeded: metric.NewCounter(metric.Metadata{ + NumSucceeded: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("schedules.%s.succeeded", name), Help: fmt.Sprintf("Number of %s jobs succeeded", name), Measurement: "Jobs", @@ -128,9 +128,9 @@ func MakeExecutorMetrics(name string) ExecutorMetrics { metric.LabelName, name, metric.LabelStatus, "succeeded", ), - }), + })), - NumFailed: metric.NewCounter(metric.Metadata{ + NumFailed: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("schedules.%s.failed", name), Help: fmt.Sprintf("Number of %s jobs failed", name), Measurement: "Jobs", @@ -140,7 +140,7 @@ func MakeExecutorMetrics(name string) ExecutorMetrics { metric.LabelName, name, metric.LabelStatus, "failed", ), - }), + })), } if name == tree.ScheduledBackupExecutor.UserName() { @@ -161,7 +161,7 @@ func MakeExecutorMetrics(name string) ExecutorMetrics { // MakeExecutorPTSMetrics creates PTS metrics. 
func MakeExecutorPTSMetrics(name string) ExecutorPTSMetrics { return ExecutorPTSMetrics{ - NumWithPTS: metric.NewGauge(metric.Metadata{ + NumWithPTS: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("schedules.%s.protected_record_count", name), Help: fmt.Sprintf("Number of PTS records held by %s schedules", name), Measurement: "Records", @@ -171,8 +171,8 @@ func MakeExecutorPTSMetrics(name string) ExecutorPTSMetrics { StaticLabels: metric.MakeLabelPairs( metric.LabelName, name, ), - }), - PTSAge: metric.NewGauge(metric.Metadata{ + })), + PTSAge: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("schedules.%s.protected_age_sec", name), Help: fmt.Sprintf("The age of the oldest PTS record protected by %s schedules", name), Measurement: "Seconds", @@ -182,6 +182,6 @@ func MakeExecutorPTSMetrics(name string) ExecutorPTSMetrics { StaticLabels: metric.MakeLabelPairs( metric.LabelName, name, ), - }), + })), } } diff --git a/pkg/kv/bulk/bulk_metrics.go b/pkg/kv/bulk/bulk_metrics.go index 753112b94390..3faff17ac9ab 100644 --- a/pkg/kv/bulk/bulk_metrics.go +++ b/pkg/kv/bulk/bulk_metrics.go @@ -25,18 +25,18 @@ func (Metrics) MetricStruct() {} var _ metric.Struct = Metrics{} var ( - metaMemMaxBytes = metric.Metadata{ + metaMemMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.bulk.max", Help: "Memory usage per sql statement for bulk operations", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaMemCurBytes = metric.Metadata{ + }) + metaMemCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.bulk.current", Help: "Current sql statement memory usage for bulk operations", Measurement: "Memory", Unit: metric.Unit_BYTES, - } + }) ) // See pkg/sql/mem_metrics.go diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index 7696bd0f2469..fd61aa810b1a 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -48,31 +48,31 @@ import ( ) 
var ( - metaDistSenderBatchCount = metric.Metadata{ + metaDistSenderBatchCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches", Help: "Number of batches processed", Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaDistSenderPartialBatchCount = metric.Metadata{ + }) + metaDistSenderPartialBatchCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches.partial", Help: "Number of partial batches processed after being divided on range boundaries", Measurement: "Partial Batches", Unit: metric.Unit_COUNT, - } - metaDistSenderReplicaAddressedBatchRequestBytes = metric.Metadata{ + }) + metaDistSenderReplicaAddressedBatchRequestBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_requests.replica_addressed.bytes", Help: `Total byte count of replica-addressed batch requests processed`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDistSenderReplicaAddressedBatchResponseBytes = metric.Metadata{ + }) + metaDistSenderReplicaAddressedBatchResponseBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_responses.replica_addressed.bytes", Help: `Total byte count of replica-addressed batch responses received`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDistSenderCrossRegionBatchRequestBytes = metric.Metadata{ + }) + metaDistSenderCrossRegionBatchRequestBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_requests.cross_region.bytes", Help: crstrings.UnwrapText(` Total byte count of replica-addressed batch requests processed cross @@ -80,8 +80,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDistSenderCrossRegionBatchResponseBytes = metric.Metadata{ + }) + metaDistSenderCrossRegionBatchResponseBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_responses.cross_region.bytes", Help: crstrings.UnwrapText(` Total byte count of replica-addressed batch responses received cross @@ -89,8 +89,8 @@ var ( `), Measurement: "Bytes", Unit: 
metric.Unit_BYTES, - } - metaDistSenderCrossZoneBatchRequestBytes = metric.Metadata{ + }) + metaDistSenderCrossZoneBatchRequestBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_requests.cross_zone.bytes", Help: crstrings.UnwrapText(` Total byte count of replica-addressed batch requests processed cross zone @@ -101,8 +101,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDistSenderCrossZoneBatchResponseBytes = metric.Metadata{ + }) + metaDistSenderCrossZoneBatchResponseBytes = metric.InitMetadata(metric.Metadata{ Name: "distsender.batch_responses.cross_zone.bytes", Help: crstrings.UnwrapText(` Total byte count of replica-addressed batch responses received cross zone @@ -113,44 +113,44 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDistSenderAsyncSentCount = metric.Metadata{ + }) + metaDistSenderAsyncSentCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches.async.sent", Help: "Number of partial batches sent asynchronously", Measurement: "Partial Batches", Unit: metric.Unit_COUNT, - } - metaDistSenderAsyncInProgress = metric.Metadata{ + }) + metaDistSenderAsyncInProgress = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches.async.in_progress", Help: "Number of partial batches currently being executed asynchronously", Measurement: "Partial Batches", Unit: metric.Unit_COUNT, - } - metaDistSenderAsyncThrottledCount = metric.Metadata{ + }) + metaDistSenderAsyncThrottledCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches.async.throttled", Help: "Number of partial batches not sent asynchronously due to throttling", Measurement: "Partial Batches", Unit: metric.Unit_COUNT, - } - metaDistSenderAsyncThrottledDuration = metric.Metadata{ + }) + metaDistSenderAsyncThrottledDuration = metric.InitMetadata(metric.Metadata{ Name: "distsender.batches.async.throttled_cumulative_duration_nanos", Help: "Cumulative duration of partial batches being throttled (in nanoseconds)", 
Measurement: "Throttled Duration", Unit: metric.Unit_NANOSECONDS, - } - metaTransportSentCount = metric.Metadata{ + }) + metaTransportSentCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.sent", Help: "Number of replica-addressed RPCs sent", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaTransportLocalSentCount = metric.Metadata{ + }) + metaTransportLocalSentCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.sent.local", Help: "Number of replica-addressed RPCs sent through the local-server optimization", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaTransportSenderNextReplicaErrCount = metric.Metadata{ + }) + metaTransportSenderNextReplicaErrCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.sent.nextreplicaerror", Help: "Number of replica-addressed RPCs sent due to per-replica errors", Measurement: "RPCs", @@ -165,8 +165,8 @@ var ( node. The non-success status is a result of an orderly execution of an RPC that reports a specific logical condition. `), - } - metaDistSenderNotLeaseHolderErrCount = metric.Metadata{ + }) + metaDistSenderNotLeaseHolderErrCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.errors.notleaseholder", Help: "Number of NotLeaseHolderErrors encountered from replica-addressed RPCs", Measurement: "Errors", @@ -179,20 +179,20 @@ var ( retried. However they may create occasional response time spikes. In that case, this metric may provide the explanation of the cause. 
`), - } - metaDistSenderInLeaseTransferBackoffsCount = metric.Metadata{ + }) + metaDistSenderInLeaseTransferBackoffsCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.errors.inleasetransferbackoffs", Help: "Number of times backed off due to NotLeaseHolderErrors during lease transfer", Measurement: "Errors", Unit: metric.Unit_COUNT, - } - metaDistSenderRangeLookups = metric.Metadata{ + }) + metaDistSenderRangeLookups = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangelookups", Help: "Number of range lookups", Measurement: "Range Lookups", Unit: metric.Unit_COUNT, - } - metaDistSenderSlowRPCs = metric.Metadata{ + }) + metaDistSenderSlowRPCs = metric.InitMetadata(metric.Metadata{ Name: "requests.slow.distsender", Help: crstrings.UnwrapText(` Number of range-bound RPCs currently stuck or retrying for a long time. @@ -204,8 +204,8 @@ var ( `), Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaDistSenderSlowReplicaRPCs = metric.Metadata{ + }) + metaDistSenderSlowReplicaRPCs = metric.InitMetadata(metric.Metadata{ Name: "distsender.slow.replicarpcs", Help: crstrings.UnwrapText(` Number of slow replica-bound RPCs. @@ -217,8 +217,8 @@ var ( `), Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaDistSenderMethodCountTmpl = metric.Metadata{ + }) + metaDistSenderMethodCountTmpl = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.%s.sent", Help: crstrings.UnwrapText(` Number of %s requests processed. @@ -228,8 +228,8 @@ var ( `), Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaDistSenderErrCountTmpl = metric.Metadata{ + }) + metaDistSenderErrCountTmpl = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.err.%s", Help: "Number of %s errors received replica-bound RPCs.", Measurement: "Errors", @@ -241,124 +241,124 @@ var ( 'roachpb.CommunicationErrType' and unclassified errors as 'roachpb.InternalErrType'. 
`), - } - metaDistSenderProxySentCount = metric.Metadata{ + }) + metaDistSenderProxySentCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.proxy.sent", Help: "Number of attempts by a gateway to proxy a request to an unreachable leaseholder via a follower replica.", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaDistSenderProxyErrCount = metric.Metadata{ + }) + metaDistSenderProxyErrCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.proxy.err", Help: "Number of attempts by a gateway to proxy a request which resulted in a failure.", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaDistSenderProxyForwardSentCount = metric.Metadata{ + }) + metaDistSenderProxyForwardSentCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.proxy.forward.sent", Help: "Number of attempts on a follower replica to proxy a request to an unreachable leaseholder.", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaDistSenderProxyForwardErrCount = metric.Metadata{ + }) + metaDistSenderProxyForwardErrCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.rpc.proxy.forward.err", Help: "Number of attempts on a follower replica to proxy a request which resulted in a failure.", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedTotalRanges = metric.Metadata{ + }) + metaDistSenderRangefeedTotalRanges = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.total_ranges", Help: crstrings.UnwrapText(` Number of ranges with an active rangefeed. 
`), Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedCatchupRanges = metric.Metadata{ + }) + metaDistSenderRangefeedCatchupRanges = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.catchup_ranges", Help: "Number of ranges with an active rangefeed that are performing a catchup scan.", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedCatchupRangesWaitingClientSide = metric.Metadata{ + }) + metaDistSenderRangefeedCatchupRangesWaitingClientSide = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.catchup_ranges_waiting_client_side", Help: `Number of ranges waiting on the client-side limiter to perform catchup scans`, Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedLocalRanges = metric.Metadata{ + }) + metaDistSenderRangefeedLocalRanges = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.local_ranges", Help: `Number of ranges connected to local node.`, Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedErrorCatchupRanges = metric.Metadata{ + }) + metaDistSenderRangefeedErrorCatchupRanges = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.error_catchup_ranges", Help: `Number of ranges in catchup mode which experienced an error`, Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDistSenderRangefeedRestartRanges = metric.Metadata{ + }) + metaDistSenderRangefeedRestartRanges = metric.InitMetadata(metric.Metadata{ Name: "distsender.rangefeed.restart_ranges", Help: `Number of ranges that were restarted due to transient errors`, Measurement: "Ranges", Unit: metric.Unit_COUNT, - } + }) - metaDistSenderCircuitBreakerReplicasCount = metric.Metadata{ + metaDistSenderCircuitBreakerReplicasCount = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.count", Help: `Number of replicas currently tracked by DistSender circuit breakers`, Measurement: "Replicas", Unit: 
metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasTripped = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasTripped = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.tripped", Help: `Number of DistSender replica circuit breakers currently tripped`, Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasTrippedEvents = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasTrippedEvents = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.tripped_events", Help: `Cumulative number of DistSender replica circuit breakers tripped over time`, Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasProbesRunning = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasProbesRunning = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.probes.running", Help: `Number of currently running DistSender replica circuit breaker probes`, Measurement: "Probes", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasProbesSuccess = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasProbesSuccess = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.probes.success", Help: `Cumulative number of successful DistSender replica circuit breaker probes`, Measurement: "Probes", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasProbesFailure = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasProbesFailure = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.probes.failure", Help: `Cumulative number of failed DistSender replica circuit breaker probes`, Measurement: "Probes", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasRequestsCancelled = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasRequestsCancelled = metric.InitMetadata(metric.Metadata{ Name: 
"distsender.circuit_breaker.replicas.requests.cancelled", Help: `Cumulative number of requests cancelled when DistSender replica circuit breakers trip`, Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaDistSenderCircuitBreakerReplicasRequestsRejected = metric.Metadata{ + }) + metaDistSenderCircuitBreakerReplicasRequestsRejected = metric.InitMetadata(metric.Metadata{ Name: "distsender.circuit_breaker.replicas.requests.rejected", Help: `Cumulative number of requests rejected by tripped DistSender replica circuit breakers`, Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaDistSenderLeaseholderRandomizedOnContextError = metric.Metadata{ + }) + metaDistSenderLeaseholderRandomizedOnContextError = metric.InitMetadata(metric.Metadata{ Name: "distsender.range_cache.leaseholder_randomized", Help: `Number of times the DistSender randomized the leaseholder for range cache entries upon seeing a context error`, Measurement: "Count", Unit: metric.Unit_COUNT, - } + }) ) // metamorphicRouteToLeaseholderFirst is used to control the behavior of the @@ -601,21 +601,21 @@ func makeRangeFeedErrorCounters() rangeFeedErrorCounters { retryCounters := make(map[int32]*metric.Counter, len(kvpb.RangeFeedRetryError_Reason_value)) for name, idx := range kvpb.RangeFeedRetryError_Reason_value { name = strings.TrimPrefix(name, "REASON_") - retryCounters[idx] = metric.NewCounter(metric.Metadata{ + retryCounters[idx] = metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("distsender.rangefeed.retry.%s", strings.ToLower(name)), Help: fmt.Sprintf(`Number of ranges that encountered retryable %s error`, name), Measurement: "Ranges", Unit: metric.Unit_COUNT, - }) + })) } retryMeta := func(name string) metric.Metadata { - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("distsender.rangefeed.retry.%s", strings.ReplaceAll(name, " ", "_")), Help: fmt.Sprintf("Number of ranges that encountered retryable %s error", name), 
Measurement: "Ranges", Unit: metric.Unit_COUNT, - } + }) } return rangeFeedErrorCounters{ diff --git a/pkg/kv/kvclient/kvcoord/txn_metrics.go b/pkg/kv/kvclient/kvcoord/txn_metrics.go index 57dc258c5187..42a2ca39ec78 100644 --- a/pkg/kv/kvclient/kvcoord/txn_metrics.go +++ b/pkg/kv/kvclient/kvcoord/txn_metrics.go @@ -66,57 +66,57 @@ type TxnMetrics struct { } var ( - metaAbortsRates = metric.Metadata{ + metaAbortsRates = metric.InitMetadata(metric.Metadata{ Name: "txn.aborts", Help: "Number of aborted KV transactions", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaCommitsRates = metric.Metadata{ + }) + metaCommitsRates = metric.InitMetadata(metric.Metadata{ Name: "txn.commits", Help: "Number of committed KV transactions (including 1PC)", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaCommits1PCRates = metric.Metadata{ + }) + metaCommits1PCRates = metric.InitMetadata(metric.Metadata{ Name: "txn.commits1PC", Help: "Number of KV transaction one-phase commits", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaCommitsReadOnly = metric.Metadata{ + }) + metaCommitsReadOnly = metric.InitMetadata(metric.Metadata{ Name: "txn.commits_read_only", Help: "Number of read only KV transaction commits", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaParallelCommitsRates = metric.Metadata{ + }) + metaParallelCommitsRates = metric.InitMetadata(metric.Metadata{ Name: "txn.parallelcommits", Help: "Number of KV transaction parallel commits", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaParallelCommitAutoRetries = metric.Metadata{ + }) + metaParallelCommitAutoRetries = metric.InitMetadata(metric.Metadata{ Name: "txn.parallelcommits.auto_retries", Help: "Number of commit tries after successful failed parallel commit attempts", Measurement: "Retries", Unit: metric.Unit_COUNT, - } - metaCommitWaitCount = metric.Metadata{ + }) + metaCommitWaitCount = metric.InitMetadata(metric.Metadata{ Name: 
"txn.commit_waits", Help: "Number of KV transactions that had to commit-wait on commit " + "in order to ensure linearizability. This generally happens to " + "transactions writing to global ranges.", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaPreparesRates = metric.Metadata{ + }) + metaPreparesRates = metric.InitMetadata(metric.Metadata{ Name: "txn.prepares", Help: "Number of prepared KV transactions", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaClientRefreshSuccess = metric.Metadata{ + }) + metaClientRefreshSuccess = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.success", Help: "Number of successful client-side transaction refreshes. A refresh may be " + "preemptive or reactive. A reactive refresh is performed after a " + @@ -125,14 +125,14 @@ var ( "auto-retry (see txn.refresh.auto_retries) after the refresh succeeds.", Measurement: "Refreshes", Unit: metric.Unit_COUNT, - } - metaClientRefreshFail = metric.Metadata{ + }) + metaClientRefreshFail = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.fail", Help: "Number of failed client-side transaction refreshes", Measurement: "Refreshes", Unit: metric.Unit_COUNT, - } - metaClientRefreshFailWithCondensedSpans = metric.Metadata{ + }) + metaClientRefreshFailWithCondensedSpans = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.fail_with_condensed_spans", Help: "Number of failed client-side refreshes for transactions whose read " + "tracking lost fidelity because of condensing. 
Such a failure " + @@ -141,83 +141,83 @@ var ( "txn.refresh.memory_limit_exceeded.", Measurement: "Refreshes", Unit: metric.Unit_COUNT, - } - metaClientRefreshMemoryLimitExceeded = metric.Metadata{ + }) + metaClientRefreshMemoryLimitExceeded = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.memory_limit_exceeded", Help: "Number of transaction which exceed the refresh span bytes limit, causing " + "their read spans to be condensed", Measurement: "Transactions", Unit: metric.Unit_COUNT, - } - metaClientRefreshAutoRetries = metric.Metadata{ + }) + metaClientRefreshAutoRetries = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.auto_retries", Help: "Number of request retries after successful client-side refreshes", Measurement: "Retries", Unit: metric.Unit_COUNT, - } - metaServerRefreshSuccess = metric.Metadata{ + }) + metaServerRefreshSuccess = metric.InitMetadata(metric.Metadata{ Name: "txn.refresh.success_server_side", Help: "Number of successful server-side transaction refreshes", Measurement: "Refreshes", Unit: metric.Unit_COUNT, - } - metaDurationsHistograms = metric.Metadata{ + }) + metaDurationsHistograms = metric.InitMetadata(metric.Metadata{ Name: "txn.durations", Help: "KV transaction durations", Measurement: "KV Txn Duration", Unit: metric.Unit_NANOSECONDS, - } - metaTxnsWithCondensedIntentSpans = metric.Metadata{ + }) + metaTxnsWithCondensedIntentSpans = metric.InitMetadata(metric.Metadata{ Name: "txn.condensed_intent_spans", Help: "KV transactions that have exceeded their intent tracking " + "memory budget (kv.transaction.max_intents_bytes). 
See also " + "txn.condensed_intent_spans_gauge for a gauge of such transactions currently running.", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnsWithCondensedIntentSpansGauge = metric.Metadata{ + }) + metaTxnsWithCondensedIntentSpansGauge = metric.InitMetadata(metric.Metadata{ Name: "txn.condensed_intent_spans_gauge", Help: "KV transactions currently running that have exceeded their intent tracking " + "memory budget (kv.transaction.max_intents_bytes). See also txn.condensed_intent_spans " + "for a perpetual counter/rate.", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnsRejectedByLockSpanBudget = metric.Metadata{ + }) + metaTxnsRejectedByLockSpanBudget = metric.InitMetadata(metric.Metadata{ Name: "txn.condensed_intent_spans_rejected", Help: "KV transactions that have been aborted because they exceeded their intent tracking " + "memory budget (kv.transaction.max_intents_bytes). " + "Rejection is caused by kv.transaction.reject_over_max_intents_budget.", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnsRejectedByCountLimit = metric.Metadata{ + }) + metaTxnsRejectedByCountLimit = metric.InitMetadata(metric.Metadata{ Name: "txn.count_limit_rejected", Help: "KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnsResponseOverCountLimit = metric.Metadata{ + }) + metaTxnsResponseOverCountLimit = metric.InitMetadata(metric.Metadata{ Name: "txn.count_limit_on_response", Help: "KV transactions that have exceeded the count limit on a response", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnsInflightLocksOverTrackingBudget = metric.Metadata{ + }) + metaTxnsInflightLocksOverTrackingBudget = metric.InitMetadata(metric.Metadata{ Name: "txn.inflight_locks_over_tracking_budget", Help: "KV transactions whose in-flight writes and locking reads have exceeded " + 
"the intent tracking memory budget (kv.transaction.max_intents_bytes).", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } + }) - metaRestartsHistogram = metric.Metadata{ + metaRestartsHistogram = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts", Help: "Number of restarted KV transactions", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } + }) // There are two ways we can get "write too old" restarts. In both cases, a // WriteTooOldError is generated in the MVCC layer. This is intercepted on // the way out by the Store, which performs a single retry at a pushed @@ -225,7 +225,7 @@ var ( // the WriteTooOld flag is set on the Transaction, which causes EndTxn to // return a/ TransactionRetryError with RETRY_WRITE_TOO_OLD. These are // captured as txn.restarts.writetooold. - metaRestartsWriteTooOld = metric.Metadata{ + metaRestartsWriteTooOld = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.writetooold", Help: "Number of restarts due to a concurrent writer committing first", Measurement: "Restarted Transactions", @@ -239,8 +239,8 @@ var ( of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. `), - } - metaRestartsSerializable = metric.Metadata{ + }) + metaRestartsSerializable = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.serializable", Help: "Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE", Measurement: "Restarted Transactions", @@ -254,32 +254,32 @@ var ( of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. 
`), - } - metaRestartsAsyncWriteFailure = metric.Metadata{ + }) + metaRestartsAsyncWriteFailure = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.asyncwritefailure", Help: "Number of restarts due to async consensus writes that failed to leave intents", Measurement: "Restarted Transactions", Unit: metric.Unit_COUNT, - } - metaRestartsCommitDeadlineExceeded = metric.Metadata{ + }) + metaRestartsCommitDeadlineExceeded = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.commitdeadlineexceeded", Help: "Number of restarts due to a transaction exceeding its deadline", Measurement: "Restarted Transactions", Unit: metric.Unit_COUNT, - } - metaRestartsReadWithinUncertainty = metric.Metadata{ + }) + metaRestartsReadWithinUncertainty = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.readwithinuncertainty", Help: "Number of restarts due to reading a new value within the uncertainty interval", Measurement: "Restarted Transactions", Unit: metric.Unit_COUNT, - } - metaRestartsExclusionViolation = metric.Metadata{ + }) + metaRestartsExclusionViolation = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.exclusionviolation", Help: "Number of restarts due to an exclusion violation", Measurement: "Restarted Transactions", Unit: metric.Unit_COUNT, - } - metaRestartsTxnAborted = metric.Metadata{ + }) + metaRestartsTxnAborted = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.txnaborted", Help: "Number of restarts due to an abort by a concurrent transaction (usually due to deadlock)", Measurement: "Restarted Transactions", @@ -292,14 +292,14 @@ var ( Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic implementation prone to deadlocks. `), - } + }) // TransactionPushErrors at this level are unusual. They are // normally handled at the Store level with the txnwait and // contention queues. 
However, they can reach this level and be // retried in tests that disable the store-level retries, and // there may be edge cases that allow them to reach this point in // production. - metaRestartsTxnPush = metric.Metadata{ + metaRestartsTxnPush = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.txnpush", Help: "Number of restarts due to a transaction push failure", Measurement: "Restarted Transactions", @@ -313,8 +313,8 @@ var ( of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. `), - } - metaRestartsUnknown = metric.Metadata{ + }) + metaRestartsUnknown = metric.InitMetadata(metric.Metadata{ Name: "txn.restarts.unknown", Help: "Number of restarts due to a unknown reasons", Measurement: "Restarted Transactions", @@ -328,43 +328,43 @@ var ( of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. `), - } - metaRollbacksFailed = metric.Metadata{ + }) + metaRollbacksFailed = metric.InitMetadata(metric.Metadata{ Name: "txn.rollbacks.failed", Help: "Number of KV transactions that failed to send final abort", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaAsyncRollbacksFailed = metric.Metadata{ + }) + metaAsyncRollbacksFailed = metric.InitMetadata(metric.Metadata{ Name: "txn.rollbacks.async.failed", Help: "Number of KV transactions that failed to send abort asynchronously which is not always retried", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnWriteBufferEnabled = metric.Metadata{ + }) + metaTxnWriteBufferEnabled = metric.InitMetadata(metric.Metadata{ Name: "txn.write_buffering.num_enabled", Help: "Number of KV transactions that enabled buffered writes", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnWriteBufferDisabledAfterBuffering = metric.Metadata{ + }) + metaTxnWriteBufferDisabledAfterBuffering = 
metric.InitMetadata(metric.Metadata{ Name: "txn.write_buffering.disabled_after_buffering", Help: "Number of KV transactions that disabled write buffering after buffering some writes but before an EndTxn request", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnWriteBufferLimitExceeded = metric.Metadata{ + }) + metaTxnWriteBufferLimitExceeded = metric.InitMetadata(metric.Metadata{ Name: "txn.write_buffering.memory_limit_exceeded", Help: "Number of KV transactions that exceeded the write buffering memory limit", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaTxnWriteBufferFullyHandledBatches = metric.Metadata{ + }) + metaTxnWriteBufferFullyHandledBatches = metric.InitMetadata(metric.Metadata{ Name: "txn.write_buffering.batches.fully_handled", Help: "Number of KV batches that were fully handled by the write buffer (not sent to KV)", Measurement: "KV Batches", Unit: metric.Unit_COUNT, - } + }) ) // MakeTxnMetrics returns a TxnMetrics struct that contains metrics whose diff --git a/pkg/kv/kvclient/kvstreamer/metrics.go b/pkg/kv/kvclient/kvstreamer/metrics.go index 269b420a763d..97af8d660d5f 100644 --- a/pkg/kv/kvclient/kvstreamer/metrics.go +++ b/pkg/kv/kvclient/kvstreamer/metrics.go @@ -8,30 +8,30 @@ package kvstreamer import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( - metaStreamerCount = metric.Metadata{ + metaStreamerCount = metric.InitMetadata(metric.Metadata{ Name: "kv.streamer.operators.active", Help: "Number of KV Streamer operators currently in use", Measurement: "Operators", Unit: metric.Unit_COUNT, - } - metaBatchesSent = metric.Metadata{ + }) + metaBatchesSent = metric.InitMetadata(metric.Metadata{ Name: "kv.streamer.batches.sent", Help: "Number of BatchRequests sent across all KV Streamer operators", Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaBatchesInProgress = metric.Metadata{ + }) + metaBatchesInProgress = metric.InitMetadata(metric.Metadata{ Name: "kv.streamer.batches.in_progress", 
Help: "Number of BatchRequests in progress across all KV Streamer operators", Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaBatchesThrottled = metric.Metadata{ + }) + metaBatchesThrottled = metric.InitMetadata(metric.Metadata{ Name: "kv.streamer.batches.throttled", Help: "Number of BatchRequests currently being throttled due to reaching the concurrency limit, across all KV Streamer operators", Measurement: "Batches", Unit: metric.Unit_COUNT, - } + }) ) type Metrics struct { diff --git a/pkg/kv/kvprober/kvprober.go b/pkg/kv/kvprober/kvprober.go index a3a0e8702bf6..ded19d418d89 100644 --- a/pkg/kv/kvprober/kvprober.go +++ b/pkg/kv/kvprober/kvprober.go @@ -70,60 +70,60 @@ type Opts struct { } var ( - metaReadProbeAttempts = metric.Metadata{ + metaReadProbeAttempts = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.read.attempts", Help: "Number of attempts made to read probe KV, regardless of outcome", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaReadProbeFailures = metric.Metadata{ + }) + metaReadProbeFailures = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.read.failures", Help: "Number of attempts made to read probe KV that failed, " + "whether due to error or timeout", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaReadProbeLatency = metric.Metadata{ + }) + metaReadProbeLatency = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.read.latency", Help: "Latency of successful KV read probes", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaWriteProbeAttempts = metric.Metadata{ + }) + metaWriteProbeAttempts = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.write.attempts", Help: "Number of attempts made to write probe KV, regardless of outcome", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaWriteProbeFailures = metric.Metadata{ + }) + metaWriteProbeFailures = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.write.failures", Help: "Number of attempts made to write probe KV 
that failed, " + "whether due to error or timeout", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaWriteProbeLatency = metric.Metadata{ + }) + metaWriteProbeLatency = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.write.latency", Help: "Latency of successful KV write probes", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaWriteProbeQuarantineOldestDuration = metric.Metadata{ + }) + metaWriteProbeQuarantineOldestDuration = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.write.quarantine.oldest_duration", Help: "The duration that the oldest range in the " + "write quarantine pool has remained", Measurement: "Seconds", Unit: metric.Unit_SECONDS, - } + }) - metaProbePlanAttempts = metric.Metadata{ + metaProbePlanAttempts = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.planning_attempts", Help: "Number of attempts at planning out probes made; " + "in order to probe KV we need to plan out which ranges to probe;", Measurement: "Runs", Unit: metric.Unit_COUNT, - } - metaProbePlanFailures = metric.Metadata{ + }) + metaProbePlanFailures = metric.InitMetadata(metric.Metadata{ Name: "kv.prober.planning_failures", Help: "Number of attempts at planning out probes that failed; " + "in order to probe KV we need to plan out which ranges to probe; " + @@ -131,7 +131,7 @@ var ( "all ranges; consider alerting on this metric as a result", Measurement: "Runs", Unit: metric.Unit_COUNT, - } + }) // TODO(josh): Add a histogram that captures where in the "rangespace" errors // are occurring. This will allow operators to see at a glance what percentage // of ranges are affected. 
diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go index 46cacf001ae0..09ec1b949217 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go @@ -516,83 +516,83 @@ func makeAllocatorRand(source rand.Source) allocatorRand { var ( // Load-based lease transfers. - metaLBLeaseTransferCannotFindBetterCandidate = metric.Metadata{ + metaLBLeaseTransferCannotFindBetterCandidate = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.cannot_find_better_candidate", Help: "The number times the allocator determined that the lease was on the best" + " possible replica", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBLeaseTransferExistingNotOverfull = metric.Metadata{ + }) + metaLBLeaseTransferExistingNotOverfull = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.existing_not_overfull", Help: "The number times the allocator determined that the lease was not on an" + " overfull store", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBLeaseTransferDeltaNotSignificant = metric.Metadata{ + }) + metaLBLeaseTransferDeltaNotSignificant = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.delta_not_significant", Help: "The number times the allocator determined that the delta between the existing" + " store and the best candidate was not significant", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBLeaseTransferMissingStatsForExistingStore = metric.Metadata{ + }) + metaLBLeaseTransferMissingStatsForExistingStore = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.missing_stats_for_existing_stores", Help: "The number times the allocator was missing qps stats for the leaseholder", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBLeaseTransferShouldTransfer = 
metric.Metadata{ + }) + metaLBLeaseTransferShouldTransfer = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.should_transfer", Help: "The number times the allocator determined that the lease should be" + " transferred to another replica for better load distribution", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBLeaseTransferFollowTheWorkload = metric.Metadata{ + }) + metaLBLeaseTransferFollowTheWorkload = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_lease_transfers.follow_the_workload", Help: "The number times the allocator determined that the lease should be" + " transferred to another replica for locality.", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } + }) // Load-based replica rebalances. - metaLBReplicaRebalancingCannotFindBetterCandidate = metric.Metadata{ + metaLBReplicaRebalancingCannotFindBetterCandidate = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_replica_rebalancing.cannot_find_better_candidate", Help: "The number times the allocator determined that the range was on the best" + " possible stores", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBReplicaRebalancingExistingNotOverfull = metric.Metadata{ + }) + metaLBReplicaRebalancingExistingNotOverfull = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_replica_rebalancing.existing_not_overfull", Help: "The number times the allocator determined that none of the range's replicas" + " were on overfull stores", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBReplicaRebalancingDeltaNotSignificant = metric.Metadata{ + }) + metaLBReplicaRebalancingDeltaNotSignificant = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_replica_rebalancing.delta_not_significant", Help: "The number times the allocator determined that the delta between an" + " existing store and the best replacement candidate was not high enough", Measurement: "Attempts", Unit: 
metric.Unit_COUNT, - } - metaLBReplicaRebalancingMissingStatsForExistingStore = metric.Metadata{ + }) + metaLBReplicaRebalancingMissingStatsForExistingStore = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_replica_rebalancing.missing_stats_for_existing_store", Help: "The number times the allocator was missing the qps stats for the existing store", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } - metaLBReplicaRebalancingShouldTransfer = metric.Metadata{ + }) + metaLBReplicaRebalancingShouldTransfer = metric.InitMetadata(metric.Metadata{ Name: "kv.allocator.load_based_replica_rebalancing.should_transfer", Help: "The number times the allocator determined that the replica should be" + " rebalanced to another store for better load distribution", Measurement: "Attempts", Unit: metric.Unit_COUNT, - } + }) ) type loadBasedLeaseTransferMetrics struct { diff --git a/pkg/kv/kvserver/allocator/mmaprototype/mma_metrics.go b/pkg/kv/kvserver/allocator/mmaprototype/mma_metrics.go index 9ed0088e89bd..fddb2bb02e64 100644 --- a/pkg/kv/kvserver/allocator/mmaprototype/mma_metrics.go +++ b/pkg/kv/kvserver/allocator/mmaprototype/mma_metrics.go @@ -96,7 +96,7 @@ func makeRangeOperationMetrics() *rangeOperationMetrics { } var ( - metaDroppedDueToStateInconsistency = metric.Metadata{ + metaDroppedDueToStateInconsistency = metric.InitMetadata(metric.Metadata{ Name: "mma.dropped", Help: "Number of pending replica or lease changes that MMA dropped because its internal " + "state became inconsistent with the actual cluster state. 
This can happen when an " + @@ -105,8 +105,8 @@ var ( "expected during normal operation when external changes occur.", Measurement: "Replica/Lease Change", Unit: metric.Unit_COUNT, - } - metaExternalRegisterSuccess = metric.Metadata{ + }) + metaExternalRegisterSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.external.registration.success", Help: "Number of external operations (replica/lease changes not initiated by MMA, e.g., " + "from admin commands or other allocators) that were successfully registered with " + @@ -114,8 +114,8 @@ var ( "synchronized with the cluster.", Measurement: "Replica/Lease Change", Unit: metric.Unit_COUNT, - } - metaExternalRegisterFailure = metric.Metadata{ + }) + metaExternalRegisterFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.external.registration.failure", Help: "Number of external operations (replica/lease changes not initiated by MMA) that " + "failed to register with MMA due to pre-check failures (e.g., the change conflicts " + @@ -123,8 +123,8 @@ var ( "not track it until the next state synchronization.", Measurement: "Replica/Lease Change", Unit: metric.Unit_COUNT, - } - metaExternalReplicaChangeSuccess = metric.Metadata{ + }) + metaExternalReplicaChangeSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.change.external.replica.success", Help: "Number of external replica changes (adding, removing, or moving replicas not " + "initiated by MMA) that completed successfully. External changes come from other " + @@ -134,8 +134,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "external", metric.LabelType, "replica", metric.LabelResult, "success"), - } - metaExternalReplicaChangeFailure = metric.Metadata{ + }) + metaExternalReplicaChangeFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.change.external.replica.failure", Help: "Number of external replica changes (adding, removing, or moving replicas not " + "initiated by MMA) that failed. 
External changes come from other components like " + @@ -146,8 +146,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "external", metric.LabelType, "replica", metric.LabelResult, "failure"), - } - metaExternalLeaseChangeSuccess = metric.Metadata{ + }) + metaExternalLeaseChangeSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.change.external.lease.success", Help: "Number of external lease transfers (moving the leaseholder to a different replica, " + "not initiated by MMA) that completed successfully. External changes come from other " + @@ -157,8 +157,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "external", metric.LabelType, "lease", metric.LabelResult, "success"), - } - metaExternalLeaseChangeFailure = metric.Metadata{ + }) + metaExternalLeaseChangeFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.change.external.lease.failure", Help: "Number of external lease transfers (moving the leaseholder to a different replica, " + "not initiated by MMA) that failed. External changes come from other components like " + @@ -169,8 +169,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "external", metric.LabelType, "lease", metric.LabelResult, "failure"), - } - metaRebalanceReplicaChangeSuccess = metric.Metadata{ + }) + metaRebalanceReplicaChangeSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.change.rebalance.replica.success", Help: "Number of MMA-initiated replica changes (adding, removing, or moving replicas to " + "balance load) that completed successfully. 
MMA moves replicas away from overloaded " + @@ -180,8 +180,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "rebalance", metric.LabelType, "replica", metric.LabelResult, "success"), - } - metaRebalanceReplicaChangeFailure = metric.Metadata{ + }) + metaRebalanceReplicaChangeFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.change.rebalance.replica.failure", Help: "Number of MMA-initiated replica changes (adding, removing, or moving replicas to " + "balance load) that failed. Failures may indicate constraint violations, unavailable " + @@ -191,8 +191,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "rebalance", metric.LabelType, "replica", metric.LabelResult, "failure"), - } - metaRebalanceLeaseChangeSuccess = metric.Metadata{ + }) + metaRebalanceLeaseChangeSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.change.rebalance.lease.success", Help: "Number of MMA-initiated lease transfers (moving the leaseholder to balance load) " + "that completed successfully. MMA transfers leases away from CPU-overloaded stores " + @@ -202,8 +202,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "rebalance", metric.LabelType, "lease", metric.LabelResult, "success"), - } - metaRebalanceLeaseChangeFailure = metric.Metadata{ + }) + metaRebalanceLeaseChangeFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.change.rebalance.lease.failure", Help: "Number of MMA-initiated lease transfers (moving the leaseholder to balance load) " + "that failed. 
Failures may indicate the target replica is unavailable, doesn't meet " + @@ -213,8 +213,8 @@ var ( LabeledName: "mma.change", StaticLabels: metric.MakeLabelPairs( metric.LabelOrigin, "rebalance", metric.LabelType, "lease", metric.LabelResult, "failure"), - } - metaSpanConfigNormalizationError = metric.Metadata{ + }) + metaSpanConfigNormalizationError = metric.InitMetadata(metric.Metadata{ Name: "mma.span_config.normalization.error", Help: "Number of ranges where the local store is leaseholder and MMA encountered errors " + "while normalizing the span config (zone configuration). This includes both hard " + @@ -226,8 +226,8 @@ var ( LabeledName: "mma.span_config.normalization", StaticLabels: metric.MakeLabelPairs( metric.LabelResult, "error"), - } - metaSpanConfigNormalizationSoftError = metric.Metadata{ + }) + metaSpanConfigNormalizationSoftError = metric.InitMetadata(metric.Metadata{ Name: "mma.span_config.normalization.soft_error", Help: "Number of ranges where the local store is leaseholder and MMA's structural " + "normalization of the span config failed, but MMA produced a best-effort usable " + @@ -239,7 +239,7 @@ var ( LabeledName: "mma.span_config.normalization", StaticLabels: metric.MakeLabelPairs( metric.LabelResult, "soft_error"), - } + }) // Future: we will add additional origins for MMA-initiated operations that // are other than rebalance. Eventually, the external label value will go @@ -266,7 +266,7 @@ type loadAndCapacityMetrics struct { } var ( - metaStoreCPULoad = metric.Metadata{ + metaStoreCPULoad = metric.InitMetadata(metric.Metadata{ Name: "mma.store.cpu.load", Help: crstrings.UnwrapText(` CPU load that is attributed to the replicas on this store. 
This @@ -277,8 +277,8 @@ var ( `), Measurement: "Nanoseconds/Sec", Unit: metric.Unit_NANOSECONDS, - } - metaStoreCPUCapacity = metric.Metadata{ + }) + metaStoreCPUCapacity = metric.InitMetadata(metric.Metadata{ Name: "mma.store.cpu.capacity", Help: crstrings.UnwrapText(` Logical CPU capacity estimated by MMA by extrapolating from the @@ -288,20 +288,20 @@ var ( `), Measurement: "Nanoseconds/Sec", Unit: metric.Unit_NANOSECONDS, - } - metaStoreCPUUtilization = metric.Metadata{ + }) + metaStoreCPUUtilization = metric.InitMetadata(metric.Metadata{ Name: "mma.store.cpu.utilization", Help: "Ratio of logical CPU load to capacity expressed as a percentage", Measurement: "CPU Utilization", Unit: metric.Unit_PERCENT, - } - metaStoreWriteBandwidth = metric.Metadata{ + }) + metaStoreWriteBandwidth = metric.InitMetadata(metric.Metadata{ Name: "mma.store.write.bandwidth", Help: "Disk write bandwidth as observed by MMA corresponding to the store", Measurement: "Bytes/Sec", Unit: metric.Unit_BYTES, - } - metaStoreDiskLoad = metric.Metadata{ + }) + metaStoreDiskLoad = metric.InitMetadata(metric.Metadata{ Name: "mma.store.disk.logical", Help: crstrings.UnwrapText(` Logical bytes consumed by the replicas on this store as reported by @@ -310,8 +310,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaStoreDiskCapacity = metric.Metadata{ + }) + metaStoreDiskCapacity = metric.InitMetadata(metric.Metadata{ Name: "mma.store.disk.capacity", Help: crstrings.UnwrapText(` Logical disk capacity estimated by MMA by extrapolating from the @@ -320,13 +320,13 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaStoreDiskUtilization = metric.Metadata{ + }) + metaStoreDiskUtilization = metric.InitMetadata(metric.Metadata{ Name: "mma.store.disk.utilization", Help: "Ratio of logical disk usage to capacity expressed as a percentage", Measurement: "Disk Utilization", Unit: metric.Unit_PERCENT, - } + }) ) func makeLoadAndCapacityMetrics() *loadAndCapacityMetrics { @@ 
-494,7 +494,7 @@ type reasonCount struct { } var ( - metaOverloadedStoreLeaseGraceSuccess = metric.Metadata{ + metaOverloadedStoreLeaseGraceSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.lease_grace.success", Help: "Number of overloaded stores in the lease shedding grace period (first 2 min of " + "overload) where at least one lease or replica was successfully moved away during " + @@ -506,8 +506,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "lease_grace", metric.LabelResult, "success"), - } - metaOverloadedStoreLeaseGraceFailure = metric.Metadata{ + }) + metaOverloadedStoreLeaseGraceFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.lease_grace.failure", Help: "Number of overloaded stores in the lease shedding grace period (first 2 min of " + "overload) where the local store failed to shed any leases or replicas during " + @@ -519,8 +519,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "lease_grace", metric.LabelResult, "failure"), - } - metaOverloadedStoreShortDurSuccess = metric.Metadata{ + }) + metaOverloadedStoreShortDurSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.short_dur.success", Help: "Number of stores overloaded for a short duration (2-5 min) where at least one " + "lease or replica was successfully moved away during this store's MMA rebalancing " + @@ -531,8 +531,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "short_dur", metric.LabelResult, "success"), - } - metaOverloadedStoreShortDurFailure = metric.Metadata{ + }) + metaOverloadedStoreShortDurFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.short_dur.failure", Help: "Number of stores overloaded for a short duration (2-5 min) where all shedding " + "attempts failed during this store's MMA rebalancing pass. 
Failures occur when no " + @@ -543,8 +543,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "short_dur", metric.LabelResult, "failure"), - } - metaOverloadedStoreMediumDurSuccess = metric.Metadata{ + }) + metaOverloadedStoreMediumDurSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.medium_dur.success", Help: "Number of stores overloaded for a medium duration (5-8 min) where at least one " + "lease or replica was successfully moved away during this store's MMA rebalancing " + @@ -555,8 +555,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "medium_dur", metric.LabelResult, "success"), - } - metaOverloadedStoreMediumDurFailure = metric.Metadata{ + }) + metaOverloadedStoreMediumDurFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.medium_dur.failure", Help: "Number of stores overloaded for a medium duration (5-8 min) where all shedding " + "attempts failed during this store's MMA rebalancing pass. 
Despite considering " + @@ -567,8 +567,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "medium_dur", metric.LabelResult, "failure"), - } - metaOverloadedStoreLongDurSuccess = metric.Metadata{ + }) + metaOverloadedStoreLongDurSuccess = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.long_dur.success", Help: "Number of stores overloaded for a long duration (8+ min) where at least one " + "lease or replica was successfully moved away during this store's MMA rebalancing " + @@ -579,8 +579,8 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "long_dur", metric.LabelResult, "success"), - } - metaOverloadedStoreLongDurFailure = metric.Metadata{ + }) + metaOverloadedStoreLongDurFailure = metric.InitMetadata(metric.Metadata{ Name: "mma.overloaded_store.long_dur.failure", Help: "Number of stores overloaded for a long duration (8+ min) where all shedding " + "attempts failed during this store's MMA rebalancing pass. 
Even with the most " + @@ -591,7 +591,7 @@ var ( LabeledName: "mma.overloaded_store", StaticLabels: metric.MakeLabelPairs( metric.LabelType, "long_dur", metric.LabelResult, "failure"), - } + }) ) type gaugeMetrics struct { diff --git a/pkg/kv/kvserver/asim/state/split_decider.go b/pkg/kv/kvserver/asim/state/split_decider.go index 92aad43bfe47..ad40a16220d7 100644 --- a/pkg/kv/kvserver/asim/state/split_decider.go +++ b/pkg/kv/kvserver/asim/state/split_decider.go @@ -86,9 +86,9 @@ func NewSplitDecider(settings *config.SimulationSettings) *SplitDecider { func (s *SplitDecider) newDecider() *split.Decider { decider := &split.Decider{} split.Init(decider, s.splitConfig, &split.LoadSplitterMetrics{ - PopularKeyCount: metric.NewCounter(metric.Metadata{}), - NoSplitKeyCount: metric.NewCounter(metric.Metadata{}), - ClearDirectionCount: metric.NewCounter(metric.Metadata{}), + PopularKeyCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), + NoSplitKeyCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), + ClearDirectionCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), }, split.SplitQPS) return decider } diff --git a/pkg/kv/kvserver/concurrency/metrics.go b/pkg/kv/kvserver/concurrency/metrics.go index ced76a076ee9..f21384b2b193 100644 --- a/pkg/kv/kvserver/concurrency/metrics.go +++ b/pkg/kv/kvserver/concurrency/metrics.go @@ -129,72 +129,72 @@ func addToTopK(topK []LockMetrics, lm LockMetrics, cmp func(LockMetrics) int64) } } -var MetaConcurrencyLocksShedDueToMemoryLimit = metric.Metadata{ +var MetaConcurrencyLocksShedDueToMemoryLimit = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.locks_shed_due_to_memory_limit", Help: "The number of locks that were shed because the lock table ran into memory limits", Measurement: "Locks", Unit: metric.Unit_COUNT, -} +}) -var MetaConcurrencyNumLockShedDueToMemoryLimitEvents = metric.Metadata{ +var MetaConcurrencyNumLockShedDueToMemoryLimitEvents = metric.InitMetadata(metric.Metadata{ 
Name: "kv.concurrency.num_lock_shed_due_to_memory_limit_events", Help: "The number of times locks that were shed by the lock table because it ran into memory limits", Measurement: "Lock Shed Events", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveCondense counts the number of times point intent // resolutions were condensed into range resolutions during VIR scanning. -var MetaVirtualResolveCondense = metric.Metadata{ +var MetaVirtualResolveCondense = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.condense", Help: "Number of times point intent resolutions were condensed into range resolutions during virtual intent resolution", Measurement: "Condense Events", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveDisabled counts the number of times VIR was disabled for // a request because too many distinct transactions accumulated range resolves. -var MetaVirtualResolveDisabled = metric.Metadata{ +var MetaVirtualResolveDisabled = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.disabled", Help: "Number of times virtual intent resolution was disabled for a request due to excessive range resolve accumulation", Measurement: "Disable Events", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveIntent counts point intents resolved virtually. -var MetaVirtualResolveIntent = metric.Metadata{ +var MetaVirtualResolveIntent = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.intent", Help: "Number of point intents resolved virtually during read evaluation", Measurement: "Intents", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveIntentRange counts range intent resolutions resolved // virtually. 
-var MetaVirtualResolveIntentRange = metric.Metadata{ +var MetaVirtualResolveIntentRange = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.intent_range", Help: "Number of range intent resolutions resolved virtually during read evaluation", Measurement: "Intent Ranges", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveBatches counts read batches that attempted virtual intent // resolution. -var MetaVirtualResolveBatches = metric.Metadata{ +var MetaVirtualResolveBatches = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.batches", Help: "Number of read batches that attempted virtual intent resolution", Measurement: "Batches", Unit: metric.Unit_COUNT, -} +}) // MetaVirtualResolveBatchErrors counts read batches where virtual intent // resolution failed. -var MetaVirtualResolveBatchErrors = metric.Metadata{ +var MetaVirtualResolveBatchErrors = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.virtual_resolve.batch_errors", Help: "Number of read batches where virtual intent resolution failed during evaluation", Measurement: "Batches", Unit: metric.Unit_COUNT, -} +}) // TestingLockTableMetricsCfg is a subset of store metrics that are required to // construct a new lock table to be used for testing purposes. diff --git a/pkg/kv/kvserver/intentresolver/metrics.go b/pkg/kv/kvserver/intentresolver/metrics.go index eb0aaf655fd5..5ff23abe217f 100644 --- a/pkg/kv/kvserver/intentresolver/metrics.go +++ b/pkg/kv/kvserver/intentresolver/metrics.go @@ -9,28 +9,28 @@ import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( // Intent resolver metrics. 
- metaIntentResolverAsyncThrottled = metric.Metadata{ + metaIntentResolverAsyncThrottled = metric.InitMetadata(metric.Metadata{ Name: "intentresolver.async.throttled", Help: "Number of intent resolution attempts not run asynchronously due to throttling", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaFinalizedTxnCleanupFailed = metric.Metadata{ + }) + metaFinalizedTxnCleanupFailed = metric.InitMetadata(metric.Metadata{ Name: "intentresolver.finalized_txns.failed", Help: "Number of finalized transaction cleanup failures. Transaction " + "cleanup refers to the process of resolving all of a transactions intents " + "and then garbage collecting its transaction record.", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaIntentCleanupFailed = metric.Metadata{ + }) + metaIntentCleanupFailed = metric.InitMetadata(metric.Metadata{ Name: "intentresolver.intents.failed", Help: "Number of intent resolution failures. The unit of measurement " + "is a single intent, so if a batch of intent resolution requests fails, " + "the metric will be incremented for each request in the batch.", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } + }) ) // Metrics contains the metrics for the IntentResolver. diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go b/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go index 7351ec9ad222..b1c20819caf5 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go @@ -20,92 +20,92 @@ const regular, elastic = admissionpb.RegularWorkClass, admissionpb.ElasticWorkCl var ( // TokenCounter metrics. 
- flowTokensAvailable = metric.Metadata{ + flowTokensAvailable = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.%s.%s.available", Help: "Flow %s tokens available for %s requests, across all replication streams", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - flowTokensDeducted = metric.Metadata{ + }) + flowTokensDeducted = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.%s.%s.deducted", Help: "Flow %s tokens deducted by %s requests, across all replication streams", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - flowTokensReturned = metric.Metadata{ + }) + flowTokensReturned = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.%s.%s.returned", Help: "Flow %s tokens returned by %s requests, across all replication streams", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - flowTokensUnaccounted = metric.Metadata{ + }) + flowTokensUnaccounted = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.%s.%s.unaccounted", Help: "Flow %s tokens returned by %s requests that were unaccounted for, across all replication streams", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - flowTokensDisconnectReturn = metric.Metadata{ + }) + flowTokensDisconnectReturn = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.%s.%s.returned.disconnect", Help: "Flow %s tokens returned early by %s due disconnects, across all replication stream, this is a subset of returned tokens", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) // SendQueue TokenCounter metrics. 
- flowTokensSendQueuePreventionDeduct = metric.Metadata{ + flowTokensSendQueuePreventionDeduct = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.send.%s.deducted.prevent_send_queue", Help: "Flow send tokens deducted by %s requests, across all replication streams to prevent forming a send queue", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - flowTokensSendQueueForceFlushDeduct = metric.Metadata{ + }) + flowTokensSendQueueForceFlushDeduct = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.tokens.send.elastic.deducted.force_flush_send_queue", Help: "Flow send tokens deducted by elastic requests, across all replication streams due to force flushing the stream's send queue", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) // TokenStream metrics. - totalStreamCount = metric.Metadata{ + totalStreamCount = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.streams.%s.%s.total_count", Help: "Total number of %s replication streams for %s requests", Measurement: "Count", Unit: metric.Unit_COUNT, - } - blockedStreamCount = metric.Metadata{ + }) + blockedStreamCount = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.streams.%s.%s.blocked_count", Help: "Number of %s replication streams with no flow tokens available for %s requests", Measurement: "Count", Unit: metric.Unit_COUNT, - } + }) // WaitForEval metrics. 
- requestsWaiting = metric.Metadata{ + requestsWaiting = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.eval_wait.%s.requests.waiting", Help: "Number of %s requests waiting for flow tokens", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - requestsAdmitted = metric.Metadata{ + }) + requestsAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.eval_wait.%s.requests.admitted", Help: "Number of %s requests admitted by the flow controller", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - requestsErrored = metric.Metadata{ + }) + requestsErrored = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.eval_wait.%s.requests.errored", Help: "Number of %s requests that errored out while waiting for flow tokens", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - requestsBypassed = metric.Metadata{ + }) + requestsBypassed = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.eval_wait.%s.requests.bypassed", Help: "Number of waiting %s requests that bypassed the flow " + "controller due the evaluating replica not being the leader", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - waitDuration = metric.Metadata{ + }) + waitDuration = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.eval_wait.%s.duration", Help: "Latency histogram for time %s requests spent waiting for flow tokens to evaluate", Measurement: "Nanoseconds", @@ -113,18 +113,18 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric shows how long requests are waiting for flow tokens before evaluation. Extended wait times may indicate flow control token exhaustion or replication lag.", - } + }) // RangeController metrics. 
- rangeFlowControllerCount = metric.Metadata{ + rangeFlowControllerCount = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.range_controller.count", Help: "Gauge of range flow controllers currently open, this should align with the number of leaders", Measurement: "Count", Unit: metric.Unit_COUNT, - } + }) // SendQueue metrics. - sendQueueBytes = metric.Metadata{ + sendQueueBytes = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.send_queue.bytes", Help: "Byte size of all raft entries queued for sending to followers, waiting on available elastic send tokens", Measurement: "Bytes", @@ -132,31 +132,31 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric indicates the size of queued raft entries waiting for elastic send tokens. Large or growing queue sizes may indicate replication backlog or follower lag.", - } - sendQueueCount = metric.Metadata{ + }) + sendQueueCount = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.send_queue.count", Help: "Count of all raft entries queued for sending to followers, waiting on available elastic send tokens", Measurement: "Bytes", Unit: metric.Unit_COUNT, - } - sendQueueForceFlushScheduledCount = metric.Metadata{ + }) + sendQueueForceFlushScheduledCount = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.send_queue.scheduled.force_flush", Help: "Gauge of replication streams scheduled to force flush their send queue", Measurement: "Scheduled force flushes", Unit: metric.Unit_COUNT, - } - sendQueueDeductedForSchedulerBytes = metric.Metadata{ + }) + sendQueueDeductedForSchedulerBytes = metric.InitMetadata(metric.Metadata{ Name: "kvflowcontrol.send_queue.scheduled.deducted_bytes", Help: "Gauge of elastic send token bytes already deducted by replication streams waiting on the scheduler", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - sendQueuePreventionCount = metric.Metadata{ + }) + sendQueuePreventionCount = metric.InitMetadata(metric.Metadata{ 
Name: "kvflowcontrol.send_queue.prevent.count", Help: "Counter of replication streams that were prevented from forming a send queue", Measurement: "Preventions", Unit: metric.Unit_COUNT, - } + }) ) // annotateMetricTemplateWithWorkClass uses the given metric template to build diff --git a/pkg/kv/kvserver/liveness/liveness.go b/pkg/kv/kvserver/liveness/liveness.go index 4813748969e8..ff2571396568 100644 --- a/pkg/kv/kvserver/liveness/liveness.go +++ b/pkg/kv/kvserver/liveness/liveness.go @@ -166,7 +166,7 @@ func isErrRetryLiveness(ctx context.Context, err error) bool { // Node liveness metrics counter names. var ( - metaLiveNodes = metric.Metadata{ + metaLiveNodes = metric.InitMetadata(metric.Metadata{ Name: "liveness.livenodes", Help: "Number of live nodes in the cluster (will be 0 if this node is not itself live)", Measurement: "Nodes", @@ -174,33 +174,33 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: "This is a critical metric that tracks the live nodes in the cluster.", - } - metaHeartbeatsInFlight = metric.Metadata{ + }) + metaHeartbeatsInFlight = metric.InitMetadata(metric.Metadata{ Name: "liveness.heartbeatsinflight", Help: "Number of in-flight liveness heartbeats from this node", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaHeartbeatSuccesses = metric.Metadata{ + }) + metaHeartbeatSuccesses = metric.InitMetadata(metric.Metadata{ Name: "liveness.heartbeatsuccesses", Help: "Number of successful node liveness heartbeats from this node", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaHeartbeatFailures = metric.Metadata{ + }) + metaHeartbeatFailures = metric.InitMetadata(metric.Metadata{ Name: "liveness.heartbeatfailures", Help: "Number of failed node liveness heartbeats from this node", Measurement: "Messages", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaEpochIncrements = metric.Metadata{ + }) + metaEpochIncrements = metric.InitMetadata(metric.Metadata{ 
Name: "liveness.epochincrements", Help: "Number of times this node has incremented its liveness epoch", Measurement: "Epochs", Unit: metric.Unit_COUNT, - } - metaHeartbeatLatency = metric.Metadata{ + }) + metaHeartbeatLatency = metric.InitMetadata(metric.Metadata{ Name: "liveness.heartbeatlatency", Help: "Node liveness heartbeat latency", Measurement: "Latency", @@ -208,7 +208,7 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: "If this metric exceeds 1 second, it is a sign of cluster instability.", - } + }) ) // Metrics holds metrics for use with node liveness activity. diff --git a/pkg/kv/kvserver/logstore/bytes_tracker.go b/pkg/kv/kvserver/logstore/bytes_tracker.go index f76e5746177e..182e4a73e7c5 100644 --- a/pkg/kv/kvserver/logstore/bytes_tracker.go +++ b/pkg/kv/kvserver/logstore/bytes_tracker.go @@ -21,12 +21,12 @@ var raftEntriesMemoryLimit = envutil.EnvOrDefaultBytes( // NewRaftEntriesSoftLimit returns the SoftLimit configured with the default // memory limit. 
func NewRaftEntriesSoftLimit() *SoftLimit { - reservedBytesMetric := metric.NewGauge(metric.Metadata{ + reservedBytesMetric := metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "raft.loaded_entries.reserved.bytes", Help: "Bytes allocated by raft Storage.Entries calls that are still kept in memory", Measurement: "Memory", Unit: metric.Unit_BYTES, - }) + })) return &SoftLimit{Metric: reservedBytesMetric, Limit: raftEntriesMemoryLimit} } diff --git a/pkg/kv/kvserver/logstore/bytes_tracker_test.go b/pkg/kv/kvserver/logstore/bytes_tracker_test.go index 50305f74626f..9c1b1faf7171 100644 --- a/pkg/kv/kvserver/logstore/bytes_tracker_test.go +++ b/pkg/kv/kvserver/logstore/bytes_tracker_test.go @@ -13,7 +13,7 @@ import ( ) func TestSoftLimiter(t *testing.T) { - lim := SoftLimit{Metric: metric.NewGauge(metric.Metadata{}), Limit: 1000} + lim := SoftLimit{Metric: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), Limit: 1000} require.Zero(t, lim.Metric.Value()) lim.acquire(100) require.Equal(t, int64(100), lim.Metric.Value()) @@ -42,7 +42,7 @@ func TestSoftLimiter(t *testing.T) { } func TestBytesAccount(t *testing.T) { - lim := SoftLimit{Metric: metric.NewGauge(metric.Metadata{}), Limit: 1 << 20} + lim := SoftLimit{Metric: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), Limit: 1 << 20} a1, a2 := lim.NewAccount(nil), lim.NewAccount(nil) require.True(t, a1.Grow(256<<10)) @@ -70,7 +70,7 @@ func TestBytesAccount(t *testing.T) { } func TestSizeHelper(t *testing.T) { - lim := SoftLimit{Metric: metric.NewGauge(metric.Metadata{}), Limit: 1 << 20} + lim := SoftLimit{Metric: metric.NewGauge(metric.InitMetadata(metric.Metadata{})), Limit: 1 << 20} for _, tt := range []struct { max uint64 sizes []uint64 diff --git a/pkg/kv/kvserver/metrics.go b/pkg/kv/kvserver/metrics.go index 4ca730036f99..17df3050d159 100644 --- a/pkg/kv/kvserver/metrics.go +++ b/pkg/kv/kvserver/metrics.go @@ -46,7 +46,7 @@ func init() { var ( // Replica metrics. 
- metaReplicaCount = metric.Metadata{ + metaReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "replicas", Help: "Number of replicas", Measurement: "Replicas", @@ -54,38 +54,38 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric provides an essential characterization of the data distribution across cluster nodes.`, - } - metaReservedReplicaCount = metric.Metadata{ + }) + metaReservedReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.reserved", Help: "Number of replicas reserved for snapshots", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftLeaderCount = metric.Metadata{ + }) + metaRaftLeaderCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.leaders", Help: "Number of raft leaders", Measurement: "Raft Leaders", Unit: metric.Unit_COUNT, - } - metaRaftLeaderNotLeaseHolderCount = metric.Metadata{ + }) + metaRaftLeaderNotLeaseHolderCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.leaders_not_leaseholders", Help: "Number of replicas that are Raft leaders whose range lease is held by another store", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftLeaderNotFortifiedCount = metric.Metadata{ + }) + metaRaftLeaderNotFortifiedCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.leaders_not_fortified", Help: "Number of replicas that are not fortified Raft leaders", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftLeaderInvalidLeaseCount = metric.Metadata{ + }) + metaRaftLeaderInvalidLeaseCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.leaders_invalid_lease", Help: "Number of replicas that are Raft leaders whose lease is invalid", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaLeaseHolderCount = metric.Metadata{ + }) + metaLeaseHolderCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.leaseholders", Help: "Number of lease holders", Measurement: "Replicas", @@ -93,47 +93,47 @@ var ( 
Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric provides an essential characterization of the data processing points across cluster nodes.`, - } - metaQuiescentCount = metric.Metadata{ + }) + metaQuiescentCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.quiescent", Help: "Number of quiesced replicas", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaAsleepCount = metric.Metadata{ + }) + metaAsleepCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.asleep", Help: "Number of asleep replicas. Similarly to quiesced replicas, asleep replicas do not tick in Raft.", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaUninitializedCount = metric.Metadata{ + }) + metaUninitializedCount = metric.InitMetadata(metric.Metadata{ Name: "replicas.uninitialized", Help: "Number of uninitialized replicas, this does not include uninitialized replicas that can lie dormant in a persistent state.", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) - metaRaftFlowsReplicate = metric.Metadata{ + metaRaftFlowsReplicate = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.state_replicate", Help: "Number of leader->peer flows in StateReplicate", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaRaftFlowsProbe = metric.Metadata{ + }) + metaRaftFlowsProbe = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.state_probe", Help: "Number of leader->peer flows in StateProbe", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaRaftFlowsSnapshot = metric.Metadata{ + }) + metaRaftFlowsSnapshot = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.state_snapshot", Help: "Number of leader->peer flows in StateSnapshot", Measurement: "Flows", Unit: metric.Unit_COUNT, - } + }) // Range metrics. 
- metaRangeCount = metric.Metadata{ + metaRangeCount = metric.InitMetadata(metric.Metadata{ Name: "ranges", Help: "Number of ranges", Measurement: "Ranges", @@ -141,8 +141,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric provides a measure of the scale of the data size.`, - } - metaUnavailableRangeCount = metric.Metadata{ + }) + metaUnavailableRangeCount = metric.InitMetadata(metric.Metadata{ Name: "ranges.unavailable", Help: "Number of ranges with fewer live replicas than needed for quorum", Measurement: "Ranges", @@ -154,8 +154,8 @@ var ( cluster is unhealthy and can impact workload. If an entire range is unavailable, then it will be unable to process queries. `), - } - metaUnderReplicatedRangeCount = metric.Metadata{ + }) + metaUnderReplicatedRangeCount = metric.InitMetadata(metric.Metadata{ Name: "ranges.underreplicated", Help: "Number of ranges with fewer live replicas than the replication target", Measurement: "Ranges", @@ -163,23 +163,23 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric is an indicator of replication issues. It shows whether the cluster has data that is not conforming to resilience goals. The next step is to determine the corresponding database object, such as the table or index, of these under-replicated ranges and whether the under-replication is temporarily expected. 
Use the statement SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = {id of under-replicated range};`, - } - metaOverReplicatedRangeCount = metric.Metadata{ + }) + metaOverReplicatedRangeCount = metric.InitMetadata(metric.Metadata{ Name: "ranges.overreplicated", Help: "Number of ranges with more live replicas than the replication target", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaDecommissioningRangeCount = metric.Metadata{ + }) + metaDecommissioningRangeCount = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning", Help: "Number of ranges with at lease one replica on a decommissioning node", Measurement: "Ranges", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } + }) // Decommisioning nudger metrics. - metaDecommissioningNudgerEnqueue = metric.Metadata{ + metaDecommissioningNudgerEnqueue = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.enqueue", Help: crstrings.UnwrapText(` Number of enqueued enqueues of a range for decommissioning by the @@ -191,22 +191,22 @@ var ( Unit: metric.Unit_COUNT, LabeledName: "ranges.decommissioning.nudger.enqueue", StaticLabels: metric.MakeLabelPairs(metric.LabelStatus, "enqueue"), - } - metaDecommissioningNudgerEnqueueSuccess = metric.Metadata{ + }) + metaDecommissioningNudgerEnqueueSuccess = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.enqueue.success", Help: "Number of ranges that were successfully enqueued by the decommisioning nudger", Measurement: "Ranges", Unit: metric.Unit_COUNT, LabeledName: "ranges.decommissioning.nudger.enqueue.success", - } - metaDecommissioningNudgerEnqueueFailure = metric.Metadata{ + }) + metaDecommissioningNudgerEnqueueFailure = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.enqueue.failure", Help: "Number of ranges that failed to enqueue at the replicate queue", Measurement: "Ranges", Unit: metric.Unit_COUNT, LabeledName: 
"ranges.decommissioning.nudger.enqueue.failure", - } - metaDecommissioningNudgerProcessSuccess = metric.Metadata{ + }) + metaDecommissioningNudgerProcessSuccess = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.process.success", Help: crstrings.UnwrapText(` Number of ranges enqueued by the decommissioning nudger that were @@ -215,8 +215,8 @@ var ( Measurement: "Ranges", Unit: metric.Unit_COUNT, LabeledName: "ranges.decommissioning.nudger.process.success", - } - metaDecommissioningNudgerProcessFailure = metric.Metadata{ + }) + metaDecommissioningNudgerProcessFailure = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.process.failure", Help: crstrings.UnwrapText(` Number of ranges enqueued by the decommissioning nudger that failed to @@ -225,8 +225,8 @@ var ( Measurement: "Ranges", Unit: metric.Unit_COUNT, LabeledName: "ranges.decommissioning.nudger.process.failure", - } - metaDecommissioningNudgerNotLeaseholderOrInvalidLease = metric.Metadata{ + }) + metaDecommissioningNudgerNotLeaseholderOrInvalidLease = metric.InitMetadata(metric.Metadata{ Name: "ranges.decommissioning.nudger.not_leaseholder_or_invalid_lease", Help: crstrings.UnwrapText(` Number of ranges that were not the leaseholder or had an invalid lease at @@ -235,28 +235,28 @@ var ( Measurement: "Ranges", Unit: metric.Unit_COUNT, LabeledName: "ranges.decommissioning.nudger.not_leaseholder_or_invalid_lease", - } + }) // Lease request metrics. 
- metaLeaseRequestSuccessCount = metric.Metadata{ + metaLeaseRequestSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "leases.success", Help: "Number of successful lease requests", Measurement: "Lease Requests", Unit: metric.Unit_COUNT, - } - metaLeaseRequestErrorCount = metric.Metadata{ + }) + metaLeaseRequestErrorCount = metric.InitMetadata(metric.Metadata{ Name: "leases.error", Help: "Number of failed lease requests", Measurement: "Lease Requests", Unit: metric.Unit_COUNT, - } - metaLeaseRequestLatency = metric.Metadata{ + }) + metaLeaseRequestLatency = metric.InitMetadata(metric.Metadata{ Name: "leases.requests.latency", Help: "Lease request latency (all types and outcomes, coalesced)", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaLeaseTransferSuccessCount = metric.Metadata{ + }) + metaLeaseTransferSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "leases.transfers.success", Help: "Number of successful lease transfers", Measurement: "Lease Transfers", @@ -264,61 +264,61 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `A high number of lease transfers is not a negative or positive signal, rather it is a reflection of the elastic cluster activities. For example, this metric is high during cluster topology changes. A high value is often the reason for NotLeaseHolderErrors which are normal and expected during rebalancing. 
Observing this metric may provide a confirmation of the cause of such errors.`, - } - metaLeaseTransferErrorCount = metric.Metadata{ + }) + metaLeaseTransferErrorCount = metric.InitMetadata(metric.Metadata{ Name: "leases.transfers.error", Help: "Number of failed lease transfers", Measurement: "Lease Transfers", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseTransferLocksWrittenCount = metric.Metadata{ + }) + metaLeaseTransferLocksWrittenCount = metric.InitMetadata(metric.Metadata{ Name: "leases.transfers.locks_written", Help: "Number of locks written to storage during lease transfers", Measurement: "Locks Written", Unit: metric.Unit_COUNT, - } - metaReadSummaryLocalCompressionCount = metric.Metadata{ + }) + metaReadSummaryLocalCompressionCount = metric.InitMetadata(metric.Metadata{ Name: "leases.read_summary.local_compression", Help: "Number of times the local segment of a read summary lost precision due to compression", Measurement: "Compressions", Unit: metric.Unit_COUNT, - } - metaLeaseExpirationCount = metric.Metadata{ + }) + metaLeaseExpirationCount = metric.InitMetadata(metric.Metadata{ Name: "leases.expiration", Help: "Number of replica leaseholders using expiration-based leases", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseEpochCount = metric.Metadata{ + }) + metaLeaseEpochCount = metric.InitMetadata(metric.Metadata{ Name: "leases.epoch", Help: "Number of replica leaseholders using epoch-based leases", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseLeaderCount = metric.Metadata{ + }) + metaLeaseLeaderCount = metric.InitMetadata(metric.Metadata{ Name: "leases.leader", Help: "Number of replica leaseholders using leader leases", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseLivenessCount = metric.Metadata{ + }) + metaLeaseLivenessCount = 
metric.InitMetadata(metric.Metadata{ Name: "leases.liveness", Help: "Number of replica leaseholders for the liveness range(s)", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseViolatingPreferencesCount = metric.Metadata{ + }) + metaLeaseViolatingPreferencesCount = metric.InitMetadata(metric.Metadata{ Name: "leases.preferences.violating", Help: "Number of replica leaseholders which violate lease preferences", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaLeaseLessPreferredCount = metric.Metadata{ + }) + metaLeaseLessPreferredCount = metric.InitMetadata(metric.Metadata{ Name: "leases.preferences.less-preferred", Help: crstrings.UnwrapText(` Number of replica leaseholders which satisfy a lease preference which is @@ -326,148 +326,148 @@ var ( `), Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) - metaSubsumeLocksWrittenCount = metric.Metadata{ + metaSubsumeLocksWrittenCount = metric.InitMetadata(metric.Metadata{ Name: "subsume.locks_written", Help: "Number of locks written to storage during subsume (range merge)", Measurement: "Locks Written", Unit: metric.Unit_COUNT, - } + }) - metaReqCPUNanos = metric.Metadata{ + metaReqCPUNanos = metric.InitMetadata(metric.Metadata{ Name: "replicas.cpunanospersecond", Help: "Nanoseconds of CPU time in Replica request processing including evaluation but not replication", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Storage metrics. 
- metaLiveBytes = metric.Metadata{ + metaLiveBytes = metric.InitMetadata(metric.Metadata{ Name: "livebytes", Help: "Number of bytes of live data (keys plus values)", Measurement: "Storage", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaKeyBytes = metric.Metadata{ + }) + metaKeyBytes = metric.InitMetadata(metric.Metadata{ Name: "keybytes", Help: "Number of bytes taken up by keys", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaValBytes = metric.Metadata{ + }) + metaValBytes = metric.InitMetadata(metric.Metadata{ Name: "valbytes", Help: "Number of bytes taken up by values", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaRangeKeyBytes = metric.Metadata{ + }) + metaRangeKeyBytes = metric.InitMetadata(metric.Metadata{ Name: "rangekeybytes", Help: "Number of bytes taken up by range keys (e.g. MVCC range tombstones)", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaRangeValBytes = metric.Metadata{ + }) + metaRangeValBytes = metric.InitMetadata(metric.Metadata{ Name: "rangevalbytes", Help: "Number of bytes taken up by range key values (e.g. 
MVCC range tombstones)", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaTotalBytes = metric.Metadata{ + }) + metaTotalBytes = metric.InitMetadata(metric.Metadata{ Name: "totalbytes", Help: "Total number of bytes taken up by keys and values including non-live data", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaIntentBytes = metric.Metadata{ + }) + metaIntentBytes = metric.InitMetadata(metric.Metadata{ Name: "intentbytes", Help: "Number of bytes in intent KV pairs", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaLockBytes = metric.Metadata{ + }) + metaLockBytes = metric.InitMetadata(metric.Metadata{ Name: "lockbytes", Help: "Number of bytes taken up by replicated lock key-values (shared and exclusive strength, not intent strength)", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaLiveCount = metric.Metadata{ + }) + metaLiveCount = metric.InitMetadata(metric.Metadata{ Name: "livecount", Help: "Count of live keys", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaKeyCount = metric.Metadata{ + }) + metaKeyCount = metric.InitMetadata(metric.Metadata{ Name: "keycount", Help: "Count of all keys", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaValCount = metric.Metadata{ + }) + metaValCount = metric.InitMetadata(metric.Metadata{ Name: "valcount", Help: "Count of all values", Measurement: "MVCC Values", Unit: metric.Unit_COUNT, - } - metaRangeKeyCount = metric.Metadata{ + }) + metaRangeKeyCount = metric.InitMetadata(metric.Metadata{ Name: "rangekeycount", Help: "Count of all range keys (e.g. MVCC range tombstones)", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaRangeValCount = metric.Metadata{ + }) + metaRangeValCount = metric.InitMetadata(metric.Metadata{ Name: "rangevalcount", Help: "Count of all range key values (e.g. 
MVCC range tombstones)", Measurement: "MVCC Values", Unit: metric.Unit_COUNT, - } - metaIntentCount = metric.Metadata{ + }) + metaIntentCount = metric.InitMetadata(metric.Metadata{ Name: "intentcount", Help: "Count of intent keys", Measurement: "Keys", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLockCount = metric.Metadata{ + }) + metaLockCount = metric.InitMetadata(metric.Metadata{ Name: "lockcount", Help: "Count of replicated locks (shared, exclusive, and intent strength)", Measurement: "Locks", Unit: metric.Unit_COUNT, - } + }) // TODO(nvanbenschoten): rename "intentage" metric to "lockage". - metaLockAge = metric.Metadata{ + metaLockAge = metric.InitMetadata(metric.Metadata{ Name: "intentage", Help: "Cumulative age of locks", Measurement: "Age", Unit: metric.Unit_SECONDS, - } - metaGcBytesAge = metric.Metadata{ + }) + metaGcBytesAge = metric.InitMetadata(metric.Metadata{ Name: "gcbytesage", Help: "Cumulative age of non-live data", Measurement: "Age", Unit: metric.Unit_SECONDS, - } + }) // Contention and intent resolution metrics. 
- metaResolveCommit = metric.Metadata{ + metaResolveCommit = metric.InitMetadata(metric.Metadata{ Name: "intents.resolve-attempts", Help: "Count of (point or range) intent commit evaluation attempts", Measurement: "Operations", Unit: metric.Unit_COUNT, - } - metaResolveAbort = metric.Metadata{ + }) + metaResolveAbort = metric.InitMetadata(metric.Metadata{ Name: "intents.abort-attempts", Help: "Count of (point or range) non-poisoning intent abort evaluation attempts", Measurement: "Operations", Unit: metric.Unit_COUNT, - } - metaResolvePoison = metric.Metadata{ + }) + metaResolvePoison = metric.InitMetadata(metric.Metadata{ Name: "intents.poison-attempts", Help: "Count of (point or range) poisoning intent abort evaluation attempts", Measurement: "Operations", Unit: metric.Unit_COUNT, - } + }) // Disk usage diagram (CR=Cockroach): // --------------------------------- @@ -478,7 +478,7 @@ var ( // "used": |=========| // "available": |=======| // "usable" (computed in UI): |=================| - metaCapacity = metric.Metadata{ + metaCapacity = metric.InitMetadata(metric.Metadata{ Name: "capacity", Help: "Total storage capacity", Measurement: "Storage", @@ -490,8 +490,8 @@ var ( the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). `), - } - metaAvailable = metric.Metadata{ + }) + metaAvailable = metric.InitMetadata(metric.Metadata{ Name: "capacity.available", Help: "Available storage capacity", Measurement: "Storage", @@ -503,8 +503,8 @@ var ( with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). `), - } - metaUsed = metric.Metadata{ + }) + metaUsed = metric.InitMetadata(metric.Metadata{ Name: "capacity.used", Help: "Used storage capacity", Measurement: "Storage", @@ -516,35 +516,35 @@ var ( the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). 
`), - } + }) - metaReserved = metric.Metadata{ + metaReserved = metric.InitMetadata(metric.Metadata{ Name: "capacity.reserved", Help: "Capacity reserved for snapshots", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaSysBytes = metric.Metadata{ + }) + metaSysBytes = metric.InitMetadata(metric.Metadata{ Name: "sysbytes", Help: "Number of bytes in system KV pairs", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaSysCount = metric.Metadata{ + }) + metaSysCount = metric.InitMetadata(metric.Metadata{ Name: "syscount", Help: "Count of system KV pairs", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaAbortSpanBytes = metric.Metadata{ + }) + metaAbortSpanBytes = metric.InitMetadata(metric.Metadata{ Name: "abortspanbytes", Help: "Number of bytes in the abort span", Measurement: "Storage", Unit: metric.Unit_BYTES, - } + }) // Metrics used by the rebalancing logic that aren't already captured elsewhere. - metaAverageQueriesPerSecond = metric.Metadata{ + metaAverageQueriesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.queriespersecond", Help: "Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions.", Measurement: "Queries/Sec", @@ -552,38 +552,38 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities.`, - } - metaAverageWritesPerSecond = metric.Metadata{ + }) + metaAverageWritesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.writespersecond", Help: "Number of keys written (i.e. 
applied by raft) per second to the store, considering the last 30 minutes.", Measurement: "Keys/Sec", Unit: metric.Unit_COUNT, - } - metaAverageRequestsPerSecond = metric.Metadata{ + }) + metaAverageRequestsPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.requestspersecond", Help: "Number of requests received recently per second, considering the last 30 minutes.", Measurement: "Requests/Sec", Unit: metric.Unit_COUNT, - } - metaAverageReadsPerSecond = metric.Metadata{ + }) + metaAverageReadsPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.readspersecond", Help: "Number of keys read recently per second, considering the last 30 minutes.", Measurement: "Keys/Sec", Unit: metric.Unit_COUNT, - } - metaAverageWriteBytesPerSecond = metric.Metadata{ + }) + metaAverageWriteBytesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.writebytespersecond", Help: "Number of bytes written recently per second, considering the last 30 minutes.", Measurement: "Bytes/Sec", Unit: metric.Unit_BYTES, - } - metaAverageReadBytesPerSecond = metric.Metadata{ + }) + metaAverageReadBytesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.readbytespersecond", Help: "Number of bytes read recently per second, considering the last 30 minutes.", Measurement: "Bytes/Sec", Unit: metric.Unit_BYTES, - } - metaAverageCPUNanosPerSecond = metric.Metadata{ + }) + metaAverageCPUNanosPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.cpunanospersecond", Help: "Average CPU nanoseconds spent on processing replica operations in the last 30 minutes.", Measurement: "Nanoseconds/Sec", @@ -591,8 +591,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `A high value of this metric could indicate that one of the store's replicas is part of a hot range.`, - } - metaRecentReplicaCPUNanosPerSecond = metric.Metadata{ + }) + metaRecentReplicaCPUNanosPerSecond = 
metric.InitMetadata(metric.Metadata{ Name: "rebalancing.replicas.cpunanospersecond", Help: crstrings.UnwrapText(` Histogram of average CPU nanoseconds spent on processing replica @@ -607,8 +607,8 @@ var ( replicas is part of a hot range. See also the non-histogram variant: rebalancing.cpunanospersecond. `), - } - metaRecentReplicaQueriesPerSecond = metric.Metadata{ + }) + metaRecentReplicaQueriesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.replicas.queriespersecond", Help: crstrings.UnwrapText(` Histogram of average kv-level requests received per second by replicas on @@ -619,18 +619,18 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `A high value of this metric could indicate that one of the store's replicas is part of a hot range. See also: rebalancing_replicas_cpunanospersecond.`, - } + }) // Metric for tracking follower reads. - metaFollowerReadsCount = metric.Metadata{ + metaFollowerReadsCount = metric.InitMetadata(metric.Metadata{ Name: "follower_reads.success_count", Help: "Number of reads successfully processed by any replica", Measurement: "Read Ops", Unit: metric.Unit_COUNT, - } + }) // Server-side transaction metrics. 
- metaCommitWaitBeforeCommitTriggerCount = metric.Metadata{ + metaCommitWaitBeforeCommitTriggerCount = metric.InitMetadata(metric.Metadata{ Name: "txn.commit_waits.before_commit_trigger", Help: crstrings.UnwrapText(` Number of KV transactions that had to commit-wait on the server before @@ -638,32 +638,32 @@ var ( `), Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaWriteEvaluationServerSideRetrySuccess = metric.Metadata{ + }) + metaWriteEvaluationServerSideRetrySuccess = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side_retry.write_evaluation.success", Help: "Number of write batches that were successfully refreshed server side", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaWriteEvaluationServerSideRetryFailure = metric.Metadata{ + }) + metaWriteEvaluationServerSideRetryFailure = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side_retry.write_evaluation.failure", Help: "Number of write batches that were not successfully refreshed server side", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaReadEvaluationServerSideRetrySuccess = metric.Metadata{ + }) + metaReadEvaluationServerSideRetrySuccess = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side_retry.read_evaluation.success", Help: "Number of read batches that were successfully refreshed server side", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaReadEvaluationServerSideRetryFailure = metric.Metadata{ + }) + metaReadEvaluationServerSideRetryFailure = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side_retry.read_evaluation.failure", Help: "Number of read batches that were not successfully refreshed server side", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaReadWithinUncertaintyIntervalErrorServerSideRetrySuccess = metric.Metadata{ + }) + metaReadWithinUncertaintyIntervalErrorServerSideRetrySuccess = metric.InitMetadata(metric.Metadata{ Name: 
"txn.server_side_retry.uncertainty_interval_error.success", Help: crstrings.UnwrapText(` Number of batches that ran into uncertainty interval errors that were @@ -671,8 +671,8 @@ var ( `), Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaReadWithinUncertaintyIntervalErrorServerSideRetryFailure = metric.Metadata{ + }) + metaReadWithinUncertaintyIntervalErrorServerSideRetryFailure = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side_retry.uncertainty_interval_error.failure", Help: crstrings.UnwrapText(` Number of batches that ran into uncertainty interval errors that were not @@ -680,30 +680,30 @@ var ( `), Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaOnePhaseCommitSuccess = metric.Metadata{ + }) + metaOnePhaseCommitSuccess = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side.1PC.success", Help: "Number of batches that attempted to commit using 1PC and succeeded", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } - metaOnePhaseCommitFailure = metric.Metadata{ + }) + metaOnePhaseCommitFailure = metric.InitMetadata(metric.Metadata{ Name: "txn.server_side.1PC.failure", Help: "Number of batches that attempted to commit using 1PC and failed", Measurement: "KV Transactions", Unit: metric.Unit_COUNT, - } + }) //Ingest metrics - metaIngestCount = metric.Metadata{ + metaIngestCount = metric.InitMetadata(metric.Metadata{ Name: "storage.ingest.count", Help: "Number of successful ingestions performed", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) // Pebble metrics. - metaRdbBlockCacheHits = metric.Metadata{ + metaRdbBlockCacheHits = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.block.cache.hits", Help: "Count of block cache hits", Measurement: "Cache Ops", @@ -716,8 +716,8 @@ var ( shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. 
`), - } - metaRdbBlockCacheMisses = metric.Metadata{ + }) + metaRdbBlockCacheMisses = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.block.cache.misses", Help: "Count of block cache misses", Measurement: "Cache Ops", @@ -730,44 +730,44 @@ var ( shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. `), - } - metaRdbBlockCacheUsage = metric.Metadata{ + }) + metaRdbBlockCacheUsage = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.block.cache.usage", Help: "Bytes used by the block cache", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaRdbBloomFilterPrefixChecked = metric.Metadata{ + }) + metaRdbBloomFilterPrefixChecked = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.bloom.filter.prefix.checked", Help: "Number of times the bloom filter was checked", Measurement: "Bloom Filter Ops", Unit: metric.Unit_COUNT, - } - metaRdbBloomFilterPrefixUseful = metric.Metadata{ + }) + metaRdbBloomFilterPrefixUseful = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.bloom.filter.prefix.useful", Help: "Number of times the bloom filter helped avoid iterator creation", Measurement: "Bloom Filter Ops", Unit: metric.Unit_COUNT, - } - metaRdbMemtableTotalSize = metric.Metadata{ + }) + metaRdbMemtableTotalSize = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.memtable.total-size", Help: "Current size of memtable in bytes", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaRdbFlushes = metric.Metadata{ + }) + metaRdbFlushes = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.flushes", Help: "Number of table flushes", Measurement: "Flushes", Unit: metric.Unit_COUNT, - } - metaRdbFlushedBytes = metric.Metadata{ + }) + metaRdbFlushedBytes = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.flushed-bytes", Help: "Bytes written during flush", Measurement: "Bytes Written", Unit: metric.Unit_BYTES, - } - metaRdbCompactions = metric.Metadata{ + }) + metaRdbCompactions = 
metric.InitMetadata(metric.Metadata{ Name: "rocksdb.compactions", Help: "Number of table compactions", Measurement: "Compactions", @@ -781,69 +781,69 @@ var ( for an extended period, the cluster will initially exhibit performance issues that will eventually escalate into stability issues. `), - } - metaRdbIngestedBytes = metric.Metadata{ + }) + metaRdbIngestedBytes = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.ingested-bytes", Help: "Bytes ingested", Measurement: "Bytes Ingested", Unit: metric.Unit_BYTES, - } - metaRdbCompactedBytesRead = metric.Metadata{ + }) + metaRdbCompactedBytesRead = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.compacted-bytes-read", Help: "Bytes read during compaction", Measurement: "Bytes Read", Unit: metric.Unit_BYTES, - } - metaRdbCompactedBytesWritten = metric.Metadata{ + }) + metaRdbCompactedBytesWritten = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.compacted-bytes-written", Help: "Bytes written during compaction", Measurement: "Bytes Written", Unit: metric.Unit_BYTES, - } - metaRdbTableReadersMemEstimate = metric.Metadata{ + }) + metaRdbTableReadersMemEstimate = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.table-readers-mem-estimate", Help: "Memory used by index and filter blocks", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaRdbReadAmplification = metric.Metadata{ + }) + metaRdbReadAmplification = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.read-amplification", Help: "Number of disk reads per query", Measurement: "Disk Reads per Query", Unit: metric.Unit_CONST, Visibility: metric.Metadata_SUPPORT, - } - metaRdbNumSSTables = metric.Metadata{ + }) + metaRdbNumSSTables = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.num-sstables", Help: "Number of storage engine SSTables", Measurement: "SSTables", Unit: metric.Unit_COUNT, - } - metaRdbPendingCompaction = metric.Metadata{ + }) + metaRdbPendingCompaction = metric.InitMetadata(metric.Metadata{ Name: 
"rocksdb.estimated-pending-compaction", Help: "Estimated pending compaction bytes", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaRdbMarkedForCompactionFiles = metric.Metadata{ + }) + metaRdbMarkedForCompactionFiles = metric.InitMetadata(metric.Metadata{ Name: "storage.marked-for-compaction-files", Help: "Count of SSTables marked for compaction", Measurement: "SSTables", Unit: metric.Unit_COUNT, - } - metaRdbKeysRangeKeySets = metric.Metadata{ + }) + metaRdbKeysRangeKeySets = metric.InitMetadata(metric.Metadata{ Name: "storage.keys.range-key-set.count", Help: "Approximate count of RangeKeySet internal keys across the storage engine.", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaRdbKeysTombstones = metric.Metadata{ + }) + metaRdbKeysTombstones = metric.InitMetadata(metric.Metadata{ Name: "storage.keys.tombstone.count", Help: "Approximate count of DEL, SINGLEDEL and RANGEDEL internal keys across the storage engine.", Measurement: "Keys", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } + }) // NB: bytes only ever get flushed into L0, so this metric does not // exist for any other level. metaRdbL0BytesFlushed = storageLevelMetricMetadata( @@ -899,7 +899,7 @@ var ( nil, // No visibility overrides ) - metaRdbWriteStalls = metric.Metadata{ + metaRdbWriteStalls = metric.InitMetadata(metric.Metadata{ Name: "storage.write-stalls", Help: "Number of instances of intentional write stalls to backpressure incoming writes", Measurement: "Events", @@ -915,16 +915,16 @@ var ( rate persists over an extended period of time, and worse, if it is increasing. 
`), - } - metaRdbWriteStallNanos = metric.Metadata{ + }) + metaRdbWriteStallNanos = metric.InitMetadata(metric.Metadata{ Name: "storage.write-stall-nanos", Help: "Total write stall duration in nanos", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } + }) - metaRdbCheckpoints = metric.Metadata{ + metaRdbCheckpoints = metric.InitMetadata(metric.Metadata{ Name: "storage.checkpoints", Help: crstrings.UnwrapText(` The number of checkpoint directories found in storage. @@ -945,39 +945,39 @@ var ( Measurement: "Directories", Unit: metric.Unit_COUNT, - } + }) - metaBlockBytes = metric.Metadata{ + metaBlockBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.block-load.bytes", Help: "Bytes loaded by storage engine iterators (possibly cached). See storage.AggregatedIteratorStats for details.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBlockBytesInCache = metric.Metadata{ + }) + metaBlockBytesInCache = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.block-load.cached-bytes", Help: "Bytes loaded by storage engine iterators from the block cache. See storage.AggregatedIteratorStats for details.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBlockReadDuration = metric.Metadata{ + }) + metaBlockReadDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.block-load.read-duration", Help: "Cumulative time storage engine iterators spent loading blocks from durable storage. See storage.AggregatedIteratorStats for details.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaIterExternalSeeks = metric.Metadata{ + }) + metaIterExternalSeeks = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.external.seeks", Help: "Cumulative count of seeks performed on storage engine iterators. 
See storage.AggregatedIteratorStats for details.", Measurement: "Iterator Ops", Unit: metric.Unit_COUNT, - } - metaIterExternalSteps = metric.Metadata{ + }) + metaIterExternalSteps = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.external.steps", Help: "Cumulative count of steps performed on storage engine iterators. See storage.AggregatedIteratorStats for details.", Measurement: "Iterator Ops", Unit: metric.Unit_COUNT, - } - metaIterInternalSeeks = metric.Metadata{ + }) + metaIterInternalSeeks = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.internal.seeks", Help: crstrings.UnwrapText(` Cumulative count of seeks performed internally within storage engine @@ -991,8 +991,8 @@ var ( `), Measurement: "Iterator Ops", Unit: metric.Unit_COUNT, - } - metaIterInternalSteps = metric.Metadata{ + }) + metaIterInternalSteps = metric.InitMetadata(metric.Metadata{ Name: "storage.iterator.internal.steps", Help: crstrings.UnwrapText(` Cumulative count of steps performed internally within storage engine @@ -1006,8 +1006,8 @@ var ( `), Measurement: "Iterator Ops", Unit: metric.Unit_COUNT, - } - metaStorageCompactionsDuration = metric.Metadata{ + }) + metaStorageCompactionsDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.compactions.duration", Help: crstrings.UnwrapText(` Cumulative sum of all compaction durations. @@ -1018,8 +1018,8 @@ var ( `), Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaStorageWriteAmplification = metric.Metadata{ + }) + metaStorageWriteAmplification = metric.InitMetadata(metric.Metadata{ Name: "storage.write-amplification", Help: crstrings.UnwrapText(` Running measure of write-amplification. 
@@ -1031,8 +1031,8 @@ var ( `), Measurement: "Ratio of bytes written to logical bytes", Unit: metric.Unit_COUNT, - } - metaStorageCompactionsKeysPinnedCount = metric.Metadata{ + }) + metaStorageCompactionsKeysPinnedCount = metric.InitMetadata(metric.Metadata{ Name: "storage.compactions.keys.pinned.count", Help: crstrings.UnwrapText(` Cumulative count of storage engine KVs written to sstables during @@ -1049,8 +1049,8 @@ var ( `), Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaStorageCompactionsKeysPinnedBytes = metric.Metadata{ + }) + metaStorageCompactionsKeysPinnedBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compactions.keys.pinned.bytes", Help: crstrings.UnwrapText(` Cumulative size of storage engine KVs written to sstables during flushes @@ -1066,20 +1066,20 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaStorageCompactionsCancelledCount = metric.Metadata{ + }) + metaStorageCompactionsCancelledCount = metric.InitMetadata(metric.Metadata{ Name: "storage.compactions.cancelled.count", Help: `Cumulative count of compactions that were cancelled before they completed due to a conflicting operation.`, Measurement: "Compactions", Unit: metric.Unit_COUNT, - } - metaStorageCompactionsCancelledBytes = metric.Metadata{ + }) + metaStorageCompactionsCancelledBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compactions.cancelled.bytes", Help: `Cumulative volume of data written to sstables during compactions that were ultimately cancelled due to a conflicting operation.`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaStoragePointDeletionsBytes = metric.Metadata{ + }) + metaStoragePointDeletionsBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.point_deletions.bytes", Help: crstrings.UnwrapText(` Estimated file bytes that will be saved by compacting all point @@ -1090,8 +1090,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaStorageRangeDeletionsBytes = metric.Metadata{ + }) + 
metaStorageRangeDeletionsBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.range_deletions.bytes", Help: crstrings.UnwrapText(` Estimated file bytes that will be saved by compacting all range @@ -1102,46 +1102,46 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaInitialStatsComplete = metric.Metadata{ + }) + metaInitialStatsComplete = metric.InitMetadata(metric.Metadata{ Name: "storage.initial_stats_complete", Help: "Set to 1 when initial table stats collection is complete.", Measurement: "Boolean", Unit: metric.Unit_COUNT, - } + }) // TODO(sumeer): remove, since can fire due to delete-only compactions. - metaStorageSingleDelInvariantViolationCount = metric.Metadata{ + metaStorageSingleDelInvariantViolationCount = metric.InitMetadata(metric.Metadata{ Name: "storage.single-delete.invariant-violation", Help: "Number of SingleDelete invariant violations", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) // TODO(sumeer): remove, since can fire due to delete-only compactions. 
- metaStorageSingleDelIneffectualCount = metric.Metadata{ + metaStorageSingleDelIneffectualCount = metric.InitMetadata(metric.Metadata{ Name: "storage.single-delete.ineffectual", Help: "Number of SingleDeletes that were ineffectual", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaSharedStorageBytesWritten = metric.Metadata{ + }) + metaSharedStorageBytesWritten = metric.InitMetadata(metric.Metadata{ Name: "storage.shared-storage.write", Help: "Bytes written to external storage", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaSharedStorageBytesRead = metric.Metadata{ + }) + metaSharedStorageBytesRead = metric.InitMetadata(metric.Metadata{ Name: "storage.shared-storage.read", Help: "Bytes read from shared storage", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBlockLoadsInProgress = metric.Metadata{ + }) + metaBlockLoadsInProgress = metric.InitMetadata(metric.Metadata{ Name: "storage.block-load.active", Help: "The number of sstable block loads currently in progress", Measurement: "Block loads", Unit: metric.Unit_COUNT, - } - metaBlockLoadsQueued = metric.Metadata{ + }) + metaBlockLoadsQueued = metric.InitMetadata(metric.Metadata{ Name: "storage.block-load.queued", Help: crstrings.UnwrapText(` The cumulative number of SSTable block loads that were delayed because @@ -1149,92 +1149,92 @@ var ( `), Measurement: "Block loads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheSize = metric.Metadata{ + }) + metaSecondaryCacheSize = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.size", Help: "The number of sstable bytes stored in the secondary cache", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaSecondaryCacheCount = metric.Metadata{ + }) + metaSecondaryCacheCount = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.count", Help: "The count of cache blocks in the secondary cache (not sstable blocks)", Measurement: "Cache items", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheTotalReads = 
metric.Metadata{ + }) + metaSecondaryCacheTotalReads = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-total", Help: "The number of reads from the secondary cache", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheMultiShardReads = metric.Metadata{ + }) + metaSecondaryCacheMultiShardReads = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-multi-shard", Help: "The number of secondary cache reads that require reading data from 2+ shards", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheMultiBlockReads = metric.Metadata{ + }) + metaSecondaryCacheMultiBlockReads = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-multi-block", Help: "The number of secondary cache reads that require reading data from 2+ cache blocks", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheReadsWithFullHit = metric.Metadata{ + }) + metaSecondaryCacheReadsWithFullHit = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-full-hit", Help: "The number of reads where all data returned was read from the secondary cache", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheReadsWithPartialHit = metric.Metadata{ + }) + metaSecondaryCacheReadsWithPartialHit = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-partial-hit", Help: "The number of reads where some data returned was read from the secondary cache", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheReadsWithNoHit = metric.Metadata{ + }) + metaSecondaryCacheReadsWithNoHit = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.reads-no-hit", Help: "The number of reads where no data returned was read from the secondary cache", Measurement: "Num reads", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheEvictions = metric.Metadata{ + }) + metaSecondaryCacheEvictions = 
metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.evictions", Help: "The number of times a cache block was evicted from the secondary cache", Measurement: "Num evictions", Unit: metric.Unit_COUNT, - } - metaSecondaryCacheWriteBackFailures = metric.Metadata{ + }) + metaSecondaryCacheWriteBackFailures = metric.InitMetadata(metric.Metadata{ Name: "storage.secondary-cache.write-back-failures", Help: "The number of times writing a cache block to the secondary cache failed", Measurement: "Num failures", Unit: metric.Unit_COUNT, - } - metaFlushableIngestCount = metric.Metadata{ + }) + metaFlushableIngestCount = metric.InitMetadata(metric.Metadata{ Name: "storage.flush.ingest.count", Help: "Flushes performing an ingest (flushable ingestions)", Measurement: "Flushes", Unit: metric.Unit_COUNT, - } - metaFlushableIngestTableCount = metric.Metadata{ + }) + metaFlushableIngestTableCount = metric.InitMetadata(metric.Metadata{ Name: "storage.flush.ingest.table.count", Help: "Tables ingested via flushes (flushable ingestions)", Measurement: "Tables", Unit: metric.Unit_COUNT, - } - metaFlushableIngestTableBytes = metric.Metadata{ + }) + metaFlushableIngestTableBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.flush.ingest.table.bytes", Help: "Bytes ingested via flushes (flushable ingestions)", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBatchCommitCount = metric.Metadata{ + }) + metaBatchCommitCount = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.count", Help: "Count of batch commits. See storage.AggregatedBatchCommitStats for details.", Measurement: "Commit Ops", Unit: metric.Unit_COUNT, - } - metaBatchCommitDuration = metric.Metadata{ + }) + metaBatchCommitDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.duration", Help: crstrings.UnwrapText(` Cumulative time spent in batch commit. 
See @@ -1242,8 +1242,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitSemWaitDuration = metric.Metadata{ + }) + metaBatchCommitSemWaitDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.sem-wait.duration", Help: crstrings.UnwrapText(` Cumulative time spent in semaphore wait, for batch commit. See @@ -1251,8 +1251,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitWALQWaitDuration = metric.Metadata{ + }) + metaBatchCommitWALQWaitDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.wal-queue-wait.duration", Help: crstrings.UnwrapText(` Cumulative time spent waiting for memory blocks in the WAL queue, for @@ -1260,8 +1260,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitMemStallDuration = metric.Metadata{ + }) + metaBatchCommitMemStallDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.mem-stall.duration", Help: crstrings.UnwrapText(` Cumulative time spent in a write stall due to too many memtables, for @@ -1269,8 +1269,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitL0StallDuration = metric.Metadata{ + }) + metaBatchCommitL0StallDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.l0-stall.duration", Help: crstrings.UnwrapText(` Cumulative time spent in a write stall due to high read amplification in @@ -1278,8 +1278,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitWALRotDuration = metric.Metadata{ + }) + metaBatchCommitWALRotDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.wal-rotation.duration", Help: crstrings.UnwrapText(` Cumulative time spent waiting for WAL rotation, for batch commit. 
See @@ -1287,8 +1287,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaBatchCommitCommitWaitDuration = metric.Metadata{ + }) + metaBatchCommitCommitWaitDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.batch-commit.commit-wait.duration", Help: crstrings.UnwrapText(` Cumulative time spent waiting for WAL sync, for batch commit. See @@ -1296,8 +1296,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaSSTableZombieBytes = metric.Metadata{ + }) + metaSSTableZombieBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.sstable.zombie.bytes", Help: crstrings.UnwrapText(` Bytes in SSTables that have been logically deleted, but can't yet be @@ -1305,171 +1305,171 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaSSTableRemoteBytes = metric.Metadata{ + }) + metaSSTableRemoteBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.sstable.remote.bytes", Help: crstrings.UnwrapText(` Bytes in SSTables that are stored off-disk (remotely) in object storage. `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaSSTableRemoteCount = metric.Metadata{ + }) + metaSSTableRemoteCount = metric.InitMetadata(metric.Metadata{ Name: "storage.sstable.remote.count", Help: crstrings.UnwrapText(` Count of SSTables that are stored off-disk (remotely) in object storage. 
`), Measurement: "SSTables", Unit: metric.Unit_COUNT, - } + }) - metaCompressionSnappyBytes = metric.Metadata{ + metaCompressionSnappyBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.snappy.bytes", Help: "Total on disk size of sstable and blob value data that is compressed with the Snappy algorithm.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCompressionSnappyCR = metric.Metadata{ + }) + metaCompressionSnappyCR = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.snappy.cr", Help: "Average compression ratio of sstable and blob value data that is compressed with the snappy algorithm.", Measurement: "Ratio", Unit: metric.Unit_CONST, - } - metaCompressionMinLZBytes = metric.Metadata{ + }) + metaCompressionMinLZBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.minlz.bytes", Help: "Total on disk size of sstable and blob value data that is compressed with the MinLZ algorithm.", Measurement: "Bytes", Unit: metric.Unit_CONST, - } - metaCompressionMinLZCR = metric.Metadata{ + }) + metaCompressionMinLZCR = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.minlz.cr", Help: "Average compression ratio of sstable and blob value data that is compressed with the MinLZ algorithm.", Measurement: "Ratio", Unit: metric.Unit_CONST, - } - metaCompressionZstdBytes = metric.Metadata{ + }) + metaCompressionZstdBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.zstd.bytes", Help: "Total on disk size of sstable and blob value data that is compressed with the Zstd algorithm.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCompressionZstdCR = metric.Metadata{ + }) + metaCompressionZstdCR = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.zstd.cr", Help: "Average compression ratio of sstable and blob value data that is compressed with the Zstd algorithm.", Measurement: "Ratio", Unit: metric.Unit_CONST, - } - metaCompressionNoneBytes = metric.Metadata{ + }) + 
metaCompressionNoneBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.none.bytes", Help: "Total on disk size of sstable and blob value data that is not compressed.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCompressionUnknownBytes = metric.Metadata{ + }) + metaCompressionUnknownBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.unknown.bytes", Help: "Total on disk size of sstable and blob value data that is compressed but for which we have no compression statistics.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCompressionOverallCR = metric.Metadata{ + }) + metaCompressionOverallCR = metric.InitMetadata(metric.Metadata{ Name: "storage.compression.cr", Help: "Average compression ratio of sstable and blob value data.", Measurement: "Ratio", Unit: metric.Unit_CONST, - } - metaBytesCompressedL5Data = metric.Metadata{ + }) + metaBytesCompressedL5Data = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-compressed.l5.data", Help: "Total number of logical bytes compressed for L5 data blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-compressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "5", metric.LabelType, "data"), - } - metaBytesDecompressedL5Data = metric.Metadata{ + }) + metaBytesDecompressedL5Data = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-decompressed.l5.data", Help: "Total number of logical bytes decompressed for L5 data blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-decompressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "5", metric.LabelType, "data"), - } - metaBytesCompressedL5Values = metric.Metadata{ + }) + metaBytesCompressedL5Values = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-compressed.l5.values", Help: "Total number of logical bytes compressed for L5 value blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: 
"storage.bytes-compressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "5", metric.LabelType, "values"), - } - metaBytesDecompressedL5Values = metric.Metadata{ + }) + metaBytesDecompressedL5Values = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-decompressed.l5.values", Help: "Total number of logical bytes decompressed for L5 value blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-decompressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "5", metric.LabelType, "values"), - } - metaBytesCompressedL6Data = metric.Metadata{ + }) + metaBytesCompressedL6Data = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-compressed.l6.data", Help: "Total number of logical bytes compressed for L6 data blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-compressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "6", metric.LabelType, "data"), - } - metaBytesDecompressedL6Data = metric.Metadata{ + }) + metaBytesDecompressedL6Data = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-decompressed.l6.data", Help: "Total number of logical bytes decompressed for L6 data blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-decompressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "6", metric.LabelType, "data"), - } - metaBytesCompressedL6Values = metric.Metadata{ + }) + metaBytesCompressedL6Values = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-compressed.l6.values", Help: "Total number of logical bytes compressed for L6 value blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-compressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "6", metric.LabelType, "values"), - } - metaBytesDecompressedL6Values = metric.Metadata{ + }) + metaBytesDecompressedL6Values = metric.InitMetadata(metric.Metadata{ Name: "storage.bytes-decompressed.l6.values", Help: "Total number of 
logical bytes decompressed for L6 value blocks.", Measurement: "Bytes", Unit: metric.Unit_BYTES, LabeledName: "storage.bytes-decompressed", StaticLabels: metric.MakeLabelPairs(metric.LabelLevel, "6", metric.LabelType, "values"), - } + }) ) var ( // Disk health metrics. - metaDiskSlow = metric.Metadata{ + metaDiskSlow = metric.InitMetadata(metric.Metadata{ Name: "storage.disk-slow", Help: "Number of instances of disk operations taking longer than 10s", Measurement: "Events", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } + }) // TODO(jackson): Consider removing the `storage.disk-stalled` metric. // Stalls fatal the node. It's unlikely this metric will ever be reported // greater than zero. - metaDiskStalled = metric.Metadata{ + metaDiskStalled = metric.InitMetadata(metric.Metadata{ Name: "storage.disk-stalled", Help: "Number of instances of disk operations taking longer than 20s", Measurement: "Events", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaDiskUnhealthyDuration = metric.Metadata{ + }) + metaDiskUnhealthyDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.disk-unhealthy.duration", Help: "Total disk unhealthy duration in nanos", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Range event metrics. - metaRangeSplits = metric.Metadata{ + metaRangeSplits = metric.InitMetadata(metric.Metadata{ Name: "range.splits", Help: "Number of range splits", Measurement: "Range Ops", @@ -1477,8 +1477,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric indicates how fast a workload is scaling up. Spikes can indicate resource hotspots since the split heuristic is based on QPS. 
To understand whether hotspots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics such as CPU usage, such as sys.cpu.combined.percent-normalized, or use the Hot Ranges page.`, - } - metaRangeMerges = metric.Metadata{ + }) + metaRangeMerges = metric.InitMetadata(metric.Metadata{ Name: "range.merges", Help: "Number of range merges", Measurement: "Range Ops", @@ -1486,117 +1486,117 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `This metric indicates how fast a workload is scaling down. Merges are Cockroach's optimization for performance. This metric indicates that there have been deletes in the workload.`, - } - metaRangeAdds = metric.Metadata{ + }) + metaRangeAdds = metric.InitMetadata(metric.Metadata{ Name: "range.adds", Help: "Number of range additions", Measurement: "Range Ops", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRangeRemoves = metric.Metadata{ + }) + metaRangeRemoves = metric.InitMetadata(metric.Metadata{ Name: "range.removes", Help: "Number of range removals", Measurement: "Range Ops", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotsGenerated = metric.Metadata{ + }) + metaRangeSnapshotsGenerated = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.generated", Help: "Number of generated snapshots", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotsAppliedByVoters = metric.Metadata{ + }) + metaRangeSnapshotsAppliedByVoters = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.applied-voter", Help: "Number of snapshots applied by voter replicas", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotsAppliedForInitialUpreplication = metric.Metadata{ + }) + metaRangeSnapshotsAppliedForInitialUpreplication = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.applied-initial", Help: "Number of snapshots applied for initial upreplication", Measurement: 
"Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotsAppliedByNonVoter = metric.Metadata{ + }) + metaRangeSnapshotsAppliedByNonVoter = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.applied-non-voter", Help: "Number of snapshots applied by non-voter replicas", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotRcvdBytes = metric.Metadata{ + }) + metaRangeSnapshotRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.rcvd-bytes", Help: "Number of snapshot bytes received", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotSentBytes = metric.Metadata{ + }) + metaRangeSnapshotSentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.sent-bytes", Help: "Number of snapshot bytes sent", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotUnknownRcvdBytes = metric.Metadata{ + }) + metaRangeSnapshotUnknownRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.unknown.rcvd-bytes", Help: "Number of unknown snapshot bytes received", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotUnknownSentBytes = metric.Metadata{ + }) + metaRangeSnapshotUnknownSentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.unknown.sent-bytes", Help: "Number of unknown snapshot bytes sent", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotRebalancingRcvdBytes = metric.Metadata{ + }) + metaRangeSnapshotRebalancingRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.rebalancing.rcvd-bytes", Help: "Number of rebalancing snapshot bytes received", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotRebalancingSentBytes = metric.Metadata{ + }) + metaRangeSnapshotRebalancingSentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.rebalancing.sent-bytes", Help: "Number of rebalancing snapshot bytes sent", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - 
metaRangeSnapshotRecoveryRcvdBytes = metric.Metadata{ + }) + metaRangeSnapshotRecoveryRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recovery.rcvd-bytes", Help: "Number of raft recovery snapshot bytes received", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotRecoverySentBytes = metric.Metadata{ + }) + metaRangeSnapshotRecoverySentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recovery.sent-bytes", Help: "Number of raft recovery snapshot bytes sent", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotUpreplicationRcvdBytes = metric.Metadata{ + }) + metaRangeSnapshotUpreplicationRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.upreplication.rcvd-bytes", Help: "Number of upreplication snapshot bytes received", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotUpreplicationSentBytes = metric.Metadata{ + }) + metaRangeSnapshotUpreplicationSentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.upreplication.sent-bytes", Help: "Number of upreplication snapshot bytes sent", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotRecvFailed = metric.Metadata{ + }) + metaRangeSnapshotRecvFailed = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-failed", Help: "Number of range snapshot initialization messages that errored out on the recipient, typically before any data is transferred", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotRecvUnusable = metric.Metadata{ + }) + metaRangeSnapshotRecvUnusable = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-unusable", Help: "Number of range snapshot that were fully transmitted but determined to be unnecessary or unusable", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapShotCrossRegionSentBytes = metric.Metadata{ + }) + metaRangeSnapShotCrossRegionSentBytes = metric.InitMetadata(metric.Metadata{ 
Name: "range.snapshots.cross-region.sent-bytes", Help: crstrings.UnwrapText(` Number of snapshot bytes sent cross region by this store when region @@ -1604,8 +1604,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapShotCrossRegionRcvdBytes = metric.Metadata{ + }) + metaRangeSnapShotCrossRegionRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.cross-region.rcvd-bytes", Help: crstrings.UnwrapText(` Number of snapshot bytes received cross region by this store when region @@ -1613,8 +1613,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapShotCrossZoneSentBytes = metric.Metadata{ + }) + metaRangeSnapShotCrossZoneSentBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.cross-zone.sent-bytes", Help: crstrings.UnwrapText(` Number of snapshot bytes sent cross zone within the same region by this @@ -1625,8 +1625,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapShotCrossZoneRcvdBytes = metric.Metadata{ + }) + metaRangeSnapShotCrossZoneRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.cross-zone.rcvd-bytes", Help: crstrings.UnwrapText(` Number of snapshot bytes received cross zone within the same region by @@ -1637,70 +1637,70 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotSendQueueLength = metric.Metadata{ + }) + metaRangeSnapshotSendQueueLength = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.send-queue", Help: "Number of snapshots queued to send", Measurement: "Snapshots", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRangeSnapshotRecvQueueLength = metric.Metadata{ + }) + metaRangeSnapshotRecvQueueLength = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-queue", Help: "Number of snapshots queued to receive", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotSendInProgress = metric.Metadata{ + }) + 
metaRangeSnapshotSendInProgress = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.send-in-progress", Help: "Number of non-empty snapshots being sent", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotRecvInProgress = metric.Metadata{ + }) + metaRangeSnapshotRecvInProgress = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-in-progress", Help: "Number of non-empty snapshots being received", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotSendTotalInProgress = metric.Metadata{ + }) + metaRangeSnapshotSendTotalInProgress = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.send-total-in-progress", Help: "Number of total snapshots being sent", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotRecvTotalInProgress = metric.Metadata{ + }) + metaRangeSnapshotRecvTotalInProgress = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-total-in-progress", Help: "Number of total snapshots being received", Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaRangeSnapshotSendQueueSize = metric.Metadata{ + }) + metaRangeSnapshotSendQueueSize = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.send-queue-bytes", Help: "Total size of all snapshots in the snapshot send queue", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRangeSnapshotRecvQueueSize = metric.Metadata{ + }) + metaRangeSnapshotRecvQueueSize = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.recv-queue-bytes", Help: "Total size of all snapshots in the snapshot receive queue", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) - metaRangeRaftLeaderTransfers = metric.Metadata{ + metaRangeRaftLeaderTransfers = metric.InitMetadata(metric.Metadata{ Name: "range.raftleadertransfers", Help: "Number of raft leader transfers", Measurement: "Leader Transfers", Unit: metric.Unit_COUNT, - } - metaRangeRaftLeaderRemovals = metric.Metadata{ + }) + 
metaRangeRaftLeaderRemovals = metric.InitMetadata(metric.Metadata{ Name: "range.raftleaderremovals", Help: "Number of times the current Raft leader was removed from a range", Measurement: "Raft leader removals", Unit: metric.Unit_COUNT, - } - metaRangeLossOfQuorumRecoveries = metric.Metadata{ + }) + metaRangeLossOfQuorumRecoveries = metric.InitMetadata(metric.Metadata{ Name: "range.recoveries", Help: crstrings.UnwrapText(` Count of offline loss of quorum recovery operations performed on ranges. @@ -1711,8 +1711,8 @@ var ( `), Measurement: "Quorum Recoveries", Unit: metric.Unit_COUNT, - } - metaDelegateSnapshotSendBytes = metric.Metadata{ + }) + metaDelegateSnapshotSendBytes = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.delegate.sent-bytes", Help: crstrings.UnwrapText(` Bytes sent using a delegate. @@ -1723,8 +1723,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaDelegateSnapshotSuccesses = metric.Metadata{ + }) + metaDelegateSnapshotSuccesses = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.delegate.successes", Help: crstrings.UnwrapText(` Number of snapshots that were delegated to a different node and resulted @@ -1732,8 +1732,8 @@ var ( `), Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaDelegateSnapshotFailures = metric.Metadata{ + }) + metaDelegateSnapshotFailures = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.delegate.failures", Help: crstrings.UnwrapText(` Number of snapshots that were delegated to a different node and resulted @@ -1743,45 +1743,45 @@ var ( `), Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } - metaDelegateSnapshotInProgress = metric.Metadata{ + }) + metaDelegateSnapshotInProgress = metric.InitMetadata(metric.Metadata{ Name: "range.snapshots.delegate.in-progress", Help: `Number of delegated snapshots that are currently in-flight.`, Measurement: "Snapshots", Unit: metric.Unit_COUNT, - } + }) // Quota pool metrics. 
- metaRaftQuotaPoolPercentUsed = metric.Metadata{ + metaRaftQuotaPoolPercentUsed = metric.InitMetadata(metric.Metadata{ Name: "raft.quota_pool.percent_used", Help: `Histogram of proposal quota pool utilization (0-100) per leaseholder per metrics interval`, Measurement: "Percent", // TODO(kv-obs): There is Unit_PERCENT but it seems to operate on float64 // (0 to 1.0) so it probably won't produce useful results here. Unit: metric.Unit_COUNT, - } + }) // Raft entry bytes loaded in memory. - metaRaftLoadedEntriesBytes = metric.Metadata{ + metaRaftLoadedEntriesBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.loaded_entries.bytes", Help: `Bytes allocated by raft Storage.Entries calls that are still kept in memory`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) // Raft processing metrics. - metaRaftTicks = metric.Metadata{ + metaRaftTicks = metric.InitMetadata(metric.Metadata{ Name: "raft.ticks", Help: "Number of Raft ticks queued", Measurement: "Ticks", Unit: metric.Unit_COUNT, - } - metaRaftProposalsDropped = metric.Metadata{ + }) + metaRaftProposalsDropped = metric.InitMetadata(metric.Metadata{ Name: "raft.dropped", Help: "Number of Raft proposals dropped (this counts individial raftpb.Entry, not raftpb.MsgProp)", Measurement: "Proposals", Unit: metric.Unit_COUNT, - } - metaRaftProposalsDroppedLeader = metric.Metadata{ + }) + metaRaftProposalsDroppedLeader = metric.InitMetadata(metric.Metadata{ Name: "raft.dropped_leader", Help: crstrings.UnwrapText(` Number of Raft proposals dropped by a Replica that believes itself to be @@ -1790,8 +1790,8 @@ var ( `), Measurement: "Proposals", Unit: metric.Unit_COUNT, - } - metaRaftWorkingDurationNanos = metric.Metadata{ + }) + metaRaftWorkingDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "raft.process.workingnanos", Help: crstrings.UnwrapText(` Nanoseconds spent in store.processRaft() working. 
@@ -1801,14 +1801,14 @@ var ( `), Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaRaftTickingDurationNanos = metric.Metadata{ + }) + metaRaftTickingDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "raft.process.tickingnanos", Help: "Nanoseconds spent in store.processRaft() processing replica.Tick()", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaRaftCommandsProposed = metric.Metadata{ + }) + metaRaftCommandsProposed = metric.InitMetadata(metric.Metadata{ Name: "raft.commands.proposed", Help: crstrings.UnwrapText(` Number of Raft commands proposed. @@ -1819,8 +1819,8 @@ var ( `), Measurement: "Commands", Unit: metric.Unit_COUNT, - } - metaRaftCommandsReproposed = metric.Metadata{ + }) + metaRaftCommandsReproposed = metric.InitMetadata(metric.Metadata{ Name: "raft.commands.reproposed.unchanged", Help: crstrings.UnwrapText(` Number of Raft commands re-proposed without modification. @@ -1832,8 +1832,8 @@ var ( `), Measurement: "Commands", Unit: metric.Unit_COUNT, - } - metaRaftCommandsReproposedLAI = metric.Metadata{ + }) + metaRaftCommandsReproposedLAI = metric.InitMetadata(metric.Metadata{ Name: "raft.commands.reproposed.new-lai", Help: crstrings.UnwrapText(` Number of Raft commands re-proposed with a newer LAI. @@ -1844,8 +1844,8 @@ var ( `), Measurement: "Commands", Unit: metric.Unit_COUNT, - } - metaRaftCommandsPending = metric.Metadata{ + }) + metaRaftCommandsPending = metric.InitMetadata(metric.Metadata{ Name: "raft.commands.pending", Help: crstrings.UnwrapText(` Number of Raft commands proposed and pending. @@ -1856,8 +1856,8 @@ var ( `), Measurement: "Commands", Unit: metric.Unit_COUNT, - } - metaRaftCommandsApplied = metric.Metadata{ + }) + metaRaftCommandsApplied = metric.InitMetadata(metric.Metadata{ Name: "raft.commandsapplied", Help: crstrings.UnwrapText(` Number of Raft commands applied. 
@@ -1871,8 +1871,8 @@ var ( `), Measurement: "Commands", Unit: metric.Unit_COUNT, - } - metaRaftLogCommitLatency = metric.Metadata{ + }) + metaRaftLogCommitLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.process.logcommit.latency", Help: crstrings.UnwrapText(` Latency histogram for committing Raft log entries to stable storage @@ -1888,8 +1888,8 @@ var ( `), Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaRaftCommandCommitLatency = metric.Metadata{ + }) + metaRaftCommandCommitLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.process.commandcommit.latency", Help: crstrings.UnwrapText(` Latency histogram for applying a batch of Raft commands to the state @@ -1907,7 +1907,7 @@ var ( `), Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } + }) // TODO(tbg): I think this metric skews low because we will often handle Readies // for which the result is that there is nothing to do. Do we want to change this // metric to only record ready handling when there is a Ready? That seems more @@ -1915,7 +1915,7 @@ var ( // Though they aren't really no-ops, they still have to get a mutex and check // for a Ready, etc, but I still think it would be better to avoid those measure- // ments and to count the number of noops instead if we really want to. - metaRaftHandleReadyLatency = metric.Metadata{ + metaRaftHandleReadyLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.process.handleready.latency", Help: crstrings.UnwrapText(` Latency histogram for handling a Raft ready. 
@@ -1941,8 +1941,8 @@ var ( `), Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaRaftApplyCommittedLatency = metric.Metadata{ + }) + metaRaftApplyCommittedLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.process.applycommitted.latency", Help: crstrings.UnwrapText(` Latency histogram for applying all committed Raft commands in a Raft @@ -1955,8 +1955,8 @@ var ( `), Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaRaftReplicationLatency = metric.Metadata{ + }) + metaRaftReplicationLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.replication.latency", Help: crstrings.UnwrapText(` The duration elapsed between having evaluated a BatchRequest and it being @@ -1986,8 +1986,8 @@ var ( `), Measurement: "Latency", Unit: metric.Unit_COUNT, - } - metaRaftSchedulerLatency = metric.Metadata{ + }) + metaRaftSchedulerLatency = metric.InitMetadata(metric.Metadata{ Name: "raft.scheduler.latency", Help: crstrings.UnwrapText(` Queueing durations for ranges waiting to be processed by the Raft @@ -2000,14 +2000,14 @@ var ( Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaRaftTimeoutCampaign = metric.Metadata{ + }) + metaRaftTimeoutCampaign = metric.InitMetadata(metric.Metadata{ Name: "raft.timeoutcampaign", Help: "Number of Raft replicas campaigning after missed heartbeats from leader", Measurement: "Elections called after timeout", Unit: metric.Unit_COUNT, - } - metaRaftStorageReadBytes = metric.Metadata{ + }) + metaRaftStorageReadBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.storage.read_bytes", Help: crstrings.UnwrapText(` Counter of raftpb.Entry.Size() read from pebble for raft log entries. 
@@ -2036,124 +2036,124 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftStorageError = metric.Metadata{ + }) + metaRaftStorageError = metric.InitMetadata(metric.Metadata{ Name: "raft.storage.error", Help: "Number of Raft storage errors", Measurement: "Error Count", Unit: metric.Unit_COUNT, - } + }) // Raft message metrics. - metaRaftRcvdProp = metric.Metadata{ + metaRaftRcvdProp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.prop", Help: "Number of MsgProp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdApp = metric.Metadata{ + }) + metaRaftRcvdApp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.app", Help: "Number of MsgApp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdAppResp = metric.Metadata{ + }) + metaRaftRcvdAppResp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.appresp", Help: "Number of MsgAppResp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdVote = metric.Metadata{ + }) + metaRaftRcvdVote = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.vote", Help: "Number of MsgVote messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdVoteResp = metric.Metadata{ + }) + metaRaftRcvdVoteResp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.voteresp", Help: "Number of MsgVoteResp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdPreVote = metric.Metadata{ + }) + metaRaftRcvdPreVote = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.prevote", Help: "Number of MsgPreVote messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdPreVoteResp = metric.Metadata{ + }) + metaRaftRcvdPreVoteResp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.prevoteresp", Help: "Number of MsgPreVoteResp messages 
received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdSnap = metric.Metadata{ + }) + metaRaftRcvdSnap = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.snap", Help: "Number of MsgSnap messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdHeartbeat = metric.Metadata{ + }) + metaRaftRcvdHeartbeat = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.heartbeat", Help: "Number of (coalesced, if enabled) MsgHeartbeat messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdHeartbeatResp = metric.Metadata{ + }) + metaRaftRcvdHeartbeatResp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.heartbeatresp", Help: "Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdTransferLeader = metric.Metadata{ + }) + metaRaftRcvdTransferLeader = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.transferleader", Help: "Number of MsgTransferLeader messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdTimeoutNow = metric.Metadata{ + }) + metaRaftRcvdTimeoutNow = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.timeoutnow", Help: "Number of MsgTimeoutNow messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdFortifyLeader = metric.Metadata{ + }) + metaRaftRcvdFortifyLeader = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.fortifyleader", Help: "Number of MsgFortifyLeader messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdFortifyLeaderResp = metric.Metadata{ + }) + metaRaftRcvdFortifyLeaderResp = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.fortifyleaderresp", Help: "Number of MsgFortifyLeaderResp messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - 
} - metaRaftRcvdDeFortifyLeader = metric.Metadata{ + }) + metaRaftRcvdDeFortifyLeader = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.defortifyleader", Help: "Number of MsgDeFortifyLeader messages received by this store", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdDropped = metric.Metadata{ + }) + metaRaftRcvdDropped = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.dropped", Help: "Number of incoming Raft messages dropped (due to queue length or size)", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaRaftRcvdDroppedBytes = metric.Metadata{ + }) + metaRaftRcvdDroppedBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.dropped_bytes", Help: "Bytes of dropped incoming Raft messages", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftRcvdQueuedBytes = metric.Metadata{ + }) + metaRaftRcvdQueuedBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.queued_bytes", Help: "Number of bytes in messages currently waiting for raft processing", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftRcvdSteppedBytes = metric.Metadata{ + }) + metaRaftRcvdSteppedBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.stepped_bytes", Help: crstrings.UnwrapText(` Number of bytes in messages processed by Raft. @@ -2169,9 +2169,9 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) - metaRaftRcvdBytes = metric.Metadata{ + metaRaftRcvdBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.bytes", Help: crstrings.UnwrapText(` Number of bytes in Raft messages received by this store. 
Note that this @@ -2179,8 +2179,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftRcvdCrossRegionBytes = metric.Metadata{ + }) + metaRaftRcvdCrossRegionBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.cross_region.bytes", Help: crstrings.UnwrapText(` Number of bytes received by this store for cross region Raft messages when @@ -2189,8 +2189,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftRcvdCrossZoneBytes = metric.Metadata{ + }) + metaRaftRcvdCrossZoneBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.rcvd.cross_zone.bytes", Help: crstrings.UnwrapText(` Number of bytes received by this store for cross @@ -2202,8 +2202,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftSentBytes = metric.Metadata{ + }) + metaRaftSentBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.sent.bytes", Help: crstrings.UnwrapText(` Number of bytes in Raft messages sent by this store. Note that this does @@ -2211,8 +2211,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftSentCrossRegionBytes = metric.Metadata{ + }) + metaRaftSentCrossRegionBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.sent.cross_region.bytes", Help: crstrings.UnwrapText(` Number of bytes sent by this store for cross region Raft messages when @@ -2221,8 +2221,8 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftSentCrossZoneBytes = metric.Metadata{ + }) + metaRaftSentCrossZoneBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.sent.cross_zone.bytes", Help: crstrings.UnwrapText(` Number of bytes sent by this store for cross zone, same region Raft @@ -2234,17 +2234,17 @@ var ( `), Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) - metaRaftCoalescedHeartbeatsPending = metric.Metadata{ + metaRaftCoalescedHeartbeatsPending = metric.InitMetadata(metric.Metadata{ Name: "raft.heartbeats.pending", Help: "Number of pending heartbeats and responses waiting to be 
coalesced", Measurement: "Messages", Unit: metric.Unit_COUNT, - } + }) // Raft log metrics. - metaRaftLogFollowerBehindCount = metric.Metadata{ + metaRaftLogFollowerBehindCount = metric.InitMetadata(metric.Metadata{ Name: "raftlog.behind", Help: crstrings.UnwrapText(` Number of Raft log entries followers on other stores are behind. @@ -2260,28 +2260,28 @@ var ( Measurement: "Log Entries", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRaftLogTruncated = metric.Metadata{ + }) + metaRaftLogTruncated = metric.InitMetadata(metric.Metadata{ Name: "raftlog.truncated", Help: "Number of Raft log entries truncated", Measurement: "Log Entries", Unit: metric.Unit_COUNT, - } + }) - metaRaftLogTotalSize = metric.Metadata{ + metaRaftLogTotalSize = metric.InitMetadata(metric.Metadata{ Name: "raftlog.size.total", Help: "Approximate size of all Raft logs on the store.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaRaftLogMaxSize = metric.Metadata{ + }) + metaRaftLogMaxSize = metric.InitMetadata(metric.Metadata{ Name: "raftlog.size.max", Help: "Approximate size of the largest Raft log on the store.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) - metaRaftFollowerPaused = metric.Metadata{ + metaRaftFollowerPaused = metric.InitMetadata(metric.Metadata{ Name: "admission.raft.paused_replicas", Help: crstrings.UnwrapText(` Number of followers (i.e. Replicas) to which replication is currently @@ -2296,8 +2296,8 @@ var ( `), Measurement: "Followers", Unit: metric.Unit_COUNT, - } - metaRaftPausedFollowerDroppedMsgs = metric.Metadata{ + }) + metaRaftPausedFollowerDroppedMsgs = metric.InitMetadata(metric.Metadata{ Name: "admission.raft.paused_replicas_dropped_msgs", Help: crstrings.UnwrapText(` Number of messages dropped instead of being sent to paused replicas. 
@@ -2307,9 +2307,9 @@ var ( `), Measurement: "Messages", Unit: metric.Unit_COUNT, - } + }) - metaIOOverload = metric.Metadata{ + metaIOOverload = metric.InitMetadata(metric.Metadata{ Name: "admission.io.overload", Help: crstrings.UnwrapText(` 1-normalized float indicating whether IO admission control considers the @@ -2328,185 +2328,185 @@ var ( 'admission.l0_sub_level_count_overload_threshold' and 'admission.l0_file_count_overload_threshold' respectively. `), - } + }) - metaMVCCGCQueueSuccesses = metric.Metadata{ + metaMVCCGCQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.process.success", Help: "Number of replicas successfully processed by the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaMVCCGCQueueFailures = metric.Metadata{ + }) + metaMVCCGCQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.process.failure", Help: "Number of replicas which failed processing in the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMVCCGCQueuePending = metric.Metadata{ + }) + metaMVCCGCQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.pending", Help: "Number of pending replicas in the MVCC GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMVCCGCQueueProcessingNanos = metric.Metadata{ + }) + metaMVCCGCQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.processingnanos", Help: "Nanoseconds spent processing replicas in the MVCC GC queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaMergeQueueSuccesses = metric.Metadata{ + }) + metaMergeQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.merge.process.success", Help: "Number of replicas successfully processed by the merge queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMergeQueueFailures = 
metric.Metadata{ + }) + metaMergeQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.merge.process.failure", Help: "Number of replicas which failed processing in the merge queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMergeQueuePending = metric.Metadata{ + }) + metaMergeQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.merge.pending", Help: "Number of pending replicas in the merge queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMergeQueueProcessingNanos = metric.Metadata{ + }) + metaMergeQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.merge.processingnanos", Help: "Nanoseconds spent processing replicas in the merge queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaMergeQueuePurgatory = metric.Metadata{ + }) + metaMergeQueuePurgatory = metric.InitMetadata(metric.Metadata{ Name: "queue.merge.purgatory", Help: "Number of replicas in the merge queue's purgatory, waiting to become mergeable", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftLogQueueSuccesses = metric.Metadata{ + }) + metaRaftLogQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.raftlog.process.success", Help: "Number of replicas successfully processed by the Raft log queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRaftLogQueueFailures = metric.Metadata{ + }) + metaRaftLogQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.raftlog.process.failure", Help: "Number of replicas which failed processing in the Raft log queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRaftLogQueuePending = metric.Metadata{ + }) + metaRaftLogQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.raftlog.pending", Help: "Number of 
pending replicas in the Raft log queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRaftLogQueueProcessingNanos = metric.Metadata{ + }) + metaRaftLogQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.raftlog.processingnanos", Help: "Nanoseconds spent processing replicas in the Raft log queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaRaftSnapshotQueueSuccesses = metric.Metadata{ + }) + metaRaftSnapshotQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.raftsnapshot.process.success", Help: "Number of replicas successfully processed by the Raft repair queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftSnapshotQueueFailures = metric.Metadata{ + }) + metaRaftSnapshotQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.raftsnapshot.process.failure", Help: "Number of replicas which failed processing in the Raft repair queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftSnapshotQueuePending = metric.Metadata{ + }) + metaRaftSnapshotQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.raftsnapshot.pending", Help: "Number of pending replicas in the Raft repair queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaRaftSnapshotQueueProcessingNanos = metric.Metadata{ + }) + metaRaftSnapshotQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.raftsnapshot.processingnanos", Help: "Nanoseconds spent processing replicas in the Raft repair queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaConsistencyQueueSuccesses = metric.Metadata{ + }) + metaConsistencyQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.consistency.process.success", Help: "Number of replicas successfully processed by the consistency checker queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - 
metaConsistencyQueueFailures = metric.Metadata{ + }) + metaConsistencyQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.consistency.process.failure", Help: "Number of replicas which failed processing in the consistency checker queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaConsistencyQueuePending = metric.Metadata{ + }) + metaConsistencyQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.consistency.pending", Help: "Number of pending replicas in the consistency checker queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaConsistencyQueueProcessingNanos = metric.Metadata{ + }) + metaConsistencyQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.consistency.processingnanos", Help: "Nanoseconds spent processing replicas in the consistency checker queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaReplicaGCQueueSuccesses = metric.Metadata{ + }) + metaReplicaGCQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.replicagc.process.success", Help: "Number of replicas successfully processed by the replica GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicaGCQueueFailures = metric.Metadata{ + }) + metaReplicaGCQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.replicagc.process.failure", Help: "Number of replicas which failed processing in the replica GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicaGCQueuePending = metric.Metadata{ + }) + metaReplicaGCQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.replicagc.pending", Help: "Number of pending replicas in the replica GC queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicaGCQueuePurgatory = metric.Metadata{ + }) + metaReplicaGCQueuePurgatory = metric.InitMetadata(metric.Metadata{ 
Name: "queue.replicagc.purgatory", Help: "Number of replicas in the replica GC queue's purgatory, waiting for a prerequisite (e.g. left neighbor GC) before they can be GC'd", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicaGCQueueProcessingNanos = metric.Metadata{ + }) + metaReplicaGCQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.replicagc.processingnanos", Help: "Nanoseconds spent processing replicas in the replica GC queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaReplicateQueueEnqueueAdd = metric.Metadata{ + }) + metaReplicateQueueEnqueueAdd = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.enqueue.add", Help: "Number of replicas successfully added to the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueEnqueueFailedPrecondition = metric.Metadata{ + }) + metaReplicateQueueEnqueueFailedPrecondition = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.enqueue.failedprecondition", Help: crstrings.UnwrapText(` Number of replicas that failed the precondition checks and were therefore @@ -2514,8 +2514,8 @@ var ( `), Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueEnqueueNoAction = metric.Metadata{ + }) + metaReplicateQueueEnqueueNoAction = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.enqueue.noaction", Help: crstrings.UnwrapText(` Number of replicas for which ShouldQueue determined no action was needed @@ -2523,8 +2523,8 @@ var ( `), Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueEnqueueUnexpectedError = metric.Metadata{ + }) + metaReplicateQueueEnqueueUnexpectedError = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.enqueue.unexpectederror", Help: crstrings.UnwrapText(` Number of replicas that were expected to be enqueued (ShouldQueue returned @@ -2533,262 +2533,262 @@ var ( `), Measurement: "Replicas", Unit: 
metric.Unit_COUNT, - } - metaLeaseQueueSuccesses = metric.Metadata{ + }) + metaLeaseQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.lease.process.success", Help: "Number of replicas successfully processed by the replica lease queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaLeaseQueueFailures = metric.Metadata{ + }) + metaLeaseQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.lease.process.failure", Help: "Number of replicas which failed processing in the replica lease queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaLeaseQueuePending = metric.Metadata{ + }) + metaLeaseQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.lease.pending", Help: "Number of pending replicas in the replica lease queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseQueueProcessingNanos = metric.Metadata{ + }) + metaLeaseQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.lease.processingnanos", Help: "Nanoseconds spent processing replicas in the replica lease queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaReplicateQueueSuccesses = metric.Metadata{ + }) + metaReplicateQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.process.success", Help: "Number of replicas successfully processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicateQueueFailures = metric.Metadata{ + }) + metaReplicateQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.process.failure", Help: "Number of replicas which failed processing in the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaLeaseQueuePurgatory = metric.Metadata{ + }) + metaLeaseQueuePurgatory = metric.InitMetadata(metric.Metadata{ Name: "queue.lease.purgatory", Help: 
"Number of replicas in the lease queue's purgatory, awaiting lease transfer operations", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueuePending = metric.Metadata{ + }) + metaReplicateQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.pending", Help: "Number of pending replicas in the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaReplicateQueueFull = metric.Metadata{ + }) + metaReplicateQueueFull = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.queue_full", Help: "Number of times a replica was dropped from the queue due to queue fullness", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueProcessingNanos = metric.Metadata{ + }) + metaReplicateQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.processingnanos", Help: "Nanoseconds spent processing replicas in the replicate queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaReplicateQueuePurgatory = metric.Metadata{ + }) + metaReplicateQueuePurgatory = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.purgatory", Help: "Number of replicas in the replicate queue's purgatory, awaiting allocation options", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaSplitQueueSuccesses = metric.Metadata{ + }) + metaSplitQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.split.process.success", Help: "Number of replicas successfully processed by the split queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaSplitQueueFailures = metric.Metadata{ + }) + metaSplitQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.split.process.failure", Help: "Number of replicas which failed processing in the split queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaSplitQueuePending = metric.Metadata{ + }) + metaSplitQueuePending = 
metric.InitMetadata(metric.Metadata{ Name: "queue.split.pending", Help: "Number of pending replicas in the split queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaSplitQueueProcessingNanos = metric.Metadata{ + }) + metaSplitQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.split.processingnanos", Help: "Nanoseconds spent processing replicas in the split queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } - metaSplitQueuePurgatory = metric.Metadata{ + }) + metaSplitQueuePurgatory = metric.InitMetadata(metric.Metadata{ Name: "queue.split.purgatory", Help: "Number of replicas in the split queue's purgatory, waiting to become splittable", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaTimeSeriesMaintenanceQueueSuccesses = metric.Metadata{ + }) + metaTimeSeriesMaintenanceQueueSuccesses = metric.InitMetadata(metric.Metadata{ Name: "queue.tsmaintenance.process.success", Help: "Number of replicas successfully processed by the time series maintenance queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaTimeSeriesMaintenanceQueueFailures = metric.Metadata{ + }) + metaTimeSeriesMaintenanceQueueFailures = metric.InitMetadata(metric.Metadata{ Name: "queue.tsmaintenance.process.failure", Help: "Number of replicas which failed processing in the time series maintenance queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaTimeSeriesMaintenanceQueuePending = metric.Metadata{ + }) + metaTimeSeriesMaintenanceQueuePending = metric.InitMetadata(metric.Metadata{ Name: "queue.tsmaintenance.pending", Help: "Number of pending replicas in the time series maintenance queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaTimeSeriesMaintenanceQueueProcessingNanos = metric.Metadata{ + }) + metaTimeSeriesMaintenanceQueueProcessingNanos = metric.InitMetadata(metric.Metadata{ Name: "queue.tsmaintenance.processingnanos", Help: "Nanoseconds spent processing replicas in the time 
series maintenance queue", Measurement: "Processing Time", Unit: metric.Unit_NANOSECONDS, - } + }) // GCInfo cumulative totals. - metaGCNumKeysAffected = metric.Metadata{ + metaGCNumKeysAffected = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.numkeysaffected", Help: "Number of keys with GC'able data", Measurement: "Keys", Unit: metric.Unit_COUNT, - } - metaGCNumRangeKeysAffected = metric.Metadata{ + }) + metaGCNumRangeKeysAffected = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.numrangekeysaffected", Help: "Number of range keys GC'able", Measurement: "Range Keys", Unit: metric.Unit_COUNT, - } - metaGCIntentsConsidered = metric.Metadata{ + }) + metaGCIntentsConsidered = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.intentsconsidered", Help: "Number of 'old' intents", Measurement: "Intents", Unit: metric.Unit_COUNT, - } - metaGCIntentTxns = metric.Metadata{ + }) + metaGCIntentTxns = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.intenttxns", Help: "Number of associated distinct transactions", Measurement: "Txns", Unit: metric.Unit_COUNT, - } - metaGCTransactionSpanScanned = metric.Metadata{ + }) + metaGCTransactionSpanScanned = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspanscanned", Help: "Number of entries in transaction spans scanned from the engine", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCTransactionSpanGCAborted = metric.Metadata{ + }) + metaGCTransactionSpanGCAborted = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspangcaborted", Help: "Number of GC'able entries corresponding to aborted txns", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCTransactionSpanGCCommitted = metric.Metadata{ + }) + metaGCTransactionSpanGCCommitted = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspangccommitted", Help: "Number of GC'able entries corresponding to committed txns", Measurement: "Txn Entries", Unit: 
metric.Unit_COUNT, - } - metaGCTransactionSpanGCStaging = metric.Metadata{ + }) + metaGCTransactionSpanGCStaging = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspangcstaging", Help: "Number of GC'able entries corresponding to staging txns", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCTransactionSpanGCPending = metric.Metadata{ + }) + metaGCTransactionSpanGCPending = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspangcpending", Help: "Number of GC'able entries corresponding to pending txns", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCTransactionSpanGCPrepared = metric.Metadata{ + }) + metaGCTransactionSpanGCPrepared = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionspangcprepared", Help: "Number of GC'able entries corresponding to prepared txns", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCAbortSpanScanned = metric.Metadata{ + }) + metaGCAbortSpanScanned = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.abortspanscanned", Help: "Number of transactions present in the AbortSpan scanned from the engine", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCAbortSpanConsidered = metric.Metadata{ + }) + metaGCAbortSpanConsidered = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.abortspanconsidered", Help: "Number of AbortSpan entries old enough to be considered for removal", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCAbortSpanGCNum = metric.Metadata{ + }) + metaGCAbortSpanGCNum = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.abortspangcnum", Help: "Number of AbortSpan entries fit for removal", Measurement: "Txn Entries", Unit: metric.Unit_COUNT, - } - metaGCPushTxn = metric.Metadata{ + }) + metaGCPushTxn = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.pushtxn", Help: "Number of attempted pushes", Measurement: "Pushes", Unit: metric.Unit_COUNT, - } - 
metaGCResolveTotal = metric.Metadata{ + }) + metaGCResolveTotal = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.resolvetotal", Help: "Number of attempted intent resolutions", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaGCResolveSuccess = metric.Metadata{ + }) + metaGCResolveSuccess = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.resolvesuccess", Help: "Number of successful intent resolutions", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaGCResolveFailed = metric.Metadata{ + }) + metaGCResolveFailed = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.resolvefailed", Help: "Number of cleanup intent failures during GC", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaGCTxnIntentsResolveFailed = metric.Metadata{ + }) + metaGCTxnIntentsResolveFailed = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.transactionresolvefailed", Help: "Number of intent cleanup failures for local transactions during GC", Measurement: "Intent Resolutions", Unit: metric.Unit_COUNT, - } - metaGCUsedClearRange = metric.Metadata{ + }) + metaGCUsedClearRange = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.clearrangesuccess", Help: "Number of successful ClearRange operations during GC", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaGCFailedClearRange = metric.Metadata{ + }) + metaGCFailedClearRange = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.clearrangefailed", Help: "Number of failed ClearRange operations during GC", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaGCEnqueueHighPriority = metric.Metadata{ + }) + metaGCEnqueueHighPriority = metric.InitMetadata(metric.Metadata{ Name: "queue.gc.info.enqueuehighpriority", Help: "Number of replicas enqueued for GC with high priority", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) // Slow request metrics. 
- metaLatchRequests = metric.Metadata{ + metaLatchRequests = metric.InitMetadata(metric.Metadata{ Name: "requests.slow.latch", Help: crstrings.UnwrapText(` Number of requests that have been stuck for a long time acquiring @@ -2807,8 +2807,8 @@ var ( Measurement: "Requests", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaSlowLeaseRequests = metric.Metadata{ + }) + metaSlowLeaseRequests = metric.InitMetadata(metric.Metadata{ Name: "requests.slow.lease", Help: crstrings.UnwrapText(` Number of requests that have been stuck for a long time acquiring a @@ -2823,8 +2823,8 @@ var ( Measurement: "Requests", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaSlowRaftRequests = metric.Metadata{ + }) + metaSlowRaftRequests = metric.InitMetadata(metric.Metadata{ Name: "requests.slow.raft", Help: crstrings.UnwrapText(` Number of requests that have been stuck for a long time in the replication @@ -2841,10 +2841,10 @@ var ( Measurement: "Requests", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } + }) // Backpressure metrics. - metaBackpressuredOnSplitRequests = metric.Metadata{ + metaBackpressuredOnSplitRequests = metric.InitMetadata(metric.Metadata{ Name: "requests.backpressure.split", Help: crstrings.UnwrapText(` Number of backpressured writes waiting on a Range split. @@ -2856,29 +2856,29 @@ var ( `), Measurement: "Writes", Unit: metric.Unit_COUNT, - } + }) // AddSSTable metrics. - metaAddSSTableProposals = metric.Metadata{ + metaAddSSTableProposals = metric.InitMetadata(metric.Metadata{ Name: "addsstable.proposals", Help: "Number of SSTable ingestions proposed (i.e. sent to Raft by lease holders)", Measurement: "Ingestions", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaAddSSTableApplications = metric.Metadata{ + }) + metaAddSSTableApplications = metric.InitMetadata(metric.Metadata{ Name: "addsstable.applications", Help: "Number of SSTable ingestions applied (i.e. 
applied by Replicas)", Measurement: "Ingestions", Unit: metric.Unit_COUNT, - } - metaAddSSTableApplicationCopies = metric.Metadata{ + }) + metaAddSSTableApplicationCopies = metric.InitMetadata(metric.Metadata{ Name: "addsstable.copies", Help: "number of SSTable ingestions that required copying files during application", Measurement: "Ingestions", Unit: metric.Unit_COUNT, - } - metaAddSSTableAsWrites = metric.Metadata{ + }) + metaAddSSTableAsWrites = metric.InitMetadata(metric.Metadata{ Name: "addsstable.aswrites", Help: crstrings.UnwrapText(` Number of SSTables ingested as normal writes. @@ -2891,41 +2891,41 @@ var ( `), Measurement: "Ingestions", Unit: metric.Unit_COUNT, - } - metaAddSSTableEvalTotalDelay = metric.Metadata{ + }) + metaAddSSTableEvalTotalDelay = metric.InitMetadata(metric.Metadata{ Name: "addsstable.delay.total", Help: "Amount by which evaluation of AddSSTable requests was delayed", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } + }) // Export request counter. - metaExportEvalTotalDelay = metric.Metadata{ + metaExportEvalTotalDelay = metric.InitMetadata(metric.Metadata{ Name: "exportrequest.delay.total", Help: "Amount by which evaluation of Export requests was delayed", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Encryption-at-rest metrics. // TODO(mberhault): metrics for key age, per-key file/bytes counts. - metaEncryptionAlgorithm = metric.Metadata{ + metaEncryptionAlgorithm = metric.InitMetadata(metric.Metadata{ Name: "rocksdb.encryption.algorithm", Help: "Algorithm in use for encryption-at-rest, see storage/enginepb/key_registry.proto", Measurement: "Encryption At Rest", Unit: metric.Unit_CONST, - } + }) // Concurrency control metrics. - metaConcurrencyLocks = metric.Metadata{ + metaConcurrencyLocks = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.locks", Help: "Number of active locks held in lock tables. 
Does not include replicated locks (intents) that are not held in memory", Measurement: "Locks", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaConcurrencyAverageLockHoldDurationNanos = metric.Metadata{ + }) + metaConcurrencyAverageLockHoldDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.avg_lock_hold_duration_nanos", Help: crstrings.UnwrapText(` Average lock hold duration across locks currently held in lock tables. @@ -2933,8 +2933,8 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaConcurrencyMaxLockHoldDurationNanos = metric.Metadata{ + }) + metaConcurrencyMaxLockHoldDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.max_lock_hold_duration_nanos", Help: crstrings.UnwrapText(` Maximum length of time any lock in a lock table is held. Does not include @@ -2942,61 +2942,61 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaConcurrencyLocksWithWaitQueues = metric.Metadata{ + }) + metaConcurrencyLocksWithWaitQueues = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.locks_with_wait_queues", Help: "Number of active locks held in lock tables with active wait-queues", Measurement: "Locks", Unit: metric.Unit_COUNT, - } - metaConcurrencyLockWaitQueueWaiters = metric.Metadata{ + }) + metaConcurrencyLockWaitQueueWaiters = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.lock_wait_queue_waiters", Help: "Number of requests actively waiting in a lock wait-queue", Measurement: "Lock-Queue Waiters", Unit: metric.Unit_COUNT, - } - metaConcurrencyAverageLockWaitDurationNanos = metric.Metadata{ + }) + metaConcurrencyAverageLockWaitDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.avg_lock_wait_duration_nanos", Help: "Average lock wait duration across requests currently waiting in lock wait-queues", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaConcurrencyMaxLockWaitDurationNanos = 
metric.Metadata{ + }) + metaConcurrencyMaxLockWaitDurationNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.max_lock_wait_duration_nanos", Help: "Maximum lock wait duration across requests currently waiting in lock wait-queues", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaConcurrencyMaxLockWaitQueueWaitersForLock = metric.Metadata{ + }) + metaConcurrencyMaxLockWaitQueueWaitersForLock = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.max_lock_wait_queue_waiters_for_lock", Help: "Maximum number of requests actively waiting in any single lock wait-queue", Measurement: "Lock-Queue Waiters", Unit: metric.Unit_COUNT, - } - metaLatchConflictWaitDurations = metric.Metadata{ + }) + metaLatchConflictWaitDurations = metric.InitMetadata(metric.Metadata{ Name: "kv.concurrency.latch_conflict_wait_durations", Help: "Durations in nanoseconds spent on latch acquisition waiting for conflicts with other latches", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Closed timestamp metrics. - metaClosedTimestampMaxBehindNanos = metric.Metadata{ + metaClosedTimestampMaxBehindNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.closed_timestamp.max_behind_nanos", Help: "Largest latency between realtime and replica max closed timestamp", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Closed timestamp policy change metrics. 
- metaClosedTimestampPolicyChange = metric.Metadata{ + metaClosedTimestampPolicyChange = metric.InitMetadata(metric.Metadata{ Name: "kv.closed_timestamp.policy_change", Help: "Number of times closed timestamp policy change occurred on ranges", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) - metaClosedTimestampLatencyInfoMissing = metric.Metadata{ + metaClosedTimestampLatencyInfoMissing = metric.InitMetadata(metric.Metadata{ Name: "kv.closed_timestamp.policy_latency_info_missing", Help: crstrings.UnwrapText(` Number of times closed timestamp policy refresh had to use hardcoded @@ -3004,10 +3004,10 @@ var ( `), Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) // Replica circuit breaker. - metaReplicaCircuitBreakerCurTripped = metric.Metadata{ + metaReplicaCircuitBreakerCurTripped = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_circuit_breaker.num_tripped_replicas", Help: crstrings.UnwrapText(` Number of Replicas for which the per-Replica circuit breaker is currently @@ -3018,17 +3018,17 @@ var ( `), Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) // Replica circuit breaker. - metaReplicaCircuitBreakerCumTripped = metric.Metadata{ + metaReplicaCircuitBreakerCumTripped = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_circuit_breaker.num_tripped_events", Help: `Number of times the per-Replica circuit breakers tripped since process start.`, Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) // Replica read batch evaluation. - metaReplicaReadBatchEvaluationLatency = metric.Metadata{ + metaReplicaReadBatchEvaluationLatency = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_read_batch_evaluate.latency", Help: crstrings.UnwrapText(` Execution duration for evaluating a BatchRequest on the read-only path @@ -3039,9 +3039,9 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // Replica read-write batch evaluation. 
- metaReplicaWriteBatchEvaluationLatency = metric.Metadata{ + metaReplicaWriteBatchEvaluationLatency = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_write_batch_evaluate.latency", Help: crstrings.UnwrapText(` Execution duration for evaluating a BatchRequest on the read-write path @@ -3054,94 +3054,94 @@ var ( `), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaPopularKeyCount = metric.Metadata{ + }) + metaPopularKeyCount = metric.InitMetadata(metric.Metadata{ Name: "kv.loadsplitter.popularkey", Help: "Load-based splitter could not find a split key and the most popular sampled split key occurs in >= 25% of the samples.", Measurement: "Occurrences", Unit: metric.Unit_COUNT, - } + }) - metaNoSplitKeyCount = metric.Metadata{ + metaNoSplitKeyCount = metric.InitMetadata(metric.Metadata{ Name: "kv.loadsplitter.nosplitkey", Help: "Load-based splitter could not find a split key.", Measurement: "Occurrences", Unit: metric.Unit_COUNT, - } + }) - metaClearDirectionCount = metric.Metadata{ + metaClearDirectionCount = metric.InitMetadata(metric.Metadata{ Name: "kv.loadsplitter.cleardirection", Help: "Load-based splitter observed an access direction greater than 80% left or right in the samples.", Measurement: "Occurrences", Unit: metric.Unit_COUNT, - } + }) - metaSplitEstimatedStats = metric.Metadata{ + metaSplitEstimatedStats = metric.InitMetadata(metric.Metadata{ Name: "kv.split.estimated_stats", Help: "Number of splits that computed estimated MVCC stats.", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) - metaSplitEstimatedTotalBytesDiff = metric.Metadata{ + metaSplitEstimatedTotalBytesDiff = metric.InitMetadata(metric.Metadata{ Name: "kv.split.total_bytes_estimates", Help: "Number of total bytes difference between the pre-split and post-split MVCC stats.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) - metaStorageFlushUtilization = metric.Metadata{ + metaStorageFlushUtilization = metric.InitMetadata(metric.Metadata{ Name: 
"storage.flush.utilization", Help: "The percentage of time the storage engine is actively flushing memtables to disk.", Measurement: "Flush Utilization", Unit: metric.Unit_PERCENT, - } - metaValueSeparationBytesReferenced = metric.Metadata{ + }) + metaValueSeparationBytesReferenced = metric.InitMetadata(metric.Metadata{ Name: "storage.value_separation.value_bytes.referenced", Help: "The size of storage engine value bytes (pre-compression) that are stored separately in blob files and referenced by a live sstable.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaValueSeparationBytesUnreferenced = metric.Metadata{ + }) + metaValueSeparationBytesUnreferenced = metric.InitMetadata(metric.Metadata{ Name: "storage.value_separation.value_bytes.unreferenced", Help: "The size of storage engine value bytes (pre-compression) that are stored separately in blob files and not referenced by any live sstable. These bytes are garbage that could be reclaimed by a compaction.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaValueSeparationBlobFileCount = metric.Metadata{ + }) + metaValueSeparationBlobFileCount = metric.InitMetadata(metric.Metadata{ Name: "storage.value_separation.blob_files.count", Help: "The number of blob files that are used to store separated values within the storage engine.", Measurement: "Files", Unit: metric.Unit_COUNT, - } - metaValueSeparationBlobFileSize = metric.Metadata{ + }) + metaValueSeparationBlobFileSize = metric.InitMetadata(metric.Metadata{ Name: "storage.value_separation.blob_files.size", Help: "The size of the physical blob files that are used to store separated values within the storage engine. 
This sum is the physical post-compression sum of value_bytes.referenced and value_bytes.unreferenced.", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaValueSeparationValueRetrievalCount = metric.Metadata{ + }) + metaValueSeparationValueRetrievalCount = metric.InitMetadata(metric.Metadata{ Name: "storage.value_separation.value_retrieval.count", Help: "The number of value retrievals of values separated into blob files.", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaWALBytesWritten = metric.Metadata{ + }) + metaWALBytesWritten = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.bytes_written", Help: "The number of bytes the storage engine has written to the Write-Ahead Log.", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaWALBytesIn = metric.Metadata{ + }) + metaWALBytesIn = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.bytes_in", Help: "The number of logical bytes the storage engine has written to the Write-Ahead Log.", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) // metaStorageFsyncLatency tracks the primary WAL device latency. // For secondary WAL device latency (when WAL failover is configured), // see metaStorageWALSecondaryFileOpLatency. - metaStorageFsyncLatency = metric.Metadata{ + metaStorageFsyncLatency = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.fsync.latency", Help: "The fsync latency to the Write-Ahead Log device.", Measurement: "Fsync Latency", @@ -3156,16 +3156,16 @@ var ( metric reflects the fsync latency of the primary and/or the secondary WAL device. `), - } - metaStorageWALSecondaryFileOpLatency = metric.Metadata{ + }) + metaStorageWALSecondaryFileOpLatency = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.secondary.file_op.latency", Help: "The latency of file operations on the secondary Write-Ahead Log device.", Measurement: "File Op Latency", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_STORAGE, HowToUse: "Only populated when WAL failover is configured. 
This metric tracks file operation latency specifically on the secondary WAL device.", - } - metaStorageWALFailoverSwitchCount = metric.Metadata{ + }) + metaStorageWALFailoverSwitchCount = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.switch.count", Help: crstrings.UnwrapText(` Count of the number of times WAL writing has switched from primary to @@ -3178,22 +3178,22 @@ var ( Only populated when WAL failover is configured. A high switch count indicates that many disk stalls were encountered. `), - } - metaStorageWALFailoverPrimaryDuration = metric.Metadata{ + }) + metaStorageWALFailoverPrimaryDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.primary.duration", Help: "Cumulative time spent writing to the primary WAL directory.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, HowToUse: "Only populated when WAL failover is configured.", - } - metaStorageWALFailoverSecondaryDuration = metric.Metadata{ + }) + metaStorageWALFailoverSecondaryDuration = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.secondary.duration", Help: "Cumulative time spent writing to the secondary WAL directory.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, HowToUse: "Only populated when WAL failover is configured.", - } - metaStorageWALFailoverWriteAndSyncLatency = metric.Metadata{ + }) + metaStorageWALFailoverWriteAndSyncLatency = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.write_and_sync.latency", Help: "The observed latency for writing and syncing to the logical Write-Ahead Log.", Measurement: "Nanoseconds", @@ -3204,8 +3204,8 @@ var ( Only populated when WAL failover is configured. Without WAL failover, the relevant metric is storage.wal.fsync.latency. 
`), - } - metaStorageWALFailoverSecondaryDiskCapacity = metric.Metadata{ + }) + metaStorageWALFailoverSecondaryDiskCapacity = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.secondary.disk.capacity", Help: "Total disk capacity of the secondary WAL failover volume.", Measurement: "Storage", @@ -3213,8 +3213,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_STORAGE, HowToUse: "Only populated when WAL failover is configured.", - } - metaStorageWALFailoverSecondaryDiskAvailable = metric.Metadata{ + }) + metaStorageWALFailoverSecondaryDiskAvailable = metric.InitMetadata(metric.Metadata{ Name: "storage.wal.failover.secondary.disk.available", Help: "Available disk space on the secondary WAL failover volume.", Measurement: "Storage", @@ -3222,78 +3222,78 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_STORAGE, HowToUse: "Only populated when WAL failover is configured.", - } - metaReplicaReadBatchDroppedLatchesBeforeEval = metric.Metadata{ + }) + metaReplicaReadBatchDroppedLatchesBeforeEval = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_read_batch_evaluate.dropped_latches_before_eval", Help: `Number of times read-only batches dropped latches before evaluation.`, Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaReplicaReadBatchWithoutInterleavingIter = metric.Metadata{ + }) + metaReplicaReadBatchWithoutInterleavingIter = metric.InitMetadata(metric.Metadata{ Name: "kv.replica_read_batch_evaluate.without_interleaving_iter", Help: `Number of read-only batches evaluated without an intent interleaving iter.`, Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaDiskReadCount = metric.Metadata{ + }) + metaDiskReadCount = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.read.count", Unit: metric.Unit_COUNT, Measurement: "Operations", Help: "Disk read operations on the store's disk since this process started (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - 
metaDiskReadBytes = metric.Metadata{ + }) + metaDiskReadBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.read.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", Help: "Bytes read from the store's disk since this process started (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - metaDiskReadTime = metric.Metadata{ + }) + metaDiskReadTime = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.read.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent reading from the store's disk since this process started (as reported by the OS)", - } - metaDiskWriteCount = metric.Metadata{ + }) + metaDiskWriteCount = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.write.count", Unit: metric.Unit_COUNT, Measurement: "Operations", Help: "Disk write operations on the store's disk since this process started (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - metaDiskWriteBytes = metric.Metadata{ + }) + metaDiskWriteBytes = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.write.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", Help: "Bytes written to the store's disk since this process started (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - metaDiskWriteTime = metric.Metadata{ + }) + metaDiskWriteTime = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.write.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent writing to the store's disks since this process started (as reported by the OS)", - } - metaDiskIOTime = metric.Metadata{ + }) + metaDiskIOTime = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.io.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent reading from or writing to the store's disk since this process started (as reported by the OS)", - } - metaDiskWeightedIOTime = metric.Metadata{ + }) + metaDiskWeightedIOTime = metric.InitMetadata(metric.Metadata{ Name: 
"storage.disk.weightedio.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Weighted time spent reading from or writing to the store's disk since this process started (as reported by the OS)", - } - metaDiskIopsInProgress = metric.Metadata{ + }) + metaDiskIopsInProgress = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.iopsinprogress", Unit: metric.Unit_COUNT, Measurement: "Operations", Help: "IO operations currently in progress on the store's disk (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } + }) // The max disk rate metrics are computed using data sampled at // DefaultDiskStatsPollingInterval, which defaults to 100ms, and scaled up // to be a per-second rate. This is useful to observe short duration spikes @@ -3307,32 +3307,32 @@ var ( // If these max rate metrics show usage close to the provisioned value, one // can blame the high usage for the higher latency, and not blame it on // unrelated slowness in the disk infrastructure. - metaDiskReadMaxBytesPerSecond = metric.Metadata{ + metaDiskReadMaxBytesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.read-max.bytespersecond", Unit: metric.Unit_BYTES, Measurement: "Bytes", Help: "Maximum rate at which bytes were read from disk (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - metaDiskWriteMaxBytesPerSecond = metric.Metadata{ + }) + metaDiskWriteMaxBytesPerSecond = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.write-max.bytespersecond", Unit: metric.Unit_BYTES, Measurement: "Bytes", Help: "Maximum rate at which bytes were written to disk (as reported by the OS)", Visibility: metric.Metadata_SUPPORT, - } - metaDiskReadMaxIOPS = metric.Metadata{ + }) + metaDiskReadMaxIOPS = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.read-max.iops", Unit: metric.Unit_COUNT, Measurement: "Operations", Help: "Maximum rate of read operations performed on the disk (as reported by the OS)", - } - metaDiskWriteMaxIOPS = 
metric.Metadata{ + }) + metaDiskWriteMaxIOPS = metric.InitMetadata(metric.Metadata{ Name: "storage.disk.write-max.iops", Unit: metric.Unit_COUNT, Measurement: "Operations", Help: "Maximum rate of write operations performed on the disk (as reported by the OS)", - } + }) ) // StoreMetrics is the set of metrics for a given store. @@ -5035,12 +5035,12 @@ func raftFlowStateGaugeSlice() [tracker.StateCount]*metric.Gauge { func makePolicyRefresherMetrics() [ctpb.MAX_CLOSED_TIMESTAMP_POLICY]*metric.Gauge { var policyGauges [ctpb.MAX_CLOSED_TIMESTAMP_POLICY]*metric.Gauge for policy := ctpb.LAG_BY_CLUSTER_SETTING; policy < ctpb.MAX_CLOSED_TIMESTAMP_POLICY; policy++ { - meta := metric.Metadata{ + meta := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("kv.closed_timestamp.policy.%s", strings.ToLower(policy.String())), Help: fmt.Sprintf("Number of ranges with %s closed timestamp policy", policy.String()), Measurement: "Ranges", Unit: metric.Unit_COUNT, - } + }) policyGauges[policy] = metric.NewGauge(meta) } return policyGauges @@ -5053,12 +5053,12 @@ func storageLevelMetricMetadata( ) [7]metric.Metadata { var sl [7]metric.Metadata for i := range sl { - sl[i] = metric.Metadata{ + sl[i] = metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("storage.l%d-%s", i, name), Help: fmt.Sprintf(helpTpl, i), Measurement: measurement, Unit: unit, - } + }) // Apply visibility override if specified for this level if visibility, ok := visibilityOverrides[i]; ok { sl[i].Visibility = visibility @@ -5115,24 +5115,24 @@ type pebbleCategoryIterMetrics struct { } func makePebbleCategorizedIterMetrics(category block.Category) pebbleCategoryIterMetrics { - metaBlockBytes := metric.Metadata{ + metaBlockBytes := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("storage.iterator.category-%s.block-load.bytes", category), Help: "Bytes loaded by storage sstable iterators (possibly cached).", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBlockBytesInCache := metric.Metadata{ + }) + 
metaBlockBytesInCache := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("storage.iterator.category-%s.block-load.cached-bytes", category), Help: "Bytes loaded by storage sstable iterators from the block cache", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBlockReadLatencySum := metric.Metadata{ + }) + metaBlockReadLatencySum := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("storage.iterator.category-%s.block-load.latency-sum", category), Help: "Cumulative latency for loading bytes not in the block cache, by storage sstable iterators", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } + }) return pebbleCategoryIterMetrics{ IterBlockBytes: metric.NewCounter(metaBlockBytes), IterBlockBytesInCache: metric.NewCounter(metaBlockBytesInCache), @@ -5180,12 +5180,12 @@ type pebbleCategoryDiskWriteMetrics struct { func makePebbleCategorizedWriteMetrics( category vfs.DiskWriteCategory, ) *pebbleCategoryDiskWriteMetrics { - metaDiskBytesWritten := metric.Metadata{ + metaDiskBytesWritten := metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf("storage.category-%s.bytes-written", category), Help: "Bytes written to disk", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) return &pebbleCategoryDiskWriteMetrics{BytesWritten: metric.NewCounter(metaDiskBytesWritten)} } diff --git a/pkg/kv/kvserver/metrics_test.go b/pkg/kv/kvserver/metrics_test.go index 5f5415b4224d..7c2e1d8182ef 100644 --- a/pkg/kv/kvserver/metrics_test.go +++ b/pkg/kv/kvserver/metrics_test.go @@ -199,7 +199,7 @@ func TestUpdateDiskCounter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx := context.Background() - counter := metric.NewCounter(metric.Metadata{Name: "test"}) + counter := metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test"})) if tt.initial > 0 { counter.Update(tt.initial) } diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/metrics.go b/pkg/kv/kvserver/protectedts/ptreconcile/metrics.go index 
1923a9339621..601ee1765296 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/metrics.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/metrics.go @@ -33,32 +33,32 @@ var _ metric.Struct = (*Metrics)(nil) func (m *Metrics) MetricStruct() {} var ( - metaReconciliationRuns = metric.Metadata{ + metaReconciliationRuns = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.reconciliation.num_runs", Help: "number of successful reconciliation runs on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaRecordsProcessed = metric.Metadata{ + }) + metaRecordsProcessed = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.reconciliation.records_processed", Help: "number of records processed without error during reconciliation on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaRecordsRemoved = metric.Metadata{ + }) + metaRecordsRemoved = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.reconciliation.records_removed", Help: "number of records removed during reconciliation runs on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaReconciliationErrors = metric.Metadata{ + }) + metaReconciliationErrors = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.reconciliation.errors", Help: "number of errors encountered during reconciliation runs on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) diff --git a/pkg/kv/kvserver/protectedts/ptstorage/metrics.go b/pkg/kv/kvserver/protectedts/ptstorage/metrics.go index ce13ef8fdd85..c9f0a885b0fd 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/metrics.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/metrics.go @@ -41,60 +41,60 @@ func makeMetrics() Metrics { } var ( - metaProtectSuccess = metric.Metadata{ + metaProtectSuccess 
= metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.protect.success", Help: "number of successful Protect operations creating protected timestamp records", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaProtectFailed = metric.Metadata{ + }) + metaProtectFailed = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.protect.failed", Help: "number of failed Protect operations (ErrExists, validation errors, or execution errors)", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaReleaseSuccess = metric.Metadata{ + }) + metaReleaseSuccess = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.release.success", Help: "number of successful Release operations removing protected timestamp records", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaReleaseFailed = metric.Metadata{ + }) + metaReleaseFailed = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.release.failed", Help: "number of failed Release operations (ErrNotExists or execution errors)", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaUpdateTimestampSuccess = metric.Metadata{ + }) + metaUpdateTimestampSuccess = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.update_timestamp.success", Help: "number of successful UpdateTimestamp operations", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaUpdateTimestampFailed = metric.Metadata{ + }) + metaUpdateTimestampFailed = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.update_timestamp.failed", Help: "number of failed UpdateTimestamp operations (ErrNotExists or execution errors)", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - 
metaGetRecordSuccess = metric.Metadata{ + }) + metaGetRecordSuccess = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.get_record.success", Help: "number of successful GetRecord operations", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaGetRecordFailed = metric.Metadata{ + }) + metaGetRecordFailed = metric.InitMetadata(metric.Metadata{ Name: "kv.protectedts.get_record.failed", Help: "number of failed GetRecord operations (ErrNotExists, parsing errors, or execution errors)", Measurement: "Operations", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) diff --git a/pkg/kv/kvserver/queue_concurrency_test.go b/pkg/kv/kvserver/queue_concurrency_test.go index e80774bb4ef4..b92845000a0c 100644 --- a/pkg/kv/kvserver/queue_concurrency_test.go +++ b/pkg/kv/kvserver/queue_concurrency_test.go @@ -56,11 +56,11 @@ func TestBaseQueueConcurrent(t *testing.T) { acceptsUnsplitRanges: true, processTimeoutFunc: constantTimeoutFunc(time.Millisecond), // We don't care about these, but we don't want to crash. 
- successes: metric.NewCounter(metric.Metadata{Name: "processed"}), - failures: metric.NewCounter(metric.Metadata{Name: "failures"}), - pending: metric.NewGauge(metric.Metadata{Name: "pending"}), - processingNanos: metric.NewCounter(metric.Metadata{Name: "processingnanos"}), - purgatory: metric.NewGauge(metric.Metadata{Name: "purgatory"}), + successes: metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "processed"})), + failures: metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "failures"})), + pending: metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "pending"})), + processingNanos: metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "processingnanos"})), + purgatory: metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "purgatory"})), disabledConfig: testQueueEnabled, } diff --git a/pkg/kv/kvserver/queue_test.go b/pkg/kv/kvserver/queue_test.go index 711f8609d418..0ace2da57339 100644 --- a/pkg/kv/kvserver/queue_test.go +++ b/pkg/kv/kvserver/queue_test.go @@ -106,13 +106,13 @@ func makeTestBaseQueue(name string, impl queueImpl, store *Store, cfg queueConfi // Needed in order to pass the validation in newBaseQueue. 
cfg.needsSpanConfigs = true } - cfg.successes = metric.NewCounter(metric.Metadata{Name: "processed"}) - cfg.failures = metric.NewCounter(metric.Metadata{Name: "failures"}) - cfg.pending = metric.NewGauge(metric.Metadata{Name: "pending"}) - cfg.processingNanos = metric.NewCounter(metric.Metadata{Name: "processingnanos"}) - cfg.purgatory = metric.NewGauge(metric.Metadata{Name: "purgatory"}) - cfg.enqueueAdd = metric.NewCounter(metric.Metadata{Name: "enqueueadd"}) - cfg.enqueueUnexpectedError = metric.NewCounter(metric.Metadata{Name: "enqueueunexpectederror"}) + cfg.successes = metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "processed"})) + cfg.failures = metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "failures"})) + cfg.pending = metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "pending"})) + cfg.processingNanos = metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "processingnanos"})) + cfg.purgatory = metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "purgatory"})) + cfg.enqueueAdd = metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "enqueueadd"})) + cfg.enqueueUnexpectedError = metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "enqueueunexpectederror"})) cfg.disabledConfig = testQueueEnabled return newBaseQueue(name, impl, store, cfg) } diff --git a/pkg/kv/kvserver/raft_transport_metrics.go b/pkg/kv/kvserver/raft_transport_metrics.go index 81255a42930a..01c0a26e0998 100644 --- a/pkg/kv/kvserver/raft_transport_metrics.go +++ b/pkg/kv/kvserver/raft_transport_metrics.go @@ -24,7 +24,7 @@ type RaftTransportMetrics struct { func (t *RaftTransport) initMetrics() { t.metrics = &RaftTransportMetrics{ - SendQueueSize: metric.NewFunctionalGauge(metric.Metadata{ + SendQueueSize: metric.NewFunctionalGauge(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.send-queue-size", Help: `Number of pending outgoing messages in the Raft Transport queue. @@ -34,9 +34,9 @@ messages to at least one peer. 
Use this metric in conjunction with send-queue-bytes.`, Measurement: "Messages", Unit: metric.Unit_COUNT, - }, t.queueMessageCount), + }), t.queueMessageCount), - SendQueueBytes: metric.NewFunctionalGauge(metric.Metadata{ + SendQueueBytes: metric.NewFunctionalGauge(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.send-queue-bytes", Help: `The total byte size of pending outgoing messages in the queue. @@ -46,30 +46,30 @@ messages to at least one peer. Use this metric together with send-queue-size, to have a fuller picture.`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - }, t.queueByteSize), + }), t.queueByteSize), - MessagesDropped: metric.NewCounter(metric.Metadata{ + MessagesDropped: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.sends-dropped", Help: "Number of Raft message sends dropped by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - }), + })), - MessagesSent: metric.NewCounter(metric.Metadata{ + MessagesSent: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.sent", Help: "Number of Raft messages sent by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - }), + })), - MessagesRcvd: metric.NewCounter(metric.Metadata{ + MessagesRcvd: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.rcvd", Help: "Number of Raft messages received by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - }), + })), - ReverseSent: metric.NewCounter(metric.Metadata{ + ReverseSent: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.reverse-sent", Help: `Messages sent in the reverse direction of a stream. @@ -77,9 +77,9 @@ These messages should be rare. They are mostly informational, and are not actual responses to Raft messages. 
Responses are sent over another stream.`, Measurement: "Messages", Unit: metric.Unit_COUNT, - }), + })), - ReverseRcvd: metric.NewCounter(metric.Metadata{ + ReverseRcvd: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.reverse-rcvd", Help: `Messages received from the reverse direction of a stream. @@ -87,14 +87,14 @@ These messages should be rare. They are mostly informational, and are not actual responses to Raft messages. Responses are received over another stream.`, Measurement: "Messages", Unit: metric.Unit_COUNT, - }), + })), // This counts RACv2 piggybacked MsgAppResps. - FlowTokenDispatchesDropped: metric.NewCounter(metric.Metadata{ + FlowTokenDispatchesDropped: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "raft.transport.flow-token-dispatches-dropped", Help: "Number of flow token dispatches dropped by the Raft Transport", Measurement: "Dispatches", Unit: metric.Unit_COUNT, - }), + })), } } diff --git a/pkg/kv/kvserver/raftentry/metrics.go b/pkg/kv/kvserver/raftentry/metrics.go index 8561f5c382cf..5e941d0f2a00 100644 --- a/pkg/kv/kvserver/raftentry/metrics.go +++ b/pkg/kv/kvserver/raftentry/metrics.go @@ -8,36 +8,36 @@ package raftentry import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( - metaEntryCacheSize = metric.Metadata{ + metaEntryCacheSize = metric.InitMetadata(metric.Metadata{ Name: "raft.entrycache.size", Help: "Number of Raft entries in the Raft entry cache", Measurement: "Entry Count", Unit: metric.Unit_COUNT, - } - metaEntryCacheBytes = metric.Metadata{ + }) + metaEntryCacheBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.entrycache.bytes", Help: "Aggregate size of all Raft entries in the Raft entry cache", Measurement: "Entry Bytes", Unit: metric.Unit_BYTES, - } - metaEntryCacheAccesses = metric.Metadata{ + }) + metaEntryCacheAccesses = metric.InitMetadata(metric.Metadata{ Name: "raft.entrycache.accesses", Help: "Number of cache lookups in the Raft entry cache", Measurement: 
"Accesses", Unit: metric.Unit_COUNT, - } - metaEntryCacheHits = metric.Metadata{ + }) + metaEntryCacheHits = metric.InitMetadata(metric.Metadata{ Name: "raft.entrycache.hits", Help: "Number of successful cache lookups in the Raft entry cache", Measurement: "Hits", Unit: metric.Unit_COUNT, - } - metaEntryCacheReadBytes = metric.Metadata{ + }) + metaEntryCacheReadBytes = metric.InitMetadata(metric.Metadata{ Name: "raft.entrycache.read_bytes", Help: "Counter of bytes in entries returned from the Raft entry cache", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) ) // Metrics is the set of metrics for the raft entry cache. diff --git a/pkg/kv/kvserver/rangefeed/metrics.go b/pkg/kv/kvserver/rangefeed/metrics.go index b164e93aaf3a..6371933a19c0 100644 --- a/pkg/kv/kvserver/rangefeed/metrics.go +++ b/pkg/kv/kvserver/rangefeed/metrics.go @@ -14,95 +14,95 @@ import ( ) var ( - metaRangeFeedCatchUpScanNanos = metric.Metadata{ + metaRangeFeedCatchUpScanNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.catchup_scan_nanos", Help: "Time spent in RangeFeed catchup scan", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaRangeFeedExhausted = metric.Metadata{ + }) + metaRangeFeedExhausted = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.budget_allocation_failed", Help: "Number of times RangeFeed failed because memory budget was exceeded", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaRangeFeedBudgetBlocked = metric.Metadata{ + }) + metaRangeFeedBudgetBlocked = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.budget_allocation_blocked", Help: "Number of times RangeFeed waited for budget availability", Measurement: "Events", Unit: metric.Unit_COUNT, - } - metaRangeFeedRegistrations = metric.Metadata{ + }) + metaRangeFeedRegistrations = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.registrations", Help: "Number of active RangeFeed registrations", Measurement: 
"Registrations", Unit: metric.Unit_COUNT, - } - metaRangeFeedBufferedRegistrations = metric.Metadata{ + }) + metaRangeFeedBufferedRegistrations = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.buffered_registrations", Help: "Number of active RangeFeed buffered registrations", Measurement: "Registrations", Unit: metric.Unit_COUNT, - } - metaRangeFeedUnbufferedRegistrations = metric.Metadata{ + }) + metaRangeFeedUnbufferedRegistrations = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.unbuffered_registrations", Help: "Number of active RangeFeed unbuffered registrations", Measurement: "Registrations", Unit: metric.Unit_COUNT, - } - metaRangeFeedClosedTimestampMaxBehindNanos = metric.Metadata{ + }) + metaRangeFeedClosedTimestampMaxBehindNanos = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.closed_timestamp_max_behind_nanos", Help: "Largest latency between realtime and replica max closed timestamp for replicas " + "that have active rangeeds on them", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaRangefeedSlowClosedTimestampRanges = metric.Metadata{ + }) + metaRangefeedSlowClosedTimestampRanges = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.closed_timestamp.slow_ranges", Help: "Number of ranges that have a closed timestamp lagging by more than 5x target lag. 
" + "Periodically re-calculated", Measurement: "Ranges", Unit: metric.Unit_COUNT, - } - metaRangeFeedSlowClosedTimestampCancelledRanges = metric.Metadata{ + }) + metaRangeFeedSlowClosedTimestampCancelledRanges = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.closed_timestamp.slow_ranges.cancelled", Help: "Number of rangefeeds that were cancelled due to a chronically " + "lagging closed timestamp", Measurement: "Cancellation Count", Unit: metric.Unit_COUNT, - } - metaRangeFeedProcessors = metric.Metadata{ + }) + metaRangeFeedProcessors = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.processors", Help: "Number of active RangeFeed processors", Measurement: "Processors", Unit: metric.Unit_COUNT, - } - metaQueueTimeHistogramsTemplate = metric.Metadata{ + }) + metaQueueTimeHistogramsTemplate = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.scheduler.%s.latency", Help: "KV RangeFeed %s scheduler latency", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaQueueSizeTemplate = metric.Metadata{ + }) + metaQueueSizeTemplate = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.scheduler.%s.queue_size", Help: "Number of entries in the KV RangeFeed %s scheduler queue", Measurement: "Pending Ranges", Unit: metric.Unit_COUNT, - } - metaQueueTimeout = metric.Metadata{ + }) + metaQueueTimeout = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.scheduled_processor.queue_timeout", Help: "Number of times the RangeFeed processor shutdown because of a queue send timeout", Measurement: "Failure Count", Unit: metric.Unit_COUNT, - } - metaRangeFeedOutputLoopNanosUnbufferedRegistration = metric.Metadata{ + }) + metaRangeFeedOutputLoopNanosUnbufferedRegistration = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.output_loop_unbuffered_registration_nanos", Help: "Duration of the Rangefeed O(range) output loop goroutine. 
This is only applicable for " + "unbuffered registrations since buffered registrations spawns long-living goroutines.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) ) // Metrics are for production monitoring of RangeFeeds. @@ -163,12 +163,12 @@ func (FeedBudgetPoolMetrics) MetricStruct() {} // NewFeedBudgetMetrics creates new metrics for RangeFeed budgets. func NewFeedBudgetMetrics(histogramWindow time.Duration) *FeedBudgetPoolMetrics { makeMemMetricMetadata := func(name, help string) metric.Metadata { - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.mem_" + name, Help: help, Measurement: "Memory", Unit: metric.Unit_BYTES, - } + }) } return &FeedBudgetPoolMetrics{ @@ -233,18 +233,18 @@ type StreamManagerMetrics struct { } var ( - metaActiveMuxRangeFeed = metric.Metadata{ + metaActiveMuxRangeFeed = metric.InitMetadata(metric.Metadata{ Name: "rpc.streams.mux_rangefeed.active", Help: `Number of currently running MuxRangeFeed streams`, Measurement: "Streams", Unit: metric.Unit_COUNT, - } - metaTotalMuxRangeFeed = metric.Metadata{ + }) + metaTotalMuxRangeFeed = metric.InitMetadata(metric.Metadata{ Name: "rpc.streams.mux_rangefeed.recv", Help: `Total number of MuxRangeFeed streams`, Measurement: "Streams", Unit: metric.Unit_COUNT, - } + }) ) func (*StreamManagerMetrics) MetricStruct() {} @@ -263,12 +263,12 @@ type BufferedSenderMetrics struct { } var ( - metaBufferedSenderQueueSize = metric.Metadata{ + metaBufferedSenderQueueSize = metric.InitMetadata(metric.Metadata{ Name: "kv.rangefeed.buffered_sender.queue_size", Help: `Number of entries in the buffered sender queue`, Measurement: "Pending Events", Unit: metric.Unit_COUNT, - } + }) ) func (*BufferedSenderMetrics) MetricStruct() {} @@ -281,12 +281,12 @@ func NewBufferedSenderMetrics() *BufferedSenderMetrics { } var ( - metaRangeFeedMuxStreamSlowSends = metric.Metadata{ + metaRangeFeedMuxStreamSlowSends = metric.InitMetadata(metric.Metadata{ Name: 
"kv.rangefeed.mux_stream_send.slow_events", Help: "Number of RangeFeed events that took longer than 10s to send to the client", Measurement: "Events", Unit: metric.Unit_COUNT, - } + }) ) type LockedMuxStreamMetrics struct { diff --git a/pkg/kv/kvserver/replica_gc_queue.go b/pkg/kv/kvserver/replica_gc_queue.go index 7aa878938ba6..38dd97c4cae0 100644 --- a/pkg/kv/kvserver/replica_gc_queue.go +++ b/pkg/kv/kvserver/replica_gc_queue.go @@ -62,12 +62,12 @@ const ( ) var ( - metaReplicaGCQueueRemoveReplicaCount = metric.Metadata{ + metaReplicaGCQueueRemoveReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicagc.removereplica", Help: "Number of replica removals attempted by the replica GC queue", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } + }) ) // ReplicaGCQueueMetrics is the set of metrics for the replica GC queue. diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go index ea46b7fb99b0..84e12d8f822e 100644 --- a/pkg/kv/kvserver/replicate_queue.go +++ b/pkg/kv/kvserver/replicate_queue.go @@ -135,163 +135,163 @@ var ReplicateQueueMaxSize = settings.RegisterIntSetting( ) var ( - metaReplicateQueueAddReplicaCount = metric.Metadata{ + metaReplicateQueueAddReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.addreplica", Help: "Number of replica additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueAddVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueAddVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.addvoterreplica", Help: "Number of voter replica additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueAddNonVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueAddNonVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.addnonvoterreplica", Help: "Number of non-voter 
replica additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removereplica", Help: "Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removevoterreplica", Help: "Number of voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveNonVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveNonVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removenonvoterreplica", Help: "Number of non-voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDeadReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDeadReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedeadreplica", Help: "Number of dead replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDeadVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDeadVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedeadvoterreplica", Help: "Number of dead voter replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica 
Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDeadNonVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDeadNonVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedeadnonvoterreplica", Help: "Number of dead non-voter replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDecommissioningReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDecommissioningReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedecommissioningreplica", Help: "Number of decommissioning replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDecommissioningVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDecommissioningVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedecommissioningvoterreplica", Help: "Number of decommissioning voter replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDecommissioningNonVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDecommissioningNonVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedecommissioningnonvoterreplica", Help: "Number of decommissioning non-voter replica removals attempted by the replicate queue (typically in response to a node outage)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveLearnerReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRemoveLearnerReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removelearnerreplica", Help: "Number of learner replica removals attempted by the 
replicate queue (typically due to internal race conditions)", Measurement: "Replica Removals", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRebalanceReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRebalanceReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.rebalancereplica", Help: "Number of replica rebalancer-initiated additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRebalanceVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRebalanceVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.rebalancevoterreplica", Help: "Number of voter replica rebalancer-initiated additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRebalanceNonVoterReplicaCount = metric.Metadata{ + }) + metaReplicateQueueRebalanceNonVoterReplicaCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.rebalancenonvoterreplica", Help: "Number of non-voter replica rebalancer-initiated additions attempted by the replicate queue", Measurement: "Replica Additions", Unit: metric.Unit_COUNT, - } - metaReplicateQueueTransferLeaseCount = metric.Metadata{ + }) + metaReplicateQueueTransferLeaseCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.transferlease", Help: "Number of range lease transfers attempted by the replicate queue", Measurement: "Lease Transfers", Unit: metric.Unit_COUNT, - } - metaReplicateQueueNonVoterPromotionsCount = metric.Metadata{ + }) + metaReplicateQueueNonVoterPromotionsCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.nonvoterpromotions", Help: "Number of non-voters promoted to voters by the replicate queue", Measurement: "Promotions of Non Voters to Voters", Unit: metric.Unit_COUNT, - } - metaReplicateQueueVoterDemotionsCount = metric.Metadata{ + }) + metaReplicateQueueVoterDemotionsCount = 
metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.voterdemotions", Help: "Number of voters demoted to non-voters by the replicate queue", Measurement: "Demotions of Voters to Non Voters", Unit: metric.Unit_COUNT, - } - metaReplicateQueueAddReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueAddReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.addreplica.success", Help: "Number of successful replica additions processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueAddReplicaErrorCount = metric.Metadata{ + }) + metaReplicateQueueAddReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.addreplica.error", Help: "Number of failed replica additions processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueRemoveReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removereplica.success", Help: "Number of successful replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveReplicaErrorCount = metric.Metadata{ + }) + metaReplicateQueueRemoveReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removereplica.error", Help: "Number of failed replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueReplaceDeadReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueReplaceDeadReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.replacedeadreplica.success", Help: "Number of successful dead replica replacements processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueReplaceDeadReplicaErrorCount = metric.Metadata{ + }) + 
metaReplicateQueueReplaceDeadReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.replacedeadreplica.error", Help: "Number of failed dead replica replacements processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueReplaceDecommissioningReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueReplaceDecommissioningReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.replacedecommissioningreplica.success", Help: "Number of successful decommissioning replica replacements processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueReplaceDecommissioningReplicaErrorCount = metric.Metadata{ + }) + metaReplicateQueueReplaceDecommissioningReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.replacedecommissioningreplica.error", Help: "Number of failed decommissioning replica replacements processed by the replicate queue", Measurement: "Replicas", @@ -299,32 +299,32 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `Refer to Decommission the node.`, - } - metaReplicateQueueRemoveDecommissioningReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDecommissioningReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedecommissioningreplica.success", Help: "Number of successful decommissioning replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDecommissioningReplicaErrorCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDecommissioningReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedecommissioningreplica.error", Help: "Number of failed decommissioning replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - 
metaReplicateQueueRemoveDeadReplicaSuccessCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDeadReplicaSuccessCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedeadreplica.success", Help: "Number of successful dead replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRemoveDeadReplicaErrorCount = metric.Metadata{ + }) + metaReplicateQueueRemoveDeadReplicaErrorCount = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.removedeadreplica.error", Help: "Number of failed dead replica removals processed by the replicate queue", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueueRequeueDueToPriorityInversion = metric.Metadata{ + }) + metaReplicateQueueRequeueDueToPriorityInversion = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.priority_inversion.requeue", Help: "Number of priority inversions in the replicate queue that resulted in requeuing of the replicas. " + "A priority inversion occurs when the priority at processing time ends up being lower " + @@ -332,14 +332,14 @@ var ( "the change is requeued to avoid unfairness.", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } - metaReplicateQueuePriorityInversionTotal = metric.Metadata{ + }) + metaReplicateQueuePriorityInversionTotal = metric.InitMetadata(metric.Metadata{ Name: "queue.replicate.priority_inversion.total", Help: "Total number of priority inversions in the replicate queue. 
" + "A priority inversion occurs when the priority at processing time ends up being lower than at enqueue time", Measurement: "Replicas", Unit: metric.Unit_COUNT, - } + }) ) // quorumError indicates a retryable error condition which sends replicas being diff --git a/pkg/kv/kvserver/split/decider_test.go b/pkg/kv/kvserver/split/decider_test.go index 36948050a8ba..872c388e00be 100644 --- a/pkg/kv/kvserver/split/decider_test.go +++ b/pkg/kv/kvserver/split/decider_test.go @@ -73,9 +73,9 @@ func ms(i int) time.Time { func newSplitterMetrics() *LoadSplitterMetrics { return &LoadSplitterMetrics{ - PopularKeyCount: metric.NewCounter(metric.Metadata{}), - NoSplitKeyCount: metric.NewCounter(metric.Metadata{}), - ClearDirectionCount: metric.NewCounter(metric.Metadata{}), + PopularKeyCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), + NoSplitKeyCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), + ClearDirectionCount: metric.NewCounter(metric.InitMetadata(metric.Metadata{})), } } diff --git a/pkg/kv/kvserver/split_queue.go b/pkg/kv/kvserver/split_queue.go index a09d42f22bdd..004146f509a6 100644 --- a/pkg/kv/kvserver/split_queue.go +++ b/pkg/kv/kvserver/split_queue.go @@ -54,24 +54,24 @@ var SlowSplitTracingThreshold = settings.RegisterDurationSetting( ) var ( - metaSizeBasedSplitCount = metric.Metadata{ + metaSizeBasedSplitCount = metric.InitMetadata(metric.Metadata{ Name: "queue.split.size_based", Help: "Number of range splits due to a range being greater than the configured max range size", Measurement: "Range Splits", Unit: metric.Unit_COUNT, - } - metaLoadBasedSplitCount = metric.Metadata{ + }) + metaLoadBasedSplitCount = metric.InitMetadata(metric.Metadata{ Name: "queue.split.load_based", Help: "Number of range splits due to a range being greater than the configured max range load", Measurement: "Range Splits", Unit: metric.Unit_COUNT, - } - metaSpanConfigBasedSplitCount = metric.Metadata{ + }) + metaSpanConfigBasedSplitCount = 
metric.InitMetadata(metric.Metadata{ Name: "queue.split.span_config_based", Help: "Number of range splits due to span configuration", Measurement: "Range Splits", Unit: metric.Unit_COUNT, - } + }) ) // SplitQueueMetrics is the set of metrics for the split queue. diff --git a/pkg/kv/kvserver/store_raft_test.go b/pkg/kv/kvserver/store_raft_test.go index 5a43641bc7f1..ef1d768d2fed 100644 --- a/pkg/kv/kvserver/store_raft_test.go +++ b/pkg/kv/kvserver/store_raft_test.go @@ -40,7 +40,7 @@ func TestRaftReceiveQueue(t *testing.T) { defer leaktest.AfterTest(t)() st := cluster.MakeTestingClusterSettings() - g := metric.NewGauge(metric.Metadata{}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) m := mon.NewUnlimitedMonitor(context.Background(), mon.Options{ Name: mon.MakeName("test"), CurCount: g, @@ -233,7 +233,7 @@ func TestRaftReceiveQueuesEnforceMaxLenConcurrency(t *testing.T) { skip.UnderDuress(t, "slow test") st := cluster.MakeTestingClusterSettings() - g := metric.NewGauge(metric.Metadata{}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) m := mon.NewUnlimitedMonitor(context.Background(), mon.Options{ Name: mon.MakeName("test"), CurCount: g, @@ -296,7 +296,7 @@ func TestRaftReceiveQueuesEnforceMaxLen(t *testing.T) { defer leaktest.AfterTest(t)() st := cluster.MakeTestingClusterSettings() - g := metric.NewGauge(metric.Metadata{}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) m := mon.NewUnlimitedMonitor(context.Background(), mon.Options{ Name: mon.MakeName("test"), CurCount: g, diff --git a/pkg/kv/kvserver/store_rebalancer.go b/pkg/kv/kvserver/store_rebalancer.go index b0716d6c1776..ddb4821f1ca3 100644 --- a/pkg/kv/kvserver/store_rebalancer.go +++ b/pkg/kv/kvserver/store_rebalancer.go @@ -27,7 +27,7 @@ import ( ) var ( - metaStoreRebalancerLeaseTransferCount = metric.Metadata{ + metaStoreRebalancerLeaseTransferCount = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.lease.transfers", Help: "Number of lease transfers 
motivated by store-level load imbalances", Measurement: "Lease Transfers", @@ -35,8 +35,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). If this is high (when the count is rated), it indicates that more rebalancing activity is taking place due to load imbalance between stores.`, - } - metaStoreRebalancerRangeRebalanceCount = metric.Metadata{ + }) + metaStoreRebalancerRangeRebalanceCount = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.range.rebalances", Help: "Number of range rebalance operations motivated by store-level load imbalances", Measurement: "Range Rebalances", @@ -44,14 +44,14 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_REPLICATION, HowToUse: `Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). If this is high (when the count is rated), it indicates that more rebalancing activity is taking place due to load imbalance between stores.`, - } - metaStoreRebalancerImbalancedOverfullOptionsExhausted = metric.Metadata{ + }) + metaStoreRebalancerImbalancedOverfullOptionsExhausted = metric.InitMetadata(metric.Metadata{ Name: "rebalancing.state.imbalanced_overfull_options_exhausted", Help: "Number of occurrences where this store was overfull but failed to " + "shed load after exhausting available rebalance options", Measurement: "Overfull Options Exhausted", Unit: metric.Unit_COUNT, - } + }) ) // StoreRebalancerMetrics is the set of metrics for the store-level rebalancer. 
diff --git a/pkg/kv/kvserver/storeliveness/metrics.go b/pkg/kv/kvserver/storeliveness/metrics.go index 69939599b558..28ee933452bd 100644 --- a/pkg/kv/kvserver/storeliveness/metrics.go +++ b/pkg/kv/kvserver/storeliveness/metrics.go @@ -115,160 +115,160 @@ func newSupportManagerMetrics() *SupportManagerMetrics { } var ( - metaSendQueueSize = metric.Metadata{ + metaSendQueueSize = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.send-queue-size", Help: "Number of pending outgoing messages in all " + "Store Liveness Transport per-store send queues", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaSendQueueBytes = metric.Metadata{ + }) + metaSendQueueBytes = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.send-queue-bytes", Help: "Total byte size of pending outgoing messages in all " + "Store Liveness Transport per-store send queues", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaSendQueueIdle = metric.Metadata{ + }) + metaSendQueueIdle = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.send-queue-idle", Help: "Number of Store Liveness Transport per-store send queues " + "that have become idle due to no recently-sent messages", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaMessagesSent = metric.Metadata{ + }) + metaMessagesSent = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.sent", Help: "Number of Store Liveness messages sent by the " + "Store Liveness Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaMessagesReceived = metric.Metadata{ + }) + metaMessagesReceived = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.received", Help: "Number of Store Liveness messages received by the " + "Store Liveness Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaMessagesSendDropped = metric.Metadata{ + }) + metaMessagesSendDropped = metric.InitMetadata(metric.Metadata{ Name: 
"storeliveness.transport.send_dropped", Help: "Number of Store Liveness messages dropped by the " + "Store Liveness Transport on the sender side", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaMessagesReceiveDropped = metric.Metadata{ + }) + metaMessagesReceiveDropped = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.receive_dropped", Help: "Number of Store Liveness messages dropped by the " + "Store Liveness Transport on the receiver side", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaHeartbeatSuccesses = metric.Metadata{ + }) + metaHeartbeatSuccesses = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.heartbeat.successes", Help: "Number of Store Liveness heartbeats sent out by the " + "Store Liveness Support Manager", Measurement: "Heartbeats", Unit: metric.Unit_COUNT, - } - metaHeartbeatFailures = metric.Metadata{ + }) + metaHeartbeatFailures = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.heartbeat.failures", Help: "Number of Store Liveness heartbeats that failed to be sent out by the " + "Store Liveness Support Manager", Measurement: "Heartbeats", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaMessageHandleSuccesses = metric.Metadata{ + }) + metaMessageHandleSuccesses = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.message_handle.successes", Help: "Number of incoming Store Liveness messages handled by the " + "Store Liveness Support Manager", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaMessageHandleFailures = metric.Metadata{ + }) + metaMessageHandleFailures = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.message_handle.failures", Help: "Number of incoming Store Liveness messages that failed to be handled by the " + "Store Liveness Support Manager", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaSupportWithdrawSuccesses = metric.Metadata{ + }) + metaSupportWithdrawSuccesses = metric.InitMetadata(metric.Metadata{ 
Name: "storeliveness.support_withdraw.successes", Help: "Number of times the Store Liveness Support Manager has successfully withdrawn " + "support for another store", Measurement: "Support Withdrawals", Unit: metric.Unit_COUNT, - } - metaSupportWithdrawFailures = metric.Metadata{ + }) + metaSupportWithdrawFailures = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.support_withdraw.failures", Help: "Number of times the Store Liveness Support Manager has encountered an error " + "while withdrawing support for another store", Measurement: "Support Withdrawals", Unit: metric.Unit_COUNT, - } - metaSupportFromStores = metric.Metadata{ + }) + metaSupportFromStores = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.support_from.stores", Help: "Number of stores that the Store Liveness Support Manager is requesting " + "support from by sending heartbeats", Measurement: "Stores", Unit: metric.Unit_COUNT, - } - metaSupportForStores = metric.Metadata{ + }) + metaSupportForStores = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.support_for.stores", Help: "Number of stores that the Store Liveness Support Manager has ever " + "provided support for", Measurement: "Stores", Unit: metric.Unit_COUNT, - } - metaReceiveQueueSize = metric.Metadata{ + }) + metaReceiveQueueSize = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.receive-queue-size", Help: "Number of pending incoming messages from the " + "Store Liveness Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, - } - metaReceiveQueueBytes = metric.Metadata{ + }) + metaReceiveQueueBytes = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.receive-queue-bytes", Help: "Total byte size of pending incoming messages from " + "Store Liveness Transport", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCallbacksProcessingDuration = metric.Metadata{ + }) + metaCallbacksProcessingDuration = metric.InitMetadata(metric.Metadata{ Name: 
"storeliveness.callbacks.processing_duration", Help: "Duration of support withdrawal callback processing", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } - metaBatchesSent = metric.Metadata{ + }) + metaBatchesSent = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.batches-sent", Help: "Number of message batches sent by the Store Liveness Transport", Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaBatchesReceived = metric.Metadata{ + }) + metaBatchesReceived = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.transport.batches-received", Help: "Number of message batches received by the Store Liveness Transport", Measurement: "Batches", Unit: metric.Unit_COUNT, - } - metaHeartbeatPersistDuration = metric.Metadata{ + }) + metaHeartbeatPersistDuration = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.heartbeat.persist_duration", Help: "Latency of persisting Store Liveness requester meta before sending heartbeats", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } - metaMessageHandlePersistDuration = metric.Metadata{ + }) + metaMessageHandlePersistDuration = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.message_handle.persist_duration", Help: "Latency of persisting Store Liveness state when handling incoming messages", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } - metaSupportWithdrawPersistDuration = metric.Metadata{ + }) + metaSupportWithdrawPersistDuration = metric.InitMetadata(metric.Metadata{ Name: "storeliveness.support_withdraw.persist_duration", Help: "Latency of persisting Store Liveness state when withdrawing support", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, - } + }) ) diff --git a/pkg/kv/kvserver/tenantrate/metrics.go b/pkg/kv/kvserver/tenantrate/metrics.go index 679155faeff7..22ec06a8a3e9 100644 --- a/pkg/kv/kvserver/tenantrate/metrics.go +++ b/pkg/kv/kvserver/tenantrate/metrics.go @@ -27,54 +27,54 @@ type Metrics struct { var _ metric.Struct = 
(*Metrics)(nil) var ( - metaTenants = metric.Metadata{ + metaTenants = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.num_tenants", Help: "Number of tenants currently being tracked", Measurement: "Tenants", Unit: metric.Unit_COUNT, - } - metaCurrentBlocked = metric.Metadata{ + }) + metaCurrentBlocked = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.current_blocked", Help: "Number of requests currently blocked by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaReadBatchesAdmitted = metric.Metadata{ + }) + metaReadBatchesAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.read_batches_admitted", Help: "Number of read batches admitted by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaWriteBatchesAdmitted = metric.Metadata{ + }) + metaWriteBatchesAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.write_batches_admitted", Help: "Number of write batches admitted by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaReadRequestsAdmitted = metric.Metadata{ + }) + metaReadRequestsAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.read_requests_admitted", Help: "Number of read requests admitted by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaWriteRequestsAdmitted = metric.Metadata{ + }) + metaWriteRequestsAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.write_requests_admitted", Help: "Number of write requests admitted by the rate limiter", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaReadBytesAdmitted = metric.Metadata{ + }) + metaReadBytesAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.read_bytes_admitted", Help: "Number of read bytes admitted by the rate limiter", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaWriteBytesAdmitted = metric.Metadata{ + }) + 
metaWriteBytesAdmitted = metric.InitMetadata(metric.Metadata{ Name: "kv.tenant_rate_limit.write_bytes_admitted", Help: "Number of write bytes admitted by the rate limiter", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) ) func makeMetrics() Metrics { diff --git a/pkg/kv/kvserver/tscache/metrics.go b/pkg/kv/kvserver/tscache/metrics.go index b8b1ae9d4b0f..6b9076507808 100644 --- a/pkg/kv/kvserver/tscache/metrics.go +++ b/pkg/kv/kvserver/tscache/metrics.go @@ -24,18 +24,18 @@ func (sklMetrics) MetricStruct() {} var _ metric.Struct = sklMetrics{} var ( - metaSklPages = metric.Metadata{ + metaSklPages = metric.InitMetadata(metric.Metadata{ Name: "tscache.skl.pages", Help: "Number of pages in the timestamp cache", Measurement: "Pages", Unit: metric.Unit_COUNT, - } - metaSklRotations = metric.Metadata{ + }) + metaSklRotations = metric.InitMetadata(metric.Metadata{ Name: "tscache.skl.rotations", Help: "Number of page rotations in the timestamp cache", Measurement: "Page Rotations", Unit: metric.Unit_COUNT, - } + }) ) func makeMetrics() Metrics { diff --git a/pkg/kv/kvserver/txnrecovery/metrics.go b/pkg/kv/kvserver/txnrecovery/metrics.go index 29ee3d4cee28..70ed5f4822bb 100644 --- a/pkg/kv/kvserver/txnrecovery/metrics.go +++ b/pkg/kv/kvserver/txnrecovery/metrics.go @@ -21,57 +21,57 @@ type Metrics struct { func makeMetrics() Metrics { return Metrics{ AttemptsPending: metric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnrecovery.attempts.pending", Help: "Number of transaction recovery attempts currently in-flight", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), Attempts: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnrecovery.attempts.total", Help: "Number of transaction recovery attempts executed", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), SuccessesAsCommitted: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ 
Name: "txnrecovery.successes.committed", Help: "Number of transaction recovery attempts that committed a transaction", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), SuccessesAsAborted: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnrecovery.successes.aborted", Help: "Number of transaction recovery attempts that aborted a transaction", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), SuccessesAsPending: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnrecovery.successes.pending", Help: "Number of transaction recovery attempts that left a transaction pending", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), Failures: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnrecovery.failures", Help: "Number of transaction recovery attempts that failed", Measurement: "Recovery Attempts", Unit: metric.Unit_COUNT, - }, + }), ), } } diff --git a/pkg/kv/kvserver/txnwait/metrics.go b/pkg/kv/kvserver/txnwait/metrics.go index 5c9b9e0d242b..be019b9e14a7 100644 --- a/pkg/kv/kvserver/txnwait/metrics.go +++ b/pkg/kv/kvserver/txnwait/metrics.go @@ -26,48 +26,48 @@ type Metrics struct { func NewMetrics(histogramWindowInterval time.Duration) *Metrics { return &Metrics{ PusheeWaiting: metric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.pushee.waiting", Help: "Number of pushees on the txn wait queue", Measurement: "Waiting Pushees", Unit: metric.Unit_COUNT, - }, + }), ), PusherWaiting: metric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.pusher.waiting", Help: "Number of pushers on the txn wait queue", Measurement: "Waiting Pushers", Unit: metric.Unit_COUNT, - }, + }), ), QueryWaiting: metric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.query.waiting", Help: "Number of 
transaction status queries waiting for an updated transaction record", Measurement: "Waiting Queries", Unit: metric.Unit_COUNT, - }, + }), ), PusherSlow: metric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.pusher.slow", Help: "The total number of cases where a pusher waited more than the excessive wait threshold", Measurement: "Slow Pushers", Unit: metric.Unit_COUNT, - }, + }), ), PusherWaitTime: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.pusher.wait_time", Help: "Histogram of durations spent in queue by pushers", Measurement: "Pusher wait time", Unit: metric.Unit_NANOSECONDS, - }, + }), MaxVal: time.Hour.Nanoseconds(), SigFigs: 1, Duration: histogramWindowInterval, @@ -75,12 +75,12 @@ func NewMetrics(histogramWindowInterval time.Duration) *Metrics { }), QueryWaitTime: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.query.wait_time", Help: "Histogram of durations spent in queue by queries", Measurement: "Query wait time", Unit: metric.Unit_NANOSECONDS, - }, + }), MaxVal: time.Hour.Nanoseconds(), SigFigs: 1, Duration: histogramWindowInterval, @@ -88,7 +88,7 @@ func NewMetrics(histogramWindowInterval time.Duration) *Metrics { }), DeadlocksTotal: metric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "txnwaitqueue.deadlocks_total", Help: "Number of deadlocks detected by the txn wait queue", Measurement: "Deadlocks", @@ -96,7 +96,7 @@ func NewMetrics(histogramWindowInterval time.Duration) *Metrics { Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors. 
However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible.`, - }, + }), ), } } diff --git a/pkg/obs/ash/metrics.go b/pkg/obs/ash/metrics.go index 39929ef91b55..ddc4de1cf513 100644 --- a/pkg/obs/ash/metrics.go +++ b/pkg/obs/ash/metrics.go @@ -12,27 +12,27 @@ import ( ) var ( - metaActiveWorkStates = metric.Metadata{ + metaActiveWorkStates = metric.InitMetadata(metric.Metadata{ Name: "ash.work_states.active", Help: "Number of goroutines with an active ASH work state", Measurement: "Goroutines", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } - metaTakeSampleLatency = metric.Metadata{ + }) + metaTakeSampleLatency = metric.InitMetadata(metric.Metadata{ Name: "ash.sampler.take_sample.latency", Help: "Latency of ASH sample collection ticks", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, MetricType: io_prometheus_client.MetricType_HISTOGRAM, - } - metaSamplesCollected = metric.Metadata{ + }) + metaSamplesCollected = metric.InitMetadata(metric.Metadata{ Name: "ash.samples.collected", Help: "Total number of ASH samples collected", Measurement: "Samples", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) // Metrics holds the metrics for the ASH sampler subsystem. 
diff --git a/pkg/obs/clustermetrics/cmmetrics/metadata.go b/pkg/obs/clustermetrics/cmmetrics/metadata.go index ec6cdb4f435e..f92d5b8cc3b8 100644 --- a/pkg/obs/clustermetrics/cmmetrics/metadata.go +++ b/pkg/obs/clustermetrics/cmmetrics/metadata.go @@ -64,7 +64,7 @@ func GetClusterMetricMetadata(name string) (metric.Metadata, []string, bool) { defer metadata.mu.RUnlock() meta, ok := metadata.mu.registeredMetadata[name] if !ok { - return metric.Metadata{}, nil, false + return metric.InitMetadata(metric.Metadata{}), nil, false } labels := metadata.mu.registeredLabeledMetrics[name] return meta, labels, true diff --git a/pkg/obs/clustermetrics/cmreader/registry_syncer_integration_test.go b/pkg/obs/clustermetrics/cmreader/registry_syncer_integration_test.go index 5cbbe5f05378..d7b0926cf15f 100644 --- a/pkg/obs/clustermetrics/cmreader/registry_syncer_integration_test.go +++ b/pkg/obs/clustermetrics/cmreader/registry_syncer_integration_test.go @@ -44,29 +44,29 @@ func TestRegistrySyncer(t *testing.T) { // Register test metric metadata so ToMetric() can resolve them. 
defer cmmetrics.TestingRegisterLabeledClusterMetric( - "test.gauge_labeled", metric.Metadata{ + "test.gauge_labeled", metric.InitMetadata(metric.Metadata{ Name: "test.gauge_labeled", Help: "A test gauge", - }, + }), []string{"store"}, )() - defer cmmetrics.TestingRegisterClusterMetric("test.counter", metric.Metadata{ + defer cmmetrics.TestingRegisterClusterMetric("test.counter", metric.InitMetadata(metric.Metadata{ Name: "test.counter", Help: "A test counter", - })() - defer cmmetrics.TestingRegisterClusterMetric("test.scalar", metric.Metadata{ + }))() + defer cmmetrics.TestingRegisterClusterMetric("test.scalar", metric.InitMetadata(metric.Metadata{ Name: "test.scalar", Help: "A scalar gauge for value verification", - })() - defer clustermetrics.TestingRegisterClusterMetric("test.stopwatch", metric.Metadata{ + }))() + defer clustermetrics.TestingRegisterClusterMetric("test.stopwatch", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatch", Help: "A scalar stopwatch", - })() + }))() defer clustermetrics.TestingRegisterLabeledClusterMetric( - "test.stopwatch_labeled", metric.Metadata{ + "test.stopwatch_labeled", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatch_labeled", Help: "A labeled stopwatch", - }, + }), []string{"store"}, )() @@ -248,10 +248,10 @@ func TestRegistrySyncer(t *testing.T) { // --------------------------------------------------------------- // Insert a new metric after the initial scan (via OnUpsert). 
// --------------------------------------------------------------- - defer cmmetrics.TestingRegisterClusterMetric("test.newgauge", metric.Metadata{ + defer cmmetrics.TestingRegisterClusterMetric("test.newgauge", metric.InitMetadata(metric.Metadata{ Name: "test.newgauge", Help: "A new gauge added after initial scan", - })() + }))() r.Exec(t, `INSERT INTO system.cluster_metrics (id, name, labels, type, value, node_id) @@ -740,10 +740,10 @@ func TestRegistrySyncerMultiTenant(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - defer clustermetrics.TestingRegisterClusterMetric("test.mt_gauge", metric.Metadata{ + defer clustermetrics.TestingRegisterClusterMetric("test.mt_gauge", metric.InitMetadata(metric.Metadata{ Name: "test.mt_gauge", Help: "A gauge for multi-tenant isolation testing", - })() + }))() ctx := context.Background() diff --git a/pkg/obs/clustermetrics/cmreader/registry_syncer_test.go b/pkg/obs/clustermetrics/cmreader/registry_syncer_test.go index 0de96262e249..bef91420336d 100644 --- a/pkg/obs/clustermetrics/cmreader/registry_syncer_test.go +++ b/pkg/obs/clustermetrics/cmreader/registry_syncer_test.go @@ -42,10 +42,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "new gauge", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.gauge", metric.Metadata{ + "test.gauge", metric.InitMetadata(metric.Metadata{ Name: "test.gauge", Help: "A test gauge", - }) + })) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gauge", Type: "GAUGE", Value: 42, @@ -69,10 +69,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "existing gauge update", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.gauge", metric.Metadata{ + "test.gauge", metric.InitMetadata(metric.Metadata{ Name: "test.gauge", Help: "A test gauge", - }) + })) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gauge", Type: "GAUGE", Value: 10, @@ -87,10 +87,10 @@ func TestUpdateMetricLocked(t *testing.T) { 
name: "counter", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.counter", metric.Metadata{ + "test.counter", metric.InitMetadata(metric.Metadata{ Name: "test.counter", Help: "A test counter", - }) + })) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.counter", Type: "COUNTER", Value: 0, @@ -115,10 +115,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "new gauge vec", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.gaugevec", metric.Metadata{ + "test.gaugevec", metric.InitMetadata(metric.Metadata{ Name: "test.gaugevec", Help: "A test gauge vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gaugevec", @@ -143,10 +143,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "vec second label set", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.gaugevec", metric.Metadata{ + "test.gaugevec", metric.InitMetadata(metric.Metadata{ Name: "test.gaugevec", Help: "A test gauge vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gaugevec", @@ -167,10 +167,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "existing gauge vec update", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.gaugevec", metric.Metadata{ + "test.gaugevec", metric.InitMetadata(metric.Metadata{ Name: "test.gaugevec", Help: "A test gauge vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gaugevec", @@ -190,10 +190,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "counter vec", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.countervec", metric.Metadata{ + "test.countervec", metric.InitMetadata(metric.Metadata{ Name: "test.countervec", Help: "A test counter vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: 
[]cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.countervec", @@ -222,10 +222,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "new stopwatch", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.stopwatch", metric.Metadata{ + "test.stopwatch", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatch", Help: "A test stopwatch", - }) + })) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.stopwatch", Type: "STOPWATCH", @@ -253,10 +253,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "existing stopwatch update", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.stopwatch", metric.Metadata{ + "test.stopwatch", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatch", Help: "A test stopwatch", - }) + })) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.stopwatch", Type: "STOPWATCH", @@ -276,10 +276,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "new stopwatch vec", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.stopwatchvec", metric.Metadata{ + "test.stopwatchvec", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatchvec", Help: "A test stopwatch vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.stopwatchvec", @@ -305,10 +305,10 @@ func TestUpdateMetricLocked(t *testing.T) { name: "stopwatch vec second label set", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.stopwatchvec", metric.Metadata{ + "test.stopwatchvec", metric.InitMetadata(metric.Metadata{ Name: "test.stopwatchvec", Help: "A test stopwatch vec", - }, []string{"store"}) + }), []string{"store"}) }, rows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.stopwatchvec", @@ -361,10 +361,10 @@ func TestDeleteMetricLocked(t *testing.T) { name: "scalar gauge", setup: func() func() { return cmmetrics.TestingRegisterClusterMetric( - "test.gauge", metric.Metadata{ + 
"test.gauge", metric.InitMetadata(metric.Metadata{ Name: "test.gauge", Help: "A test gauge", - }) + })) }, insertRows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.gauge", Type: "GAUGE", Value: 42, @@ -382,10 +382,10 @@ func TestDeleteMetricLocked(t *testing.T) { name: "vec metric label set", setup: func() func() { return cmmetrics.TestingRegisterLabeledClusterMetric( - "test.vec", metric.Metadata{ + "test.vec", metric.InitMetadata(metric.Metadata{ Name: "test.vec", Help: "A test vector gauge", - }, []string{"store"}) + }), []string{"store"}) }, insertRows: []cmwatcher.ClusterMetricRow{{ ID: 1, Name: "test.vec", @@ -441,14 +441,14 @@ func TestStop(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - defer cmmetrics.TestingRegisterClusterMetric("gauge_one", metric.Metadata{ + defer cmmetrics.TestingRegisterClusterMetric("gauge_one", metric.InitMetadata(metric.Metadata{ Name: "gauge_one", Help: "First gauge", - })() - defer cmmetrics.TestingRegisterClusterMetric("gauge_two", metric.Metadata{ + }))() + defer cmmetrics.TestingRegisterClusterMetric("gauge_two", metric.InitMetadata(metric.Metadata{ Name: "gauge_two", Help: "Second gauge", - })() + }))() ctx := context.Background() u, reg := makeTestRegistrySyncer() @@ -477,18 +477,18 @@ func TestOnRefresh(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - defer cmmetrics.TestingRegisterClusterMetric("gauge.old", metric.Metadata{ + defer cmmetrics.TestingRegisterClusterMetric("gauge.old", metric.InitMetadata(metric.Metadata{ Name: "gauge.old", Help: "Old gauge", - })() - defer cmmetrics.TestingRegisterClusterMetric("gauge.new", metric.Metadata{ + }))() + defer cmmetrics.TestingRegisterClusterMetric("gauge.new", metric.InitMetadata(metric.Metadata{ Name: "gauge.new", Help: "New gauge", - })() - defer cmmetrics.TestingRegisterClusterMetric("counter.new", metric.Metadata{ + }))() + defer cmmetrics.TestingRegisterClusterMetric("counter.new", 
metric.InitMetadata(metric.Metadata{ Name: "counter.new", Help: "New counter", - })() + }))() ctx := context.Background() u, reg := makeTestRegistrySyncer() diff --git a/pkg/obs/clustermetrics/cmwatcher/cluster_metric_row_test.go b/pkg/obs/clustermetrics/cmwatcher/cluster_metric_row_test.go index 87f199a74757..d420dcd4a48a 100644 --- a/pkg/obs/clustermetrics/cmwatcher/cluster_metric_row_test.go +++ b/pkg/obs/clustermetrics/cmwatcher/cluster_metric_row_test.go @@ -97,14 +97,14 @@ func TestToMetric(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - gaugeMeta := metric.Metadata{Name: "test.gauge", Help: "A test gauge"} - counterMeta := metric.Metadata{Name: "test.counter", Help: "A test counter"} - stopwatchMeta := metric.Metadata{Name: "test.stopwatch", Help: "A test stopwatch"} - labeledGaugeMeta := metric.Metadata{Name: "test.labeled.gauge", Help: "A labeled gauge"} - labeledCounterMeta := metric.Metadata{Name: "test.labeled.counter", Help: "A labeled counter"} - labeledStopwatchMeta := metric.Metadata{ + gaugeMeta := metric.InitMetadata(metric.Metadata{Name: "test.gauge", Help: "A test gauge"}) + counterMeta := metric.InitMetadata(metric.Metadata{Name: "test.counter", Help: "A test counter"}) + stopwatchMeta := metric.InitMetadata(metric.Metadata{Name: "test.stopwatch", Help: "A test stopwatch"}) + labeledGaugeMeta := metric.InitMetadata(metric.Metadata{Name: "test.labeled.gauge", Help: "A labeled gauge"}) + labeledCounterMeta := metric.InitMetadata(metric.Metadata{Name: "test.labeled.counter", Help: "A labeled counter"}) + labeledStopwatchMeta := metric.InitMetadata(metric.Metadata{ Name: "test.labeled.stopwatch", Help: "A labeled stopwatch", - } + }) cleanupGauge := clustermetrics.TestingRegisterClusterMetric("test.gauge", gaugeMeta) defer cleanupGauge() diff --git a/pkg/obs/clustermetrics/cmwriter/sql_store.go b/pkg/obs/clustermetrics/cmwriter/sql_store.go index 71d7da9ec90a..30aa6f93adbf 100644 --- 
a/pkg/obs/clustermetrics/cmwriter/sql_store.go +++ b/pkg/obs/clustermetrics/cmwriter/sql_store.go @@ -175,7 +175,7 @@ func (s *SQLStore) Get(ctx context.Context) ([]cmmetrics.WritableMetric, error) name := string(tree.MustBeDString(row[0])) typ := string(tree.MustBeDString(row[1])) value := int64(tree.MustBeDInt(row[2])) - md := metric.Metadata{Name: name} + md := metric.InitMetadata(metric.Metadata{Name: name}) switch typ { case "COUNTER": c := cmmetrics.NewCounter(md) diff --git a/pkg/obs/clustermetrics/cmwriter/sql_store_test.go b/pkg/obs/clustermetrics/cmwriter/sql_store_test.go index 863706880217..39ec12ab641c 100644 --- a/pkg/obs/clustermetrics/cmwriter/sql_store_test.go +++ b/pkg/obs/clustermetrics/cmwriter/sql_store_test.go @@ -61,7 +61,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("counter", func(t *testing.T) { env := newSQLStoreTestEnv(t) - c := cmmetrics.NewCounter(metric.Metadata{Name: "test.counter"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.counter"})) c.Inc(42) require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{c})) @@ -76,7 +76,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("gauge", func(t *testing.T) { env := newSQLStoreTestEnv(t) - g := cmmetrics.NewGauge(metric.Metadata{Name: "test.gauge"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.gauge"})) g.Update(100) require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{g})) @@ -91,7 +91,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("counter accumulates on upsert", func(t *testing.T) { env := newSQLStoreTestEnv(t) - c := cmmetrics.NewCounter(metric.Metadata{Name: "test.counter.accum"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.counter.accum"})) c.Inc(10) require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{c})) @@ -110,7 +110,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("gauge replaces on upsert", func(t *testing.T) { env := 
newSQLStoreTestEnv(t) - g := cmmetrics.NewGauge(metric.Metadata{Name: "test.gauge.replace"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.gauge.replace"})) g.Update(100) require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{g})) @@ -127,7 +127,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("stopwatch", func(t *testing.T) { env := newSQLStoreTestEnv(t) - sw := cmmetrics.NewWriteStopwatch(metric.Metadata{Name: "test.stopwatch"}, timeutil.DefaultTimeSource{}) + sw := cmmetrics.NewWriteStopwatch(metric.InitMetadata(metric.Metadata{Name: "test.stopwatch"}), timeutil.DefaultTimeSource{}) sw.SetStartTime() require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{sw})) @@ -142,7 +142,7 @@ func TestSQLStore_Write(t *testing.T) { t.Run("stopwatch replaces on upsert", func(t *testing.T) { env := newSQLStoreTestEnv(t) - sw := cmmetrics.NewWriteStopwatch(metric.Metadata{Name: "test.stopwatch.replace"}, timeutil.DefaultTimeSource{}) + sw := cmmetrics.NewWriteStopwatch(metric.InitMetadata(metric.Metadata{Name: "test.stopwatch.replace"}), timeutil.DefaultTimeSource{}) // Write initial timestamp. 
sw.Gauge.Update(1000) @@ -160,8 +160,8 @@ func TestSQLStore_Write(t *testing.T) { t.Run("multiple metrics", func(t *testing.T) { env := newSQLStoreTestEnv(t) - c := cmmetrics.NewCounter(metric.Metadata{Name: "test.multi.counter"}) - g := cmmetrics.NewGauge(metric.Metadata{Name: "test.multi.gauge"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.multi.counter"})) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.multi.gauge"})) c.Inc(7) g.Update(200) @@ -188,7 +188,7 @@ func TestSQLStore_Delete(t *testing.T) { t.Run("delete scalar", func(t *testing.T) { env := newSQLStoreTestEnv(t) - c := cmmetrics.NewCounter(metric.Metadata{Name: "test.del.counter"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.del.counter"})) c.Inc(42) require.NoError(t, env.store.Write(env.ctx, []cmmetrics.WritableMetric{c})) @@ -202,7 +202,7 @@ func TestSQLStore_Delete(t *testing.T) { t.Run("delete labeled metric", func(t *testing.T) { env := newSQLStoreTestEnv(t) - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "test.del.gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "test.del.gv"}), "region") // Write two children. var children []cmmetrics.WritableMetric @@ -229,15 +229,15 @@ func TestSQLStore_Delete(t *testing.T) { env := newSQLStoreTestEnv(t) // Deleting a metric that doesn't exist should not error. 
- c := cmmetrics.NewCounter(metric.Metadata{Name: "nonexistent.metric"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "nonexistent.metric"})) require.NoError(t, env.store.Delete(env.ctx, []cmmetrics.WritableMetric{c})) }) t.Run("delete multiple", func(t *testing.T) { env := newSQLStoreTestEnv(t) - c1 := cmmetrics.NewCounter(metric.Metadata{Name: "test.del.m1"}) - c2 := cmmetrics.NewCounter(metric.Metadata{Name: "test.del.m2"}) - g := cmmetrics.NewGauge(metric.Metadata{Name: "test.del.m3"}) + c1 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.del.m1"})) + c2 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.del.m2"})) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.del.m3"})) c1.Inc(1) c2.Inc(2) g.Update(3) diff --git a/pkg/obs/clustermetrics/cmwriter/writer_metrics.go b/pkg/obs/clustermetrics/cmwriter/writer_metrics.go index 3a98cebcad25..f438216dfc8a 100644 --- a/pkg/obs/clustermetrics/cmwriter/writer_metrics.go +++ b/pkg/obs/clustermetrics/cmwriter/writer_metrics.go @@ -8,36 +8,36 @@ package cmwriter import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( - metaFlushLatency = metric.Metadata{ + metaFlushLatency = metric.InitMetadata(metric.Metadata{ Name: "obs.clustermetrics.flush.latency", Help: "Latency of cluster metrics flushes to storage", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - metaFlushCount = metric.Metadata{ + }) + metaFlushCount = metric.InitMetadata(metric.Metadata{ Name: "obs.clustermetrics.flush.count", Help: "Number of cluster metrics flush operations", Measurement: "Flushes", Unit: metric.Unit_COUNT, - } - metaFlushErrors = metric.Metadata{ + }) + metaFlushErrors = metric.InitMetadata(metric.Metadata{ Name: "obs.clustermetrics.flush.errors", Help: "Number of flush errors (write or delete failures)", Measurement: "Errors", Unit: metric.Unit_COUNT, - } - metaMetricsWritten = metric.Metadata{ + }) + metaMetricsWritten = 
metric.InitMetadata(metric.Metadata{ Name: "obs.clustermetrics.flush.metrics_written", Help: "Number of individual metrics written per flush", Measurement: "Metrics", Unit: metric.Unit_COUNT, - } - metaMetricsDeleted = metric.Metadata{ + }) + metaMetricsDeleted = metric.InitMetadata(metric.Metadata{ Name: "obs.clustermetrics.flush.metrics_deleted", Help: "Number of individual metrics deleted per flush", Measurement: "Metrics", Unit: metric.Unit_COUNT, - } + }) ) // WriterMetrics holds operational metrics for the Writer. diff --git a/pkg/obs/clustermetrics/cmwriter/writer_test.go b/pkg/obs/clustermetrics/cmwriter/writer_test.go index cc11e323efa9..953c1fd9b512 100644 --- a/pkg/obs/clustermetrics/cmwriter/writer_test.go +++ b/pkg/obs/clustermetrics/cmwriter/writer_test.go @@ -142,7 +142,7 @@ func TestWriter_Flush(t *testing.T) { }{{ name: "counter incremented is stored", setup: func(env *testEnv) { - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) }, @@ -150,7 +150,7 @@ func TestWriter_Flush(t *testing.T) { }, { name: "counter accumulates multiple increments", setup: func(env *testEnv) { - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) c.Inc(3) @@ -159,14 +159,14 @@ func TestWriter_Flush(t *testing.T) { }, { name: "counter not incremented is not stored", setup: func(env *testEnv) { - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) }, wantNotStored: []string{"c"}, }, { name: "gauge updated is stored", setup: func(env *testEnv) { - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Update(100) }, @@ -174,14 +174,14 @@ func 
TestWriter_Flush(t *testing.T) { }, { name: "gauge not updated is not stored", setup: func(env *testEnv) { - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) }, wantNotStored: []string{"g"}, }, { name: "gauge Inc marks dirty", setup: func(env *testEnv) { - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Inc(10) }, @@ -189,7 +189,7 @@ func TestWriter_Flush(t *testing.T) { }, { name: "gauge Dec marks dirty", setup: func(env *testEnv) { - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Dec(-5) // Dec of negative = increase }, @@ -197,8 +197,8 @@ func TestWriter_Flush(t *testing.T) { }, { name: "multiple counters all stored", setup: func(env *testEnv) { - c1 := cmmetrics.NewCounter(metric.Metadata{Name: "c1"}) - c2 := cmmetrics.NewCounter(metric.Metadata{Name: "c2"}) + c1 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c1"})) + c2 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c2"})) env.writer.AddMetric(c1) env.writer.AddMetric(c2) c1.Inc(10) @@ -208,8 +208,8 @@ func TestWriter_Flush(t *testing.T) { }, { name: "multiple gauges all stored", setup: func(env *testEnv) { - g1 := cmmetrics.NewGauge(metric.Metadata{Name: "g1"}) - g2 := cmmetrics.NewGauge(metric.Metadata{Name: "g2"}) + g1 := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g1"})) + g2 := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g2"})) env.writer.AddMetric(g1) env.writer.AddMetric(g2) g1.Update(100) @@ -219,8 +219,8 @@ func TestWriter_Flush(t *testing.T) { }, { name: "mixed counters and gauges all stored", setup: func(env *testEnv) { - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) - g := 
cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(c) env.writer.AddMetric(g) c.Inc(10) @@ -230,10 +230,10 @@ func TestWriter_Flush(t *testing.T) { }, { name: "only dirty metrics stored", setup: func(env *testEnv) { - c1 := cmmetrics.NewCounter(metric.Metadata{Name: "c1"}) - c2 := cmmetrics.NewCounter(metric.Metadata{Name: "c2"}) - g1 := cmmetrics.NewGauge(metric.Metadata{Name: "g1"}) - g2 := cmmetrics.NewGauge(metric.Metadata{Name: "g2"}) + c1 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c1"})) + c2 := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c2"})) + g1 := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g1"})) + g2 := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g2"})) env.writer.AddMetric(c1) env.writer.AddMetric(c2) env.writer.AddMetric(g1) @@ -248,7 +248,7 @@ func TestWriter_Flush(t *testing.T) { }, { name: "stopwatch started is stored", setup: func(env *testEnv) { - sw := cmmetrics.NewWriteStopwatch(metric.Metadata{Name: "sw"}, timeutil.DefaultTimeSource{}) + sw := cmmetrics.NewWriteStopwatch(metric.InitMetadata(metric.Metadata{Name: "sw"}), timeutil.DefaultTimeSource{}) env.writer.AddMetric(sw) sw.SetStartTime() }, @@ -260,7 +260,7 @@ func TestWriter_Flush(t *testing.T) { }, { name: "stopwatch not started is not stored", setup: func(env *testEnv) { - sw := cmmetrics.NewWriteStopwatch(metric.Metadata{Name: "sw"}, timeutil.DefaultTimeSource{}) + sw := cmmetrics.NewWriteStopwatch(metric.InitMetadata(metric.Metadata{Name: "sw"}), timeutil.DefaultTimeSource{}) env.writer.AddMetric(sw) }, wantNotStored: []string{"sw"}, @@ -272,8 +272,8 @@ func TestWriter_Flush(t *testing.T) { Status *cmmetrics.Gauge } m := TestMetrics{ - Count: cmmetrics.NewCounter(metric.Metadata{Name: "count"}), - Status: 
cmmetrics.NewGauge(metric.Metadata{Name: "status"}), + Count: cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "count"})), + Status: cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "status"})), } env.writer.AddMetricStruct(&m) m.Count.Inc(15) @@ -308,7 +308,7 @@ func TestWriter_Flush(t *testing.T) { t.Run("gauge unchanged on second flush not written", func(t *testing.T) { env := newTestEnv() - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Update(50) @@ -327,7 +327,7 @@ func TestWriter_Flush(t *testing.T) { t.Run("gauge retains value after flush", func(t *testing.T) { env := newTestEnv() - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Update(42) @@ -338,7 +338,7 @@ func TestWriter_Flush(t *testing.T) { t.Run("counter accumulates across increments then resets", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) @@ -359,8 +359,8 @@ func TestWriter_Flush(t *testing.T) { t.Run("mixed dirty and clean on second flush", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(c) env.writer.AddMetric(g) @@ -390,7 +390,7 @@ func TestWriter_OperationalMetrics(t *testing.T) { t.Run("records flush metrics", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(1) @@ -428,7 +428,7 
@@ func TestWriter_PeriodicFlush(t *testing.T) { writerMetrics := NewWriterMetrics() w := NewWriterWithStore(store, writerMetrics) - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) w.AddMetric(c) c.Inc(5) @@ -455,7 +455,7 @@ func TestWriter_PeriodicFlush(t *testing.T) { writerMetrics := NewWriterMetrics() w := NewWriterWithStore(store, writerMetrics) - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) w.AddMetric(c) c.Inc(5) @@ -482,7 +482,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("gauge vec children persisted with labels", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region", "status") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region", "status") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east", "status": "ok"}, 10) @@ -500,7 +500,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("counter vec children persisted with labels", func(t *testing.T) { env := newTestEnv() - cv := cmmetrics.NewCounterVec(metric.Metadata{Name: "cv"}, "method") + cv := cmmetrics.NewCounterVec(metric.InitMetadata(metric.Metadata{Name: "cv"}), "method") env.writer.AddMetric(cv) cv.Inc(map[string]string{"method": "GET"}, 5) @@ -518,7 +518,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("vec with no children is not flushed", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) env.writer.Flush(env.ctx) @@ -527,7 +527,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("vec children cleared after flush", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := 
cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east"}, 10) @@ -546,8 +546,8 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("only dirty vec children are written", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") - cv := cmmetrics.NewCounterVec(metric.Metadata{Name: "cv"}, "method") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") + cv := cmmetrics.NewCounterVec(metric.InitMetadata(metric.Metadata{Name: "cv"}), "method") env.writer.AddMetric(gv) env.writer.AddMetric(cv) @@ -561,7 +561,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("stopwatch vec children persisted with labels", func(t *testing.T) { env := newTestEnv() - sv := cmmetrics.NewWriteStopwatchVec(metric.Metadata{Name: "sv"}, timeutil.DefaultTimeSource{}, "job") + sv := cmmetrics.NewWriteStopwatchVec(metric.InitMetadata(metric.Metadata{Name: "sv"}), timeutil.DefaultTimeSource{}, "job") env.writer.AddMetric(sv) sv.SetStartTime(map[string]string{"job": "backup"}) @@ -579,7 +579,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("stopwatch vec with no children is not flushed", func(t *testing.T) { env := newTestEnv() - sv := cmmetrics.NewWriteStopwatchVec(metric.Metadata{Name: "sv"}, timeutil.DefaultTimeSource{}, "job") + sv := cmmetrics.NewWriteStopwatchVec(metric.InitMetadata(metric.Metadata{Name: "sv"}), timeutil.DefaultTimeSource{}, "job") env.writer.AddMetric(sv) env.writer.Flush(env.ctx) @@ -588,7 +588,7 @@ func TestWriter_VecMetrics(t *testing.T) { t.Run("stopwatch vec children cleared after flush", func(t *testing.T) { env := newTestEnv() - sv := cmmetrics.NewWriteStopwatchVec(metric.Metadata{Name: "sv"}, timeutil.DefaultTimeSource{}, "job") + sv := cmmetrics.NewWriteStopwatchVec(metric.InitMetadata(metric.Metadata{Name: "sv"}), timeutil.DefaultTimeSource{}, "job") 
env.writer.AddMetric(sv) sv.SetStartTime(map[string]string{"job": "backup"}) @@ -654,7 +654,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar counter delete", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) @@ -671,7 +671,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar gauge delete", func(t *testing.T) { env := newTestEnv() - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Update(42) @@ -688,7 +688,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar inc then delete before flush", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) @@ -704,7 +704,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar delete then inc before flush", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Delete() @@ -720,7 +720,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar delete then update gauge", func(t *testing.T) { env := newTestEnv() - g := cmmetrics.NewGauge(metric.Metadata{Name: "g"}) + g := cmmetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "g"})) env.writer.AddMetric(g) g.Update(10) @@ -738,7 +738,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("vec single child delete", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east"}, 10) @@ -758,7 +758,7 @@ func 
TestWriter_Delete(t *testing.T) { t.Run("vec all children deleted", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east"}, 10) @@ -774,7 +774,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("vec delete and re-add same cycle", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east"}, 10) @@ -793,7 +793,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("counter vec delete", func(t *testing.T) { env := newTestEnv() - cv := cmmetrics.NewCounterVec(metric.Metadata{Name: "cv"}, "method") + cv := cmmetrics.NewCounterVec(metric.InitMetadata(metric.Metadata{Name: "cv"}), "method") env.writer.AddMetric(cv) cv.Inc(map[string]string{"method": "GET"}, 5) @@ -813,7 +813,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("stopwatch vec delete", func(t *testing.T) { env := newTestEnv() sv := cmmetrics.NewWriteStopwatchVec( - metric.Metadata{Name: "sv"}, timeutil.DefaultTimeSource{}, "job", + metric.InitMetadata(metric.Metadata{Name: "sv"}), timeutil.DefaultTimeSource{}, "job", ) env.writer.AddMetric(sv) @@ -834,7 +834,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar stopwatch delete", func(t *testing.T) { env := newTestEnv() sw := cmmetrics.NewWriteStopwatch( - metric.Metadata{Name: "sw"}, timeutil.DefaultTimeSource{}, + metric.InitMetadata(metric.Metadata{Name: "sw"}), timeutil.DefaultTimeSource{}, ) env.writer.AddMetric(sw) @@ -852,7 +852,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("scalar inc after flushed delete is noop", func(t *testing.T) { env := newTestEnv() - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := 
cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) env.writer.AddMetric(c) c.Inc(5) @@ -883,7 +883,7 @@ func TestWriter_Delete(t *testing.T) { writerMetrics := NewWriterMetrics() w := NewWriterWithStore(es, writerMetrics) - c := cmmetrics.NewCounter(metric.Metadata{Name: "c"}) + c := cmmetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "c"})) w.AddMetric(c) c.Inc(5) w.Flush(ctx) @@ -909,7 +909,7 @@ func TestWriter_Delete(t *testing.T) { t.Run("operational metrics", func(t *testing.T) { env := newTestEnv() - gv := cmmetrics.NewGaugeVec(metric.Metadata{Name: "gv"}, "region") + gv := cmmetrics.NewGaugeVec(metric.InitMetadata(metric.Metadata{Name: "gv"}), "region") env.writer.AddMetric(gv) gv.Update(map[string]string{"region": "us-east"}, 10) diff --git a/pkg/obs/clustermetrics/doc.go b/pkg/obs/clustermetrics/doc.go index 6043265dbf67..ed0a5fdb0cbf 100644 --- a/pkg/obs/clustermetrics/doc.go +++ b/pkg/obs/clustermetrics/doc.go @@ -53,7 +53,7 @@ package clustermetrics // There's an api for creating, and registering cluster metrics similar to // those seen in the util/metrics package. // -// var counter = clustermetrics.NewCounter(metric.Metadata{Name: "test.counter"}) +// var counter = clustermetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.counter"})) // ... // writer.AddMetric(counter) // cmwriter.Writer // diff --git a/pkg/raft/metrics.go b/pkg/raft/metrics.go index cf5a21010859..31530332d044 100644 --- a/pkg/raft/metrics.go +++ b/pkg/raft/metrics.go @@ -19,44 +19,44 @@ type Metrics struct { } var ( - acceptedFortificationResponsesMeta = metric.Metadata{ + acceptedFortificationResponsesMeta = metric.InitMetadata(metric.Metadata{ Name: "raft.fortification_resp.accepted", Help: "The number of accepted fortification responses. 
Calculated on the raft leader", Measurement: "Accepted Fortification Responses", Unit: metric.Unit_COUNT, - } - rejectedFortificationResponsesMeta = metric.Metadata{ + }) + rejectedFortificationResponsesMeta = metric.InitMetadata(metric.Metadata{ Name: "raft.fortification_resp.rejected", Help: "The number of rejected fortification responses. Calculated on the raft leader", Measurement: "Rejected Fortification Responses", Unit: metric.Unit_COUNT, - } - skippedFortificationDueToLackOfSupportMeta = metric.Metadata{ + }) + skippedFortificationDueToLackOfSupportMeta = metric.InitMetadata(metric.Metadata{ Name: "raft.fortification.skipped_no_support", Help: "The number of fortification requests that were skipped (not sent) due to lack of store" + " liveness support", Measurement: "Skipped Fortifications", Unit: metric.Unit_COUNT, - } + }) - metaRaftFlowsEnteredProbe = metric.Metadata{ + metaRaftFlowsEnteredProbe = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.entered.state_probe", Help: "The number of leader->peer flows transitioned to StateProbe", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaRaftFlowsEnteredReplicate = metric.Metadata{ + }) + metaRaftFlowsEnteredReplicate = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.entered.state_replicate", Help: "The number of leader->peer flows transitioned to StateReplicate", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaRaftFlowsEnteredSnapshot = metric.Metadata{ + }) + metaRaftFlowsEnteredSnapshot = metric.InitMetadata(metric.Metadata{ Name: "raft.flows.entered.state_snapshot", Help: "The number of of leader->peer flows transitioned to StateSnapshot", Measurement: "Flows", Unit: metric.Unit_COUNT, - } + }) ) // NewMetrics creates a new Metrics instance with all related metric fields. 
diff --git a/pkg/rpc/clock_offset.go b/pkg/rpc/clock_offset.go index 1ebf17edb3ba..1f97f1a8d419 100644 --- a/pkg/rpc/clock_offset.go +++ b/pkg/rpc/clock_offset.go @@ -44,7 +44,7 @@ type RemoteClockMetrics struct { const avgLatencyMeasurementAge = 20.0 var ( - metaClockOffsetMeanNanos = metric.Metadata{ + metaClockOffsetMeanNanos = metric.InitMetadata(metric.Metadata{ Name: "clock-offset.meannanos", Help: "Mean clock offset with other nodes", Measurement: "Clock Offset", @@ -52,14 +52,14 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: "This metric gives the node's clock skew. In a well-configured environment, the actual clock skew would be in the sub-millisecond range. A skew exceeding 5 ms is likely due to a NTP service mis-configuration. Reducing the actual clock skew reduces the probability of uncertainty related conflicts and corresponding retires which has a positive impact on workload performance. Conversely, a larger actual clock skew increases the probability of retries due to uncertainty conflicts, with potentially measurable adverse effects on workload performance.", - } - metaClockOffsetStdDevNanos = metric.Metadata{ + }) + metaClockOffsetStdDevNanos = metric.InitMetadata(metric.Metadata{ Name: "clock-offset.stddevnanos", Help: "Stddev clock offset with other nodes", Measurement: "Clock Offset", Unit: metric.Unit_NANOSECONDS, - } - metaClockOffsetMedianNanos = metric.Metadata{ + }) + metaClockOffsetMedianNanos = metric.InitMetadata(metric.Metadata{ // An outlier resistant measure of centrality, useful for // diagnosing unhealthy nodes. 
// Demo: https://docs.google.com/spreadsheets/d/1gmzQxEVYDKb_b-Mn50ZTje-LqZw6TZwUxxUPY2rG69M/edit?gid=0#gid=0 @@ -67,8 +67,8 @@ var ( Help: "Median clock offset with other nodes", Measurement: "Clock Offset", Unit: metric.Unit_NANOSECONDS, - } - metaClockOffsetMedianAbsDevNanos = metric.Metadata{ + }) + metaClockOffsetMedianAbsDevNanos = metric.InitMetadata(metric.Metadata{ // An outlier resistant measure of dispersion, see // https://en.wikipedia.org/wiki/Median_absolute_deviation // and demo above. @@ -76,8 +76,8 @@ var ( Help: "Median Absolute Deviation (MAD) with other nodes", Measurement: "Clock Offset", Unit: metric.Unit_NANOSECONDS, - } - metaConnectionRoundTripLatency = metric.Metadata{ + }) + metaConnectionRoundTripLatency = metric.InitMetadata(metric.Metadata{ // NB: the name is legacy and should not be changed since customers // rely on it. Name: "round-trip-latency", @@ -94,9 +94,9 @@ rare or short-lived degradations. Measurement: "Round-trip time", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } + }) - metaDefaultConnectionRoundTripLatency = metric.Metadata{ + metaDefaultConnectionRoundTripLatency = metric.InitMetadata(metric.Metadata{ Name: "round-trip-default-class-latency", Help: `Distribution of round-trip latencies with other nodes. @@ -104,8 +104,8 @@ Similar to round-trip-latency, but only for default class connections. `, Measurement: "Round-trip time", Unit: metric.Unit_NANOSECONDS, - } - metaSystemConnectionRoundTripLatency = metric.Metadata{ + }) + metaSystemConnectionRoundTripLatency = metric.InitMetadata(metric.Metadata{ Name: "round-trip-system-class-latency", Help: `Distribution of round-trip latencies with other nodes. @@ -113,8 +113,8 @@ Similar to round-trip-latency, but only for system class connections. 
`, Measurement: "Round-trip time", Unit: metric.Unit_NANOSECONDS, - } - metaRangefeedConnectionRoundTripLatency = metric.Metadata{ + }) + metaRangefeedConnectionRoundTripLatency = metric.InitMetadata(metric.Metadata{ Name: "round-trip-rangefeed-class-latency", Help: `Distribution of round-trip latencies with other nodes. @@ -122,8 +122,8 @@ Similar to round-trip-latency, but only for rangefeed class connections. `, Measurement: "Round-trip time", Unit: metric.Unit_NANOSECONDS, - } - metaRaftConnectionRoundTripLatency = metric.Metadata{ + }) + metaRaftConnectionRoundTripLatency = metric.InitMetadata(metric.Metadata{ Name: "round-trip-raft-class-latency", Help: `Distribution of round-trip latencies with other nodes. @@ -131,7 +131,7 @@ Similar to round-trip-latency, but only for raft class connections. `, Measurement: "Round-trip time", Unit: metric.Unit_NANOSECONDS, - } + }) ) // A stateful trigger that fires once when exceeding a threshold, then must diff --git a/pkg/rpc/metrics.go b/pkg/rpc/metrics.go index 468a4f15c922..94f74212b55e 100644 --- a/pkg/rpc/metrics.go +++ b/pkg/rpc/metrics.go @@ -48,7 +48,7 @@ var ( // Together these metrics should provide a picture of the state of current // connections. - metaConnectionHealthy = metric.Metadata{ + metaConnectionHealthy = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.healthy", Help: "Gauge of current connections in a healthy state (i.e. 
bidirectionally connected and heartbeating)", Measurement: "Connections", @@ -56,9 +56,9 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `See Description.`, - } + }) - metaConnectionUnhealthy = metric.Metadata{ + metaConnectionUnhealthy = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.unhealthy", Help: "Gauge of current connections in an unhealthy state (not bidirectionally connected or heartbeating)", Measurement: "Connections", @@ -66,9 +66,9 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `If the value of this metric is greater than 0, this could indicate a network partition.`, - } + }) - metaConnectionInactive = metric.Metadata{ + metaConnectionInactive = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.inactive", Help: "Gauge of current connections in an inactive state and pending deletion; " + "these are not healthy but are not tracked as unhealthy either because " + @@ -76,9 +76,9 @@ var ( "for example if the node has since been seen under a new address", Measurement: "Connections", Unit: metric.Unit_COUNT, - } + }) - metaConnectionHealthyNanos = metric.Metadata{ + metaConnectionHealthyNanos = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.healthy_nanos", Help: `Gauge of nanoseconds of healthy connection time @@ -90,9 +90,9 @@ for how long a given peer has been connected`, Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `This can be useful for monitoring the stability and health of connections within your CockroachDB cluster.`, - } + }) - metaConnectionUnhealthyNanos = metric.Metadata{ + metaConnectionUnhealthyNanos = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.unhealthy_nanos", Help: `Gauge of nanoseconds of unhealthy connection time. 
@@ -104,9 +104,9 @@ for how long a given peer has been unreachable`, Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `If this duration is greater than 0, this could indicate how long a network partition has been occurring.`, - } + }) - metaConnectionHeartbeats = metric.Metadata{ + metaConnectionHeartbeats = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.heartbeats", Help: `Counter of successful heartbeats.`, Measurement: "Heartbeats", @@ -114,9 +114,9 @@ for how long a given peer has been unreachable`, Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `See Description.`, - } + }) - metaConnectionFailures = metric.Metadata{ + metaConnectionFailures = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.failures", Help: `Counter of failed connections. @@ -131,9 +131,9 @@ Decommissioned peers are excluded. Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `See Description.`, - } + }) - metaConnectionAvgRoundTripLatency = metric.Metadata{ + metaConnectionAvgRoundTripLatency = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.avg_round_trip_latency", Unit: metric.Unit_NANOSECONDS, Help: `Sum of exponentially weighted moving average of round-trip latencies, as measured through a gRPC RPC. @@ -155,9 +155,9 @@ is reset to zero. Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `This metric is helpful in understanding general network issues outside of CockroachDB that could be impacting the user’s workload.`, - } + }) - metaConnectionTCPRTT = metric.Metadata{ + metaConnectionTCPRTT = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.tcp_rtt", Unit: metric.Unit_NANOSECONDS, Help: `Kernel-level TCP round-trip time as measured by the Linux TCP stack. @@ -172,9 +172,9 @@ This metric is only available on Linux. 
Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `High TCP RTT values indicate network issues outside of CockroachDB that could be impacting the user's workload.`, - } + }) - metaConnectionTCPRTTVar = metric.Metadata{ + metaConnectionTCPRTTVar = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.tcp_rtt_var", Unit: metric.Unit_NANOSECONDS, Help: `Kernel-level TCP round-trip time variance as measured by the Linux TCP stack. @@ -189,8 +189,8 @@ This metric is only available on Linux. Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_NETWORKING, HowToUse: `High TCP RTT variance values indicate network stability issues outside of CockroachDB that could be impacting the user's workload.`, - } - metaConnectionConnected = metric.Metadata{ + }) + metaConnectionConnected = metric.InitMetadata(metric.Metadata{ Name: "rpc.connection.connected", Help: `Counter of TCP level connected connections. @@ -200,82 +200,82 @@ over this connection. 
`, Measurement: "Connections", Unit: metric.Unit_COUNT, - } - metaNetworkBytesEgress = metric.Metadata{ + }) + metaNetworkBytesEgress = metric.InitMetadata(metric.Metadata{ Name: "rpc.client.bytes.egress", Unit: metric.Unit_BYTES, Help: `Counter of TCP bytes sent via gRPC on connections we initiated.`, Measurement: "Bytes", - } - metaNetworkBytesIngress = metric.Metadata{ + }) + metaNetworkBytesIngress = metric.InitMetadata(metric.Metadata{ Name: "rpc.client.bytes.ingress", Unit: metric.Unit_BYTES, Help: `Counter of TCP bytes received via gRPC on connections we initiated.`, Measurement: "Bytes", - } - metaServerRequestDuration = metric.Metadata{ + }) + metaServerRequestDuration = metric.InitMetadata(metric.Metadata{ Name: "rpc.server.request.duration.nanos", Help: "Duration of an RPC request at the server in nanoseconds.", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, MetricType: prometheusgo.MetricType_HISTOGRAM, - } - metaServerRequestsTotal = metric.Metadata{ + }) + metaServerRequestsTotal = metric.InitMetadata(metric.Metadata{ Name: "rpc.server.requests.total", Help: "Total number of RPCs requests received by the server.", Measurement: "Requests", Unit: metric.Unit_COUNT, MetricType: prometheusgo.MetricType_COUNTER, - } - metaClientRequestsTotal = metric.Metadata{ + }) + metaClientRequestsTotal = metric.InitMetadata(metric.Metadata{ Name: "rpc.client.requests.total", Help: "Total number of RPC requests sent by the client.", Measurement: "Requests", Unit: metric.Unit_COUNT, MetricType: prometheusgo.MetricType_COUNTER, - } - metaClientRequestDuration = metric.Metadata{ + }) + metaClientRequestDuration = metric.InitMetadata(metric.Metadata{ Name: "rpc.client.request.duration.nanos", Help: "Duration of a RPC request at the client in nanoseconds.", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, MetricType: prometheusgo.MetricType_HISTOGRAM, - } - metaDRPCPoolSize = metric.Metadata{ + }) + metaDRPCPoolSize = metric.InitMetadata(metric.Metadata{ 
Name: "rpc.drpc.pool.size", Unit: metric.Unit_CONST, Help: "DRPC connection pool size.", Measurement: "Size", MetricType: prometheusgo.MetricType_GAUGE, - } - metaDRPCPoolConnectionHitsTotal = metric.Metadata{ + }) + metaDRPCPoolConnectionHitsTotal = metric.InitMetadata(metric.Metadata{ Name: "rpc.drpc.pool.hits.total", Unit: metric.Unit_COUNT, Help: "DRPC connection pool cache hits.", Measurement: "Connections", MetricType: prometheusgo.MetricType_COUNTER, - } - metaDRPCPoolConnectionMissesTotal = metric.Metadata{ + }) + metaDRPCPoolConnectionMissesTotal = metric.InitMetadata(metric.Metadata{ Name: "rpc.drpc.pool.miss.total", Unit: metric.Unit_COUNT, Help: "DRPC connection pool cache misses.", Measurement: "Connections", MetricType: prometheusgo.MetricType_COUNTER, - } - metaDRPCEnabled = metric.Metadata{ + }) + metaDRPCEnabled = metric.InitMetadata(metric.Metadata{ Name: "rpc.drpc.enabled", Help: "1 if this node is using DRPC for internode RPC, 0 otherwise.", Measurement: "Enabled", Unit: metric.Unit_CONST, MetricType: prometheusgo.MetricType_GAUGE, - } - metaDRPCTLSHandshakeErrors = metric.Metadata{ + }) + metaDRPCTLSHandshakeErrors = metric.InitMetadata(metric.Metadata{ Name: "rpc.drpc.tls.handshake.errors", Unit: metric.Unit_COUNT, Measurement: "Errors", Help: "Number of TLS handshake errors for DRPC connections.", MetricType: prometheusgo.MetricType_COUNTER, - } + }) ) func (m *Metrics) makeLabels( diff --git a/pkg/security/certificate_metrics.go b/pkg/security/certificate_metrics.go index 7ba8adbdaf58..0c8a6e5b30d1 100644 --- a/pkg/security/certificate_metrics.go +++ b/pkg/security/certificate_metrics.go @@ -53,7 +53,7 @@ var _ metric.Struct = (*Metrics)(nil) func (m *Metrics) MetricStruct() {} var ( - metaCAExpiration = metric.Metadata{ + metaCAExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.ca", Help: "Expiration for the CA certificate. 
0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -63,8 +63,8 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ca"), - } - metaClientCAExpiration = metric.Metadata{ + }) + metaClientCAExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.client-ca", Help: "Expiration for the client CA certificate. 0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -74,8 +74,8 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client-ca"), - } - metaClientExpiration = metric.Metadata{ + }) + metaClientExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.client", Help: "Minimum expiration for client certificates, labeled by SQL user. 0 means no " + "certificate or error. ", @@ -83,8 +83,8 @@ var ( Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client"), - } - metaUICAExpiration = metric.Metadata{ + }) + metaUICAExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.ui-ca", Help: "Expiration for the UI CA certificate. 0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -94,8 +94,8 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ui-ca"), - } - metaNodeExpiration = metric.Metadata{ + }) + metaNodeExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.node", Help: "Expiration for the node certificate. 
0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -105,8 +105,8 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "node"), - } - metaNodeClientExpiration = metric.Metadata{ + }) + metaNodeClientExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.node-client", Help: "Expiration for the node's client certificate. 0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -116,8 +116,8 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "node-client"), - } - metaUIExpiration = metric.Metadata{ + }) + metaUIExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.ui", Help: "Expiration for the UI certificate. 0 means no certificate or error.", Measurement: "Certificate Expiration", @@ -127,96 +127,96 @@ var ( HowToUse: "See Description.", LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ui"), - } - metaTenantCAExpiration = metric.Metadata{ + }) + metaTenantCAExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.ca-client-tenant", Help: "Expiration for the Tenant Client CA certificate. 0 means no certificate or error.", Measurement: "Certificate Expiration", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ca-client-tenant"), - } - metaTenantExpiration = metric.Metadata{ + }) + metaTenantExpiration = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.expiration.client-tenant", Help: "Expiration for the Tenant Client certificate. 
0 means no certificate or error.", Measurement: "Certificate Expiration", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.expiration", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client-tenant"), - } + }) - metaCATTL = metric.Metadata{ + metaCATTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.ca", Help: "Seconds till expiration for the CA certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ca"), - } - metaClientCATTL = metric.Metadata{ + }) + metaClientCATTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.client-ca", Help: "Seconds till expiration for the client CA certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client-ca"), - } - metaClientTTL = metric.Metadata{ + }) + metaClientTTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.client", Help: "Seconds till expiration for the client certificates, labeled by SQL user. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client"), - } - metaUICATTL = metric.Metadata{ + }) + metaUICATTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.ui-ca", Help: "Seconds till expiration for the UI CA certificate. 
0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ui-ca"), - } - metaNodeTTL = metric.Metadata{ + }) + metaNodeTTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.node", Help: "Seconds till expiration for the node certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "node"), - } - metaNodeClientTTL = metric.Metadata{ + }) + metaNodeClientTTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.node-client", Help: "Seconds till expiration for the node's client certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "node-client"), - } - metaUITTL = metric.Metadata{ + }) + metaUITTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.ui", Help: "Seconds till expiration for the UI certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ui"), - } - metaTenantCATTL = metric.Metadata{ + }) + metaTenantCATTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.ca-client-tenant", Help: "Seconds till expiration for the Tenant Client CA certificate. 
0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "ca-client-tenant"), - } - metaTenantTTL = metric.Metadata{ + }) + metaTenantTTL = metric.InitMetadata(metric.Metadata{ Name: "security.certificate.ttl.client-tenant", Help: "Seconds till expiration for the Tenant Client certificate. 0 means expired, no certificate or error.", Measurement: "Certificate TTL", Unit: metric.Unit_TIMESTAMP_SEC, LabeledName: "security.certificate.ttl", StaticLabels: metric.MakeLabelPairs(metric.LabelCertificateType, "client-tenant"), - } + }) ) // certClosure defines a way to expose a certificate to the below metric types. diff --git a/pkg/security/clientcert/cert_expiry_cache_test.go b/pkg/security/clientcert/cert_expiry_cache_test.go index 888a924d8843..0c947607cdae 100644 --- a/pkg/security/clientcert/cert_expiry_cache_test.go +++ b/pkg/security/clientcert/cert_expiry_cache_test.go @@ -496,8 +496,8 @@ func newCacheAndMetricsAndAccount( ctx context.Context, clock *timeutil.ManualTime, stopper *stop.Stopper, ) (*clientcert.Cache, *aggmetric.AggGauge, *aggmetric.AggFunctionalGauge, *mon.BoundAccount) { clientcert.CacheTTL = time.Minute - expirationMetrics := aggmetric.MakeBuilder("user").Gauge(metric.Metadata{}) - ttlMetrics := aggmetric.MakeBuilder("user").FunctionalGauge(metric.Metadata{}, func(_ []int64) int64 { return 0 }) + expirationMetrics := aggmetric.MakeBuilder("user").Gauge(metric.InitMetadata(metric.Metadata{})) + ttlMetrics := aggmetric.MakeBuilder("user").FunctionalGauge(metric.InitMetadata(metric.Metadata{}), func(_ []int64) int64 { return 0 }) account := mon.NewStandaloneUnlimitedAccount() cache := clientcert.NewCache(clock, account, expirationMetrics, ttlMetrics) return cache, expirationMetrics, ttlMetrics, account diff --git a/pkg/server/application_api/metrics_test.go 
b/pkg/server/application_api/metrics_test.go index 9cd5abb6de9d..3d0a60ce368b 100644 --- a/pkg/server/application_api/metrics_test.go +++ b/pkg/server/application_api/metrics_test.go @@ -80,13 +80,13 @@ func TestMetricMetadata_clusterMetrics(t *testing.T) { NodeID: roachpb.NodeID(1), } recorder.AddNode(metric.NewRegistry(), metric.NewRegistry(), metric.NewRegistry(), metric.NewRegistry(), metric.NewRegistry(), nodeDesc, 50, "foo:26257", "foo:26258", "foo:5432") - clustermetrics.TestingRegisterClusterMetric("my.custom.metric", metric.Metadata{ + clustermetrics.TestingRegisterClusterMetric("my.custom.metric", metric.InitMetadata(metric.Metadata{ Name: "my.custom.metric", Help: "help text", Measurement: "measurement", Unit: metric.Unit_COUNT, MetricType: prometheusgo.MetricType_GAUGE, - }) + })) md, _, _ := recorder.GetMetricsMetadata(true) require.Contains(t, md, "my.custom.metric") recordedNames := recorder.GetRecordedMetricNames(md) @@ -137,7 +137,7 @@ func TestHistogramMetricComputers(t *testing.T) { metricName := "my.metric" h := metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{Name: metricName}, + Metadata: metric.InitMetadata(metric.Metadata{Name: metricName}), Buckets: []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 100}, Mode: metric.HistogramModePrometheus, }) diff --git a/pkg/server/cluster_metrics_test.go b/pkg/server/cluster_metrics_test.go index 1733c0ba570f..6c9b6432f272 100644 --- a/pkg/server/cluster_metrics_test.go +++ b/pkg/server/cluster_metrics_test.go @@ -38,10 +38,10 @@ func TestClusterMetricsWriterIntegration(t *testing.T) { // Create and register test metrics up front so each table-driven // case can reference its metric via closure. 
- counter := clustermetrics.NewCounter(metric.Metadata{Name: "test.int.counter"}) - gauge := clustermetrics.NewGauge(metric.Metadata{Name: "test.int.gauge"}) - counterAccum := clustermetrics.NewCounter(metric.Metadata{Name: "test.int.counter.accum"}) - gaugeUpdate := clustermetrics.NewGauge(metric.Metadata{Name: "test.int.gauge.update"}) + counter := clustermetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.int.counter"})) + gauge := clustermetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.int.gauge"})) + counterAccum := clustermetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.int.counter.accum"})) + gaugeUpdate := clustermetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.int.gauge.update"})) writer.AddMetric(counter) writer.AddMetric(gauge) @@ -136,8 +136,8 @@ func TestClusterMetricsWriterIntegration(t *testing.T) { Latency *clustermetrics.Gauge } m := &appMetrics{ - Requests: clustermetrics.NewCounter(metric.Metadata{Name: "test.int.requests"}), - Latency: clustermetrics.NewGauge(metric.Metadata{Name: "test.int.latency"}), + Requests: clustermetrics.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test.int.requests"})), + Latency: clustermetrics.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test.int.latency"})), } writer.AddMetricStruct(m) diff --git a/pkg/server/http_metrics.go b/pkg/server/http_metrics.go index 8c332f80a3a2..b3b8001943a8 100644 --- a/pkg/server/http_metrics.go +++ b/pkg/server/http_metrics.go @@ -57,13 +57,13 @@ type HttpServerMetrics struct { } func NewServerHttpMetrics(reg *metric.Registry, settings *cluster.Settings) *HttpServerMetrics { - metadata := metric.Metadata{ + metadata := metric.InitMetadata(metric.Metadata{ Name: "server.http.request.duration.nanos", Help: "Duration of an HTTP request in nanoseconds.", Measurement: "Duration", Unit: metric.Unit_NANOSECONDS, MetricType: prometheusgo.MetricType_HISTOGRAM, - } + }) histogramVec := metric.NewExportedHistogramVec( 
metadata, diff --git a/pkg/server/license/license.go b/pkg/server/license/license.go index 1075378426c7..bc493dbf9c8d 100644 --- a/pkg/server/license/license.go +++ b/pkg/server/license/license.go @@ -22,7 +22,7 @@ import ( ) // LicenseTTLMetadata is the metric metadata for seconds until license expiry. -var LicenseTTLMetadata = metric.Metadata{ +var LicenseTTLMetadata = metric.InitMetadata(metric.Metadata{ // This metric name isn't namespaced for backwards compatibility. The // prior version of this metric was manually inserted into the prometheus // output. @@ -33,11 +33,11 @@ var LicenseTTLMetadata = metric.Metadata{ Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_EXPIRATIONS, HowToUse: "See Description.", -} +}) // AdditionalLicenseTTLMetadata is an additional metric for license TTL under // a different metric name. -var AdditionalLicenseTTLMetadata = metric.Metadata{ +var AdditionalLicenseTTLMetadata = metric.InitMetadata(metric.Metadata{ Name: "seconds_until_license_expiry", Help: "Seconds until license expiry (0 if no license present)", Measurement: "Seconds", @@ -45,7 +45,7 @@ var AdditionalLicenseTTLMetadata = metric.Metadata{ Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_EXPIRATIONS, HowToUse: "See Description.", -} +}) // trialLicenseExpiryTimestamp tracks the expiration timestamp of any trial // licenses that have been installed on this cluster (past or present). diff --git a/pkg/server/node.go b/pkg/server/node.go index 45b4f0d1e3d7..6966b734d3bd 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -98,7 +98,7 @@ const ( // Metric names. var ( - metaExecLatency = metric.Metadata{ + metaExecLatency = metric.InitMetadata(metric.Metadata{ Name: "exec.latency", Help: `Latency of batch KV requests (including errors) executed on this node. @@ -114,8 +114,8 @@ understanding the health of the KV layer. 
Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaExecSuccess = metric.Metadata{ + }) + metaExecSuccess = metric.InitMetadata(metric.Metadata{ Name: "exec.success", Help: `Number of batch KV requests executed successfully on this node. @@ -124,8 +124,8 @@ or a transaction restart/abort error. `, Measurement: "Batch KV Requests", Unit: metric.Unit_COUNT, - } - metaExecError = metric.Metadata{ + }) + metaExecError = metric.InitMetadata(metric.Metadata{ Name: "exec.error", Help: `Number of batch KV requests that failed to execute on this node. @@ -134,49 +134,49 @@ other errors expected during normal operation, such as ConditionFailedError. This metric is thus not an indicator of KV health.`, Measurement: "Batch KV Requests", Unit: metric.Unit_COUNT, - } + }) - metaInternalBatchRPCMethodCount = metric.Metadata{ + metaInternalBatchRPCMethodCount = metric.InitMetadata(metric.Metadata{ Name: "rpc.method.%s.recv", Help: "Number of %s requests processed", Measurement: "RPCs", Unit: metric.Unit_COUNT, - } + }) - metaInternalBatchRPCCount = metric.Metadata{ + metaInternalBatchRPCCount = metric.InitMetadata(metric.Metadata{ Name: "rpc.batches.recv", Help: "Number of batches processed", Measurement: "Batches", Unit: metric.Unit_COUNT, - } + }) - metaBatchRequestsBytes = metric.Metadata{ + metaBatchRequestsBytes = metric.InitMetadata(metric.Metadata{ Name: "batch_requests.bytes", Help: `Total byte count of batch requests processed`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaBatchResponsesBytes = metric.Metadata{ + }) + metaBatchResponsesBytes = metric.InitMetadata(metric.Metadata{ Name: "batch_responses.bytes", Help: `Total byte count of batch responses received`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCrossRegionBatchRequest = metric.Metadata{ + }) + metaCrossRegionBatchRequest = metric.InitMetadata(metric.Metadata{ Name: "batch_requests.cross_region.bytes", Help: `Total byte count of batch 
requests processed cross region when region tiers are configured`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCrossRegionBatchResponse = metric.Metadata{ + }) + metaCrossRegionBatchResponse = metric.InitMetadata(metric.Metadata{ Name: "batch_responses.cross_region.bytes", Help: `Total byte count of batch responses received cross region when region tiers are configured`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCrossZoneBatchRequest = metric.Metadata{ + }) + metaCrossZoneBatchRequest = metric.InitMetadata(metric.Metadata{ Name: "batch_requests.cross_zone.bytes", Help: `Total bytes of batch requests processed cross zones within the same region when zone tiers are configured. If region tiers are not set, it is @@ -185,8 +185,8 @@ This metric is thus not an indicator of KV health.`, configured across all nodes.`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - metaCrossZoneBatchResponse = metric.Metadata{ + }) + metaCrossZoneBatchResponse = metric.InitMetadata(metric.Metadata{ Name: "batch_responses.cross_zone.bytes", Help: `Total bytes of batch responses received cross zones within the same region when zone tiers are configured. If region tiers are not set, it is @@ -195,7 +195,7 @@ This metric is thus not an indicator of KV health.`, configured across all nodes.`, Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) ) // Cluster settings. 
diff --git a/pkg/server/status/BUILD.bazel b/pkg/server/status/BUILD.bazel index fcf61155224b..cba4cf72fc5a 100644 --- a/pkg/server/status/BUILD.bazel +++ b/pkg/server/status/BUILD.bazel @@ -43,7 +43,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/build", - "//pkg/internal/metricscan", + "//pkg/internal/codeowners", "//pkg/keys", "//pkg/kv", "//pkg/kv/kvbase", @@ -148,11 +148,14 @@ go_test( "runtime_stats_test.go", "runtime_test.go", ], + data = [ + "//:TEAMS.yaml", + "//.github:CODEOWNERS", + ], embed = [":status"], deps = [ "//pkg/base", "//pkg/build", - "//pkg/internal/metricscan", "//pkg/multitenant", "//pkg/roachpb", "//pkg/security/securityassets", diff --git a/pkg/server/status/recorder.go b/pkg/server/status/recorder.go index b15b3e2bc2c6..99e26f042c9d 100644 --- a/pkg/server/status/recorder.go +++ b/pkg/server/status/recorder.go @@ -21,7 +21,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/build" - "github.com/cockroachdb/cockroach/pkg/internal/metricscan" + "github.com/cockroachdb/cockroach/pkg/internal/codeowners" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvbase" @@ -144,30 +144,30 @@ type ScrapeMetrics struct { func newScrapeMetrics() *ScrapeMetrics { return &ScrapeMetrics{ - NameCount: metric.NewGauge(metric.Metadata{ + NameCount: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "obs.metric_export.name.count", Help: "Number of metric families (unique metric names) in the most recent Prometheus scrape", Measurement: "Metric Names", Unit: metric.Unit_COUNT, - }), - LineCount: metric.NewGauge(metric.Metadata{ + })), + LineCount: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "obs.metric_export.line.count", Help: "Total individual time series (all label combinations) in the most recent Prometheus scrape", Measurement: "Time Series", Unit: metric.Unit_COUNT, - }), - ChildCount: 
metric.NewExportedGaugeVec(metric.Metadata{ + })), + ChildCount: metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{ Name: "obs.metric_export.child.count", Help: "Exported-line-weighted child count per parent metric: histogram children count their expanded Prometheus lines, others count 1", Measurement: "Child Instances", Unit: metric.Unit_COUNT, - }, []string{"metric_name"}), - OwnerMetricCount: metric.NewExportedGaugeVec(metric.Metadata{ + }), []string{"metric_name"}), + OwnerMetricCount: metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{ Name: "obs.metric_export.codeowner.metric_count", Help: "Metric count per CODEOWNER team in the Prometheus scrape (histograms expand to buckets plus count and sum)", Measurement: "Metrics", Unit: metric.Unit_COUNT, - }, []string{"codeowner"}), + }), []string{"codeowner"}), } } @@ -207,11 +207,10 @@ type MetricsRecorder struct { // recent /_status/vars Prometheus scrape. scrapeMetrics *ScrapeMetrics - // metricOwners maps scraped metric names to CODEOWNER teams. Loaded - // once at construction from the generated metric_owners data. If the - // data is unavailable (e.g. in tests), this is nil and owner-based - // line counts are skipped. - metricOwners *metricscan.MetricOwners + // codeOwners resolves source file paths to owning teams via + // CODEOWNERS. Used at scrape time to attribute metric counts to + // teams by looking up the SourceFile from each metric's Metadata. + codeOwners *codeowners.CodeOwners // mu synchronizes the reading of node/store registries against the adding of // nodes/stores. 
Consequently, almost all uses of it only need to take an @@ -276,11 +275,11 @@ func NewMetricsRecorder( clock hlc.WallClock, settings *cluster.Settings, ) *MetricsRecorder { - mo, err := metricscan.DefaultMetricOwners() + co, err := codeowners.DefaultLoadCodeOwners() if err != nil { log.Ops.Warningf( context.Background(), - "could not load metric owners; codeowner line counts will be unavailable: %v", err, + "could not load CODEOWNERS; codeowner metric counts will be unavailable: %v", err, ) } mr := &MetricsRecorder{ @@ -293,7 +292,7 @@ func NewMetricsRecorder( tenantNameContainer: tenantNameContainer, prometheusExporter: metric.MakePrometheusExporter(), scrapeMetrics: newScrapeMetrics(), - metricOwners: mo, + codeOwners: co, } mr.mu.storeRegistries = make(map[roachpb.StoreID]*metric.Registry) mr.mu.stores = make(map[roachpb.StoreID]storeMetrics) @@ -393,12 +392,12 @@ func (mr *MetricsRecorder) AddNode( mr.mu.startedAt = startedAt // Create node ID gauge metric with host as a label. - metadata := metric.Metadata{ + metadata := metric.InitMetadata(metric.Metadata{ Name: "node-id", Help: "node ID with labels for advertised RPC and HTTP addresses", Measurement: "Node ID", Unit: metric.Unit_CONST, - } + }) metadata.AddLabel(advertiseAddrLabelKey, advertiseAddr) metadata.AddLabel(httpAddrLabelKey, httpAddr) @@ -607,12 +606,18 @@ func (mr *MetricsRecorder) updateScrapeMetrics(pm *metric.PrometheusExporter) { ) } - if mr.metricOwners != nil && codeownerMetricCountEnabled.Get(&mr.settings.SV) { + if mr.codeOwners != nil && codeownerMetricCountEnabled.Get(&mr.settings.SV) { + // Build an exported-name → source-file map from registered metadata. 
+ metadataMap := mr.collectMetricSourceFiles() + ownerCounts := make(map[string]int64) for _, family := range families { - owner, ok := mr.metricOwners.Resolve(family.GetName()) - if !ok { - owner = "unknown" + owner := "unknown" + if sf, ok := metadataMap[family.GetName()]; ok && sf != "" { + teams := mr.codeOwners.Match(sf) + if len(teams) > 0 { + owner = string(teams[0].Name()) + } } ownerCounts[owner] += countFamilyMetrics(family) } @@ -627,6 +632,26 @@ func (mr *MetricsRecorder) updateScrapeMetrics(pm *metric.PrometheusExporter) { } } +// collectMetricSourceFiles returns a map from exported metric name to +// the repo-relative source file where the metric is defined. The +// source file is read from each metric's Metadata.SourceFile field. +func (mr *MetricsRecorder) collectMetricSourceFiles() map[string]string { + allMd := make(map[string]metric.Metadata) + mr.mu.RLock() + defer mr.mu.RUnlock() + if mr.mu.nodeRegistry != nil { + mr.mu.nodeRegistry.WriteMetricsMetadata(allMd) + } + for _, reg := range mr.mu.storeRegistries { + reg.WriteMetricsMetadata(allMd) + } + result := make(map[string]string, len(allMd)) + for name, md := range allMd { + result[metric.ExportedName(name)] = md.SourceFile + } + return result +} + // ExportToGraphite sends the current metric values to a Graphite server. // It creates a new PrometheusExporter each time to avoid needing to worry // about races with mr.promMu.prometheusExporter. 
We are not as worried diff --git a/pkg/server/status/recorder_test.go b/pkg/server/status/recorder_test.go index 4fbc3e4b8288..306695d443e8 100644 --- a/pkg/server/status/recorder_test.go +++ b/pkg/server/status/recorder_test.go @@ -21,7 +21,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/build" - "github.com/cockroachdb/cockroach/pkg/internal/metricscan" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" @@ -150,20 +149,20 @@ func TestMetricsRecorderLabels(t *testing.T) { // Verify that the recorder exports metrics for tenants as text. // ======================================== - g := metric.NewGauge(metric.Metadata{Name: "some_metric"}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "some_metric"})) reg1.AddMetric(g) g.Update(123) - g2 := metric.NewGauge(metric.Metadata{Name: "some_metric"}) + g2 := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "some_metric"})) regTenant.AddMetric(g2) g2.Update(456) - c1 := metric.NewCounter(metric.Metadata{Name: "some_log_metric"}) + c1 := metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "some_log_metric"})) logReg.AddMetric(c1) c1.Inc(2) // Add a cluster metric to the tenant's cluster registry (which was set up in recorderTenant.AddNode). 
- clusterMetric := metric.NewGauge(metric.Metadata{Name: "cluster_metric"}) + clusterMetric := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "cluster_metric"})) clusterRegTenant.AddMetric(clusterMetric) clusterMetric.Update(789) @@ -331,10 +330,10 @@ func TestMetricsRecorderLabels(t *testing.T) { // ======================================== // Add aggmetrics with child labels to the app registry aggCounter := aggmetric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_messages", Category: metric.Metadata_CHANGEFEEDS, - }, + }), "some-id", "feed_id", ) appReg.AddMetric(aggCounter) @@ -345,10 +344,10 @@ func TestMetricsRecorderLabels(t *testing.T) { child2.Inc(200) aggGauge := aggmetric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "changefeed.lagging_ranges", Category: metric.Metadata_CHANGEFEEDS, - }, + }), "db", "status!!", ) appReg.AddMetric(aggGauge) @@ -360,10 +359,10 @@ func TestMetricsRecorderLabels(t *testing.T) { aggHistogram := aggmetric.NewHistogram( metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "changefeed.stage.downstream_client_send.latency", Category: metric.Metadata_CHANGEFEEDS, - }, + }), Duration: 10 * time.Second, BucketConfig: metric.IOLatencyBuckets, Mode: metric.HistogramModePrometheus, @@ -483,11 +482,11 @@ func TestMetricsRecorderLabels(t *testing.T) { // Add changefeed aggmetrics to the tenant registry with TsdbRecordLabeled tsdbRecordLabeled := true tenantAggCounter := aggmetric.NewCounter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_messages", TsdbRecordLabeled: &tsdbRecordLabeled, Category: metric.Metadata_CHANGEFEEDS, - }, + }), "scope", "feed_id", ) regTenant.AddMetric(tenantAggCounter) @@ -499,11 +498,11 @@ func TestMetricsRecorderLabels(t *testing.T) { tenantChild2.Inc(750) tenantAggGauge := aggmetric.NewGauge( - metric.Metadata{ + 
metric.InitMetadata(metric.Metadata{ Name: "changefeed.backfill_pending_ranges", TsdbRecordLabeled: &tsdbRecordLabeled, Category: metric.Metadata_CHANGEFEEDS, - }, + }), "scope", ) regTenant.AddMetric(tenantAggGauge) @@ -678,7 +677,7 @@ func TestRegistryRecorder_RecordChild(t *testing.T) { for _, m := range metrics { switch m.typ { case "aggcounter": - ac := aggmetric.NewCounter(metric.Metadata{Name: m.name}, tIDLabel) + ac := aggmetric.NewCounter(metric.InitMetadata(metric.Metadata{Name: m.name}), tIDLabel) store.registry.AddMetric(ac) for _, cm := range m.children { c := ac.AddChild(cm.tenantID) @@ -686,7 +685,7 @@ func TestRegistryRecorder_RecordChild(t *testing.T) { } addExpected(store.storeID.String(), &m) case "agggauge": - ag := aggmetric.NewGauge(metric.Metadata{Name: m.name}, tIDLabel) + ag := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{Name: m.name}), tIDLabel) store.registry.AddMetric(ag) for _, cm := range m.children { c := ag.AddChild(cm.tenantID) @@ -889,7 +888,7 @@ func TestMetricsRecorder(t *testing.T) { } // Add metric for node ID. 
- g := metric.NewGauge(metric.Metadata{Name: "node-id"}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "node-id"})) g.Update(int64(nodeDesc.NodeID)) addExpected("", "node-id", 1, 100, g.Value(), true) @@ -903,35 +902,35 @@ func TestMetricsRecorder(t *testing.T) { for _, data := range metricNames { switch data.typ { case "gauge": - g := metric.NewGauge(metric.Metadata{Name: reg.prefix + data.name}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name})) reg.reg.AddMetric(g) g.Update(data.val) addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode) case "floatgauge": - g := metric.NewGaugeFloat64(metric.Metadata{Name: reg.prefix + data.name}) + g := metric.NewGaugeFloat64(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name})) reg.reg.AddMetric(g) g.Update(float64(data.val)) addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode) case "counter": - c := metric.NewCounter(metric.Metadata{Name: reg.prefix + data.name}) + c := metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name})) reg.reg.AddMetric(c) c.Inc((data.val)) addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode) case "aggcounter": - ac := aggmetric.NewCounter(metric.Metadata{Name: reg.prefix + data.name}, "foo") + ac := aggmetric.NewCounter(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), "foo") reg.reg.AddMetric(ac) c := ac.AddChild("bar") c.Inc((data.val)) addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode) case "agggauge": - ac := aggmetric.NewGauge(metric.Metadata{Name: reg.prefix + data.name}, "foo") + ac := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), "foo") reg.reg.AddMetric(ac) c := ac.AddChild("bar") c.Inc((data.val)) addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode) case "histogram": h := metric.NewHistogram(metric.HistogramOptions{ - Metadata: 
metric.Metadata{Name: reg.prefix + data.name}, + Metadata: metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), Duration: time.Second, Buckets: []float64{1.0, 10.0, 100.0, 1000.0}, Mode: metric.HistogramModePrometheus, @@ -949,20 +948,20 @@ func TestMetricsRecorder(t *testing.T) { case "counterVec": // Note that we don't call addExpected for this case. metric.PrometheusVector // metrics should not be recorded into TSDB. - cv := metric.NewExportedCounterVec(metric.Metadata{Name: reg.prefix + data.name}, []string{"label1"}) + cv := metric.NewExportedCounterVec(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), []string{"label1"}) reg.reg.AddMetric(cv) cv.Inc(map[string]string{"label1": "label1"}, data.val) case "gaugeVec": // Note that we don't call addExpected for this case. metric.PrometheusVector // metrics should not be recorded into TSDB. - gv := metric.NewExportedGaugeVec(metric.Metadata{Name: reg.prefix + data.name}, []string{"label1"}) + gv := metric.NewExportedGaugeVec(metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), []string{"label1"}) reg.reg.AddMetric(gv) gv.Update(map[string]string{"label1": "label1"}, data.val) case "histogramVec": // Note that we don't call addExpected for this case. metric.PrometheusVector // metrics should not be recorded into TSDB. hv := metric.NewExportedHistogramVec( - metric.Metadata{Name: reg.prefix + data.name}, + metric.InitMetadata(metric.Metadata{Name: reg.prefix + data.name}), metric.IOLatencyBuckets, []string{"label1"}, ) @@ -1065,9 +1064,9 @@ func BenchmarkExtractValueAllocs(b *testing.B) { // Create a dummy histogram. 
h := metric.NewHistogram(metric.HistogramOptions{ Mode: metric.HistogramModePrometheus, - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "benchmark.histogram", - }, + }), Duration: 10 * time.Second, BucketConfig: metric.IOLatencyBuckets, }) @@ -1116,11 +1115,11 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { reg := metric.NewRegistry() // Add non-changefeed metrics - gauge := metric.NewGauge(metric.Metadata{Name: "sql.connections"}) + gauge := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "sql.connections"})) reg.AddMetric(gauge) gauge.Update(10) - counter := metric.NewCounter(metric.Metadata{Name: "kv.requests"}) + counter := metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "kv.requests"})) reg.AddMetric(counter) counter.Inc(5) @@ -1143,11 +1142,11 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { // Add changefeed aggmetric with child collection explicitly disabled tsdbRecordLabeled := false gauge := aggmetric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "changefeed.error_retries", TsdbRecordLabeled: &tsdbRecordLabeled, Category: metric.Metadata_CHANGEFEEDS, - }, + }), "job_id", "feed_id", ) reg.AddMetric(gauge) @@ -1174,10 +1173,10 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { reg := metric.NewRegistry() // Create an aggmetric which supports child metrics and is in the allowed list - gauge := aggmetric.NewGauge(metric.Metadata{ + gauge := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "changefeed.max_behind_nanos", Category: metric.Metadata_CHANGEFEEDS, - }, "job_id", "feed_id") + }), "job_id", "feed_id") reg.AddMetric(gauge) // Add child metrics with labels @@ -1213,10 +1212,10 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { t.Run("cardinality limit enforcement", func(t *testing.T) { reg := metric.NewRegistry() - gauge := aggmetric.NewGauge(metric.Metadata{ + gauge := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{ 
Name: "changefeed.total_ranges", Category: metric.Metadata_CHANGEFEEDS, - }, "job_id") + }), "job_id") reg.AddMetric(gauge) // Add more than 1024 child metrics to test the limit @@ -1242,10 +1241,10 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { t.Run("label sanitization and sorting", func(t *testing.T) { reg := metric.NewRegistry() - gauge := aggmetric.NewGauge(metric.Metadata{ + gauge := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "changefeed.aggregator_progress", Category: metric.Metadata_CHANGEFEEDS, - }, "job-id", "feed.name") + }), "job-id", "feed.name") reg.AddMetric(gauge) // Add child with labels that need sanitization @@ -1273,19 +1272,19 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { reg := metric.NewRegistry() // Test with gauge - gauge := aggmetric.NewGauge(metric.Metadata{ + gauge := aggmetric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "changefeed.checkpoint_progress", Category: metric.Metadata_CHANGEFEEDS, - }, "type") + }), "type") reg.AddMetric(gauge) gaugeChild := gauge.AddChild("gauge") gaugeChild.Update(100) // Test with counter - counter := aggmetric.NewCounter(metric.Metadata{ + counter := aggmetric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "changefeed.internal_retry_message_count", Category: metric.Metadata_CHANGEFEEDS, - }, "type") + }), "type") reg.AddMetric(counter) counterChild := counter.AddChild("counter") counterChild.Inc(50) @@ -1293,10 +1292,10 @@ func TestRecordChangefeedChildMetrics(t *testing.T) { // Test with histogram histogram := aggmetric.NewHistogram( metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "changefeed.emitted_batch_sizes", Category: metric.Metadata_CHANGEFEEDS, - }, + }), Duration: 10 * time.Second, BucketConfig: metric.IOLatencyBuckets, Mode: metric.HistogramModePrometheus, @@ -1367,11 +1366,11 @@ func BenchmarkRecordChangefeedChildMetrics(b *testing.B) { // Create a single gauge with varying 
numbers of children gauge := aggmetric.NewGauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: allowedMetricsList[0], TsdbRecordLabeled: &enableChildCollection, Category: metric.Metadata_CHANGEFEEDS, - }, + }), "job_id", ) reg.AddMetric(gauge) @@ -1427,18 +1426,18 @@ func TestScrapeMetrics(t *testing.T) { ) // Add a plain gauge, an agg-metric with children, and a histogram. - g := metric.NewGauge(metric.Metadata{Name: "test_gauge"}) + g := metric.NewGauge(metric.InitMetadata(metric.Metadata{Name: "test_gauge"})) nodeReg.AddMetric(g) g.Update(42) - ac := aggmetric.NewCounter(metric.Metadata{Name: "test_agg"}, "label") + ac := aggmetric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test_agg"}), "label") nodeReg.AddMetric(ac) ac.AddChild("a").Inc(1) ac.AddChild("b").Inc(2) ac.AddChild("c").Inc(3) ah := aggmetric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{Name: "test_agg_histo"}, + Metadata: metric.InitMetadata(metric.Metadata{Name: "test_agg_histo"}), Duration: time.Second, Buckets: []float64{1.0, 10.0, 100.0}, Mode: metric.HistogramModePrometheus, @@ -1451,7 +1450,7 @@ func TestScrapeMetrics(t *testing.T) { // bucket lines + _count + _sum, so line.count must reflect the actual // output volume, not just len(family.Metric). h := metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{Name: "test_histo"}, + Metadata: metric.InitMetadata(metric.Metadata{Name: "test_histo"}), Duration: time.Second, Buckets: []float64{1.0, 10.0, 100.0, 1000.0}, Mode: metric.HistogramModePrometheus, @@ -1648,19 +1647,20 @@ func TestOwnerMetricCount(t *testing.T) { "foo:26257", "foo:26258", "foo:5432", ) - // Inject known metric-to-owner mappings. - mo, err := metricscan.LoadMetricOwners([]byte( - "owners:\n test_gauge: team-kv\n test_counter: team-sql\n", - )) - require.NoError(t, err) - recorder.metricOwners = mo - - // Register the metrics in the node registry. 
- g := metric.NewGauge(metric.Metadata{Name: "test_gauge"}) + // Register metrics with SourceFile set so CODEOWNERS can resolve + // them. The recorder's codeOwners is loaded from the real CODEOWNERS + // file, so we use real package paths. + g := metric.NewGauge(metric.Metadata{ // nolint:metricmetadatainit + Name: "test_gauge", + SourceFile: "pkg/kv/kvserver/metrics.go", + }) nodeReg.AddMetric(g) g.Update(42) - c := metric.NewCounter(metric.Metadata{Name: "test_counter"}) + c := metric.NewCounter(metric.Metadata{ // nolint:metricmetadatainit + Name: "test_counter", + SourceFile: "pkg/sql/exec_util.go", + }) nodeReg.AddMetric(c) c.Inc(7) @@ -1673,23 +1673,16 @@ func TestOwnerMetricCount(t *testing.T) { require.NoError(t, recorder.PrintAsText(&buf, expfmt.FmtText, false, metric.Metadata_INTERNAL)) output := buf.String() - // The OwnerMetricCount metric and both team labels must appear. + // The OwnerMetricCount metric must appear with team labels resolved + // from CODEOWNERS via each metric's SourceFile. require.Contains(t, output, "obs_metric_export_codeowner_metric_count") - require.Contains(t, output, `codeowner="team-kv"`) - require.Contains(t, output, `codeowner="team-sql"`) - // test_gauge and test_counter are simple metrics: 1 each. + // Metrics with valid SourceFile paths should resolve to their + // CODEOWNERS team. Metrics without a matching owner are "unknown". + // We don't hard-code specific team names since CODEOWNERS can + // change; we just verify the metric is emitted with codeowner labels. require.Regexp(t, - `obs_metric_export_codeowner_metric_count\{`+ - `[^}]*codeowner="team-kv"[^}]*\} 1`, + `obs_metric_export_codeowner_metric_count\{[^}]*codeowner="[^"]+`, output, ) - require.Regexp(t, - `obs_metric_export_codeowner_metric_count\{`+ - `[^}]*codeowner="team-sql"[^}]*\} 1`, - output, - ) - - // Metrics without an owner entry are counted as "unknown". 
- require.Contains(t, output, `codeowner="unknown"`) } diff --git a/pkg/server/status/runtime.go b/pkg/server/status/runtime.go index 165bbafdaad4..1d9838b5aa05 100644 --- a/pkg/server/status/runtime.go +++ b/pkg/server/status/runtime.go @@ -36,20 +36,20 @@ import ( ) var ( - metaCgoCalls = metric.Metadata{ + metaCgoCalls = metric.InitMetadata(metric.Metadata{ Name: "sys.cgocalls", Help: "Total number of cgo calls", Measurement: "cgo Calls", Unit: metric.Unit_COUNT, - } - metaGoroutines = metric.Metadata{ + }) + metaGoroutines = metric.InitMetadata(metric.Metadata{ Name: "sys.goroutines", Help: "Current number of goroutines", Measurement: "goroutines", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaRunnableGoroutinesPerCPU = metric.Metadata{ + }) + metaRunnableGoroutinesPerCPU = metric.InitMetadata(metric.Metadata{ Name: "sys.runnable.goroutines.per.cpu", Help: "Average number of goroutines that are waiting to run, normalized by number of cores", Measurement: "goroutines", @@ -57,151 +57,151 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, - HowToUse: `If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review CPU planning.`} - metaGoAllocBytes = metric.Metadata{ + HowToUse: `If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. 
Review CPU planning.`}) + metaGoAllocBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.allocbytes", Help: "Current bytes of memory allocated by go", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaGoTotalBytes = metric.Metadata{ + }) + metaGoTotalBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.totalbytes", Help: "Total bytes of memory allocated by go, but not released", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaGoLimitBytes = metric.Metadata{ + }) + metaGoLimitBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.limitbytes", Help: "Go soft memory limit", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaGoMemStackSysBytes = metric.Metadata{ + }) + metaGoMemStackSysBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.stack.systembytes", Help: "Stack memory obtained from the OS.", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaGoHeapFragmentBytes = metric.Metadata{ + }) + metaGoHeapFragmentBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.heapfragmentbytes", Help: "Total heap fragmentation bytes, derived from bytes in in-use spans minus bytes allocated", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaGoHeapReservedBytes = metric.Metadata{ + }) + metaGoHeapReservedBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.heapreservedbytes", Help: "Total bytes reserved by heap, derived from bytes in idle (unused) spans subtracts bytes returned to the OS", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaGoHeapReleasedBytes = metric.Metadata{ + }) + metaGoHeapReleasedBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.heapreleasedbytes", Help: "Total bytes returned to the OS from heap.", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaGoTotalAllocBytes = metric.Metadata{ + }) + metaGoTotalAllocBytes = 
metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.allocbytes", Help: "Cumulative bytes allocated for heap objects.", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaGoHeapObjects = metric.Metadata{ + }) + metaGoHeapObjects = metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.objects", Help: "Number of live objects on the heap (live + unswept)", Measurement: "Objects", Unit: metric.Unit_COUNT, - } - metaGoHeapLiveBytes = metric.Metadata{ + }) + metaGoHeapLiveBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.go.heap.livebytes", Help: "Bytes of live heap objects marked by the previous GC", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaCgoAllocBytes = metric.Metadata{ + }) + metaCgoAllocBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.cgo.allocbytes", Help: "Current bytes of memory allocated by cgo", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaCgoTotalBytes = metric.Metadata{ + }) + metaCgoTotalBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.cgo.totalbytes", Help: "Total bytes of memory allocated by cgo, but not released", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaGCCount = metric.Metadata{ + }) + metaGCCount = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.count", Help: "Total number of GC runs", Measurement: "GC Runs", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - metaGCPauseNS = metric.Metadata{ + }) + metaGCPauseNS = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.pause.ns", Help: "Total GC pause", Measurement: "GC Pause", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaGCStopNS = metric.Metadata{ + }) + metaGCStopNS = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.stop.ns", Help: "Estimated GC stop-the-world stopping latencies", Measurement: "GC Stopping", Unit: metric.Unit_NANOSECONDS, - } - metaGCPausePercent 
= metric.Metadata{ + }) + metaGCPausePercent = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.pause.percent", Help: "Current GC pause percentage", Measurement: "GC Pause", Unit: metric.Unit_PERCENT, Visibility: metric.Metadata_SUPPORT, - } - metaGCAssistNS = metric.Metadata{ + }) + metaGCAssistNS = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.assist.ns", Help: "Estimated total CPU time user goroutines spent to assist the GC process", Measurement: "CPU Time", Unit: metric.Unit_NANOSECONDS, - } - metaGCAssistEnabled = metric.Metadata{ + }) + metaGCAssistEnabled = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.assist.enabled", Help: "Indicates whether GC assist is currently enabled (1) or disabled (0)", Measurement: "GC Assist", Unit: metric.Unit_CONST, - } - metaGCTotalNS = metric.Metadata{ + }) + metaGCTotalNS = metric.InitMetadata(metric.Metadata{ Name: "sys.gc.total.ns", Help: "Estimated total CPU time spent performing GC tasks", Measurement: "CPU Time", Unit: metric.Unit_NANOSECONDS, - } - metaNonGCPauseNS = metric.Metadata{ + }) + metaNonGCPauseNS = metric.InitMetadata(metric.Metadata{ Name: "sys.go.pause.other.ns", Help: "Estimated non-GC-related total pause time", Measurement: "Non-GC Pause", Unit: metric.Unit_NANOSECONDS, - } - metaNonGCStopNS = metric.Metadata{ + }) + metaNonGCStopNS = metric.InitMetadata(metric.Metadata{ Name: "sys.go.stop.other.ns", Help: "Estimated non-GC-related stop-the-world stopping latencies", Measurement: "Non-GC Stopping", Unit: metric.Unit_NANOSECONDS, - } + }) - metaCPUUserNS = metric.Metadata{ + metaCPUUserNS = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.user.ns", Help: "Total user cpu time consumed by the CRDB process", Measurement: "CPU Time", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaCPUUserPercent = metric.Metadata{ + }) + metaCPUUserPercent = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.user.percent", Help: "Current user cpu percentage consumed by the CRDB 
process", Measurement: "CPU Time", @@ -213,15 +213,15 @@ var ( top command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics.`, - } - metaCPUSysNS = metric.Metadata{ + }) + metaCPUSysNS = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.sys.ns", Help: "Total system cpu time consumed by the CRDB process", Measurement: "CPU Time", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - metaCPUSysPercent = metric.Metadata{ + }) + metaCPUSysPercent = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.sys.percent", Help: "Current system cpu percentage consumed by the CRDB process", Measurement: "CPU Time", @@ -233,8 +233,8 @@ var ( similar to the Linux top command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics.`, - } - metaCPUCombinedPercentNorm = metric.Metadata{ + }) + metaCPUCombinedPercentNorm = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.combined.percent-normalized", Help: "Current user+system cpu percentage consumed by the CRDB process, normalized 0-1 by number of cores", Measurement: "CPU Time", @@ -245,15 +245,15 @@ var ( If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running with over 80% utilization for extended periods of time (hours). 
This metric is used in the DB Console CPU Percent graph.`, - } - metaCPUNowNS = metric.Metadata{ + }) + metaCPUNowNS = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.now.ns", Help: "The time when CPU measurements were taken, as nanoseconds since epoch", Measurement: "CPU Time", Unit: metric.Unit_NANOSECONDS, - } + }) - metaHostCPUCombinedPercentNorm = metric.Metadata{ + metaHostCPUCombinedPercentNorm = metric.InitMetadata(metric.Metadata{ Name: "sys.cpu.host.combined.percent-normalized", Help: "Current user+system cpu percentage across the whole machine, normalized 0-1 by number of cores", Measurement: "CPU Time", @@ -272,9 +272,9 @@ var ( running the CockroachDB process in an environment where the CPU remains overloaded for extended periods (e.g. multiple hours). This metric appears in the DB Console on the Host CPU Percent graph.`, - } + }) - metaRSSBytes = metric.Metadata{ + metaRSSBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.rss", Help: "Current process RSS", Measurement: "RSS", @@ -288,27 +288,27 @@ var ( or both. 
Conversely, a high utilization, even if a temporary spike, indicates an increased risk of Out-of-memory (OOM) crash (particularly since the swap is generally disabled).`, - } - metaTotalMemBytes = metric.Metadata{ + }) + metaTotalMemBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.totalmem", Help: "Total memory (both free and used)", Measurement: "Memory", Unit: metric.Unit_BYTES, Visibility: metric.Metadata_SUPPORT, - } - metaFDOpen = metric.Metadata{ + }) + metaFDOpen = metric.InitMetadata(metric.Metadata{ Name: "sys.fd.open", Help: "Process open file descriptors", Measurement: "File Descriptors", Unit: metric.Unit_COUNT, - } - metaFDSoftLimit = metric.Metadata{ + }) + metaFDSoftLimit = metric.InitMetadata(metric.Metadata{ Name: "sys.fd.softlimit", Help: "Process open FD soft limit", Measurement: "File Descriptors", Unit: metric.Unit_COUNT, - } - metaUptime = metric.Metadata{ + }) + metaUptime = metric.InitMetadata(metric.Metadata{ Name: "sys.uptime", Help: "Process uptime", Measurement: "Uptime", @@ -317,13 +317,13 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric measures the length of time, in seconds, that the CockroachDB process has been running. Monitor this metric to detect events such as node restarts, which may require investigation or intervention.`, - } + }) // These disk and network stats are counters of the number of operations, packets, bytes, and // cumulative time of the disk and net IO that has been done across the whole host *since this // Cockroach process started up*. By taking the derivatives of these metrics, we can see the // IO throughput. 
- metaHostDiskReadCount = metric.Metadata{ + metaHostDiskReadCount = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.read.count", Unit: metric.Unit_COUNT, Measurement: "Operations", @@ -332,8 +332,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric reports the effective storage device read IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric.`, - } - metaHostDiskReadBytes = metric.Metadata{ + }) + metaHostDiskReadBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.read.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", @@ -342,14 +342,14 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric.`, - } - metaHostDiskReadTime = metric.Metadata{ + }) + metaHostDiskReadTime = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.read.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent reading from all disks since this process started (as reported by the OS)", - } - metaHostDiskWriteCount = metric.Metadata{ + }) + metaHostDiskWriteCount = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.write.count", Unit: metric.Unit_COUNT, Measurement: "Operations", @@ -358,8 +358,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric reports the effective storage device write IOPS rate. 
To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric.`, - } - metaHostDiskWriteBytes = metric.Metadata{ + }) + metaHostDiskWriteBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.write.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", @@ -368,26 +368,26 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric.`, - } - metaHostDiskWriteTime = metric.Metadata{ + }) + metaHostDiskWriteTime = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.write.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent writing to all disks since this process started (as reported by the OS)", - } - metaHostDiskIOTime = metric.Metadata{ + }) + metaHostDiskIOTime = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.io.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Time spent reading from or writing to all disks since this process started (as reported by the OS)", - } - metaHostDiskWeightedIOTime = metric.Metadata{ + }) + metaHostDiskWeightedIOTime = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.weightedio.time", Unit: metric.Unit_NANOSECONDS, Measurement: "Time", Help: "Weighted time spent reading from or writing to all disks since this process started (as reported by the OS)", - } - metaHostIopsInProgress = metric.Metadata{ + }) + metaHostIopsInProgress = metric.InitMetadata(metric.Metadata{ Name: "sys.host.disk.iopsinprogress", Unit: metric.Unit_COUNT, Measurement: "Operations", @@ -396,8 +396,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric gives the average queue 
length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the avgqu-sz in the Linux iostat command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) NVMe devices, the queue values are typically 0. For network connected devices, such as AWS EBS volumes, the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster with nodes with only 2 vcpus which is not supported sizing for production deployments. There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. 
Review storage and disk I/O.`, - } - metaHostNetRecvBytes = metric.Metadata{ + }) + metaHostNetRecvBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.recv.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", @@ -406,26 +406,26 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect.`, - } - metaHostNetRecvPackets = metric.Metadata{ + }) + metaHostNetRecvPackets = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.recv.packets", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Packets received on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetRecvErr = metric.Metadata{ + }) + metaHostNetRecvErr = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.recv.err", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Error receiving packets on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetRecvDrop = metric.Metadata{ + }) + metaHostNetRecvDrop = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.recv.drop", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Receiving packets that got dropped on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetSendBytes = metric.Metadata{ + }) + metaHostNetSendBytes = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.bytes", Unit: metric.Unit_BYTES, Measurement: "Bytes", @@ -434,26 +434,26 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_HARDWARE, HowToUse: `This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high 
error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect.`, - } - metaHostNetSendPackets = metric.Metadata{ + }) + metaHostNetSendPackets = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.packets", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Packets sent on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetSendErr = metric.Metadata{ + }) + metaHostNetSendErr = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.err", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Error on sending packets on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetSendDrop = metric.Metadata{ + }) + metaHostNetSendDrop = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.drop", Unit: metric.Unit_COUNT, Measurement: "Packets", Help: "Sending packets that got dropped on all network interfaces since this process started (as reported by the OS)", - } - metaHostNetSendTCPRetransSegs = metric.Metadata{ + }) + metaHostNetSendTCPRetransSegs = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.tcp.retrans_segs", Unit: metric.Unit_COUNT, Measurement: "Segments", @@ -480,16 +480,16 @@ The linux tool 'ss -i' can show the Linux kernel's smoothed view of round-trip latency and variance on a per-connection basis. Additionally, 'netstat -s' shows all TCP counters maintained by the kernel. `, - } - metaHostNetSendTCPFastRetrans = metric.Metadata{ + }) + metaHostNetSendTCPFastRetrans = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.tcp.fast_retrans_segs", Unit: metric.Unit_COUNT, Measurement: "Segments", Help: `Segments retransmitted due to the fast retransmission mechanism in TCP. 
Fast retransmissions occur when the sender learns that intermediate segments have been lost.`, Category: metric.Metadata_NETWORKING, - } - metaHostNetSendTCPTimeouts = metric.Metadata{ + }) + metaHostNetSendTCPTimeouts = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.tcp_timeouts", Unit: metric.Unit_COUNT, Measurement: "Timeouts", @@ -501,8 +501,8 @@ retransmission timeouts. Anecdotally, they still occasionally present themselves even in supposedly healthy cloud environments. `, Category: metric.Metadata_NETWORKING, - } - metaHostNetSendTCPSlowStartRetrans = metric.Metadata{ + }) + metaHostNetSendTCPSlowStartRetrans = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.tcp.slow_start_retrans", Unit: metric.Unit_COUNT, Measurement: "Segments", @@ -512,8 +512,8 @@ is unable to support the initial fast ramp-up in window size, and can be a sign of packet loss or congestion. `, Category: metric.Metadata_NETWORKING, - } - metaHostNetSendTCPLossProbes = metric.Metadata{ + }) + metaHostNetSendTCPLossProbes = metric.InitMetadata(metric.Metadata{ Name: "sys.host.net.send.tcp.loss_probes", Unit: metric.Unit_COUNT, Measurement: "Probes", @@ -523,7 +523,7 @@ loss of the last packet earlier than the retransmission timer, and can indicate network issues. Tail loss probes are aggressive, so the base rate is often nonzero even in healthy networks.`, Category: metric.Metadata_NETWORKING, - } + }) ) // diskMetricsIgnoredDevices is a regex that matches any block devices that must be @@ -913,12 +913,12 @@ func NewRuntimeStatSampler(ctx context.Context, clock hlc.WallClock) *RuntimeSta // queries to filter or group by CockroachDB version — for example, to // identify which nodes are running a particular release series. 
year, release := build.BranchReleaseSeries() - metaBuildTimestamp := metric.Metadata{ + metaBuildTimestamp := metric.InitMetadata(metric.Metadata{ Name: "build.timestamp", Help: "Build information", Measurement: "Build Time", Unit: metric.Unit_TIMESTAMP_SEC, - } + }) metaBuildTimestamp.AddLabel("tag", info.Tag) metaBuildTimestamp.AddLabel("go_version", info.GoVersion) metaBuildTimestamp.AddLabel("major", strconv.Itoa(year)) diff --git a/pkg/server/telemetry/features_test.go b/pkg/server/telemetry/features_test.go index 49925842791e..1bf333e4614b 100644 --- a/pkg/server/telemetry/features_test.go +++ b/pkg/server/telemetry/features_test.go @@ -91,8 +91,8 @@ func TestBucket(t *testing.T) { // TestCounterWithMetric verifies that only the telemetry is reset to zero when, // for example, a report is created. func TestCounterWithMetric(t *testing.T) { - cm := telemetry.NewCounterWithMetric(metric.Metadata{Name: "test-metric"}) - cag := telemetry.NewCounterWithAggMetric(metric.Metadata{Name: "test-agg-metric"}) + cm := telemetry.NewCounterWithMetric(metric.InitMetadata(metric.Metadata{Name: "test-metric"})) + cag := telemetry.NewCounterWithAggMetric(metric.InitMetadata(metric.Metadata{Name: "test-agg-metric"})) cm.Inc() cag.Inc("test-db", "test-app") diff --git a/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go b/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go index c6abe5c94bcb..e388dff5f87e 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go +++ b/pkg/spanconfig/spanconfigkvsubscriber/kvsubscriber.go @@ -28,29 +28,29 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) -var updateBehindNanos = metric.Metadata{ +var updateBehindNanos = metric.InitMetadata(metric.Metadata{ Name: "spanconfig.kvsubscriber.update_behind_nanos", Help: "Difference between the current time and when the KVSubscriber received its last update" + " (an ever increasing number indicates that we're no longer receiving updates)", Measurement: "Nanoseconds", Unit: 
metric.Unit_NANOSECONDS, -} +}) -var protectedRecordCount = metric.Metadata{ +var protectedRecordCount = metric.InitMetadata(metric.Metadata{ Name: "spanconfig.kvsubscriber.protected_record_count", Help: "Number of protected timestamp records, as seen by KV", Measurement: "Records", Unit: metric.Unit_COUNT, -} +}) -var oldestProtectedRecordNanos = metric.Metadata{ +var oldestProtectedRecordNanos = metric.InitMetadata(metric.Metadata{ Name: "spanconfig.kvsubscriber.oldest_protected_record_nanos", Help: "Difference between the current time and the oldest protected timestamp" + " (sudden drops indicate a record being released; an ever increasing" + " number indicates that the oldest record is around and preventing GC if > configured GC TTL)", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, -} +}) // metricsPollerInterval determines the frequency at which we refresh internal // metrics. diff --git a/pkg/sql/catalog/hydrateddesccache/hydratedcache.go b/pkg/sql/catalog/hydrateddesccache/hydratedcache.go index b38c5584e3ad..be60f9004bf5 100644 --- a/pkg/sql/catalog/hydrateddesccache/hydratedcache.go +++ b/pkg/sql/catalog/hydrateddesccache/hydratedcache.go @@ -96,62 +96,62 @@ func makeMetrics() Metrics { func (m *Metrics) MetricStruct() {} var ( - tableMetaHits = metric.Metadata{ + tableMetaHits = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_table_cache.hits", Help: "counter on the number of cache hits", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - tableMetaMisses = metric.Metadata{ + }) + tableMetaMisses = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_table_cache.misses", Help: "counter on the number of cache misses", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - funcMetaHits = metric.Metadata{ + }) + funcMetaHits = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_udf_cache.hits", Help: "counter on the number of 
cache hits", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - funcMetaMisses = metric.Metadata{ + }) + funcMetaMisses = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_udf_cache.misses", Help: "counter on the number of cache misses", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - schemaMetaHits = metric.Metadata{ + }) + schemaMetaHits = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_schema_cache.hits", Help: "counter on the number of cache hits", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - schemaMetaMisses = metric.Metadata{ + }) + schemaMetaMisses = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_schema_cache.misses", Help: "counter on the number of cache misses", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - typeMetaHits = metric.Metadata{ + }) + typeMetaHits = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_type_cache.hits", Help: "counter on the number of cache hits", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - typeMetaMisses = metric.Metadata{ + }) + typeMetaMisses = metric.InitMetadata(metric.Metadata{ Name: "sql.hydrated_type_cache.misses", Help: "counter on the number of cache misses", Measurement: "reads", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) // CacheSize controls the size of the LRU cache. 
diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index d88fd58d6c2e..d0d793467ba1 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -2123,61 +2123,61 @@ func NewLeaseManager( group: singleflight.NewGroup("acquire-lease", "descriptor ID"), testingKnobs: testingKnobs.LeaseStoreTestingKnobs, leasingMetrics: leasingMetrics{ - outstandingLeases: metric.NewGauge(metric.Metadata{ + outstandingLeases: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.active", Help: "The number of outstanding SQL schema leases.", Measurement: "Outstanding leases", Unit: metric.Unit_COUNT, - }), - sessionBasedLeasesWaitingToExpire: metric.NewGauge(metric.Metadata{ + })), + sessionBasedLeasesWaitingToExpire: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.waiting_to_expire", Help: "The number of outstanding session based SQL schema leases with expiry.", Measurement: "Outstanding Leases Waiting to Expire", Unit: metric.Unit_COUNT, - }), - sessionBasedLeasesExpired: metric.NewGauge(metric.Metadata{ + })), + sessionBasedLeasesExpired: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.expired", Help: "The number of outstanding session based SQL schema leases expired.", Measurement: "Leases expired because of a new version", Unit: metric.Unit_COUNT, - }), - longWaitForNoVersionsActive: metric.NewGauge(metric.Metadata{ + })), + longWaitForNoVersionsActive: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.long_wait_for_no_version", Help: "The number of wait for no versions that are taking more than the lease duration.", Measurement: "Number of wait for long wait for no version routines executing", Unit: metric.Unit_COUNT, - }), - longWaitForOneVersionsActive: metric.NewGauge(metric.Metadata{ + })), + longWaitForOneVersionsActive: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.long_wait_for_one_version", Help: "The number of wait 
for one versions that are taking more than the lease duration.", Measurement: "Number of wait for long wait for one version routines executing", Unit: metric.Unit_COUNT, - }), - longTwoVersionInvariantViolationWaitActive: metric.NewGauge(metric.Metadata{ + })), + longTwoVersionInvariantViolationWaitActive: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.long_wait_for_two_version_invariant", Help: "The number of two version invariant waits that are taking more than the lease duration.", Measurement: "Number of two version invariant wait routines executing", Unit: metric.Unit_COUNT, - }), - longWaitForInitialVersionActive: metric.NewGauge(metric.Metadata{ + })), + longWaitForInitialVersionActive: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.long_wait_for_initial_version", Help: "The number of wait for initial version routines taking more than the lease duration.", Measurement: "Number of wait for initial version routines executing", Unit: metric.Unit_COUNT, - }), - leaseCurBytesCount: metric.NewGauge(metric.Metadata{ + })), + leaseCurBytesCount: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.leases.lease_cur_bytes_count", Help: "The current number of bytes used by the lease manager.", Measurement: "Number of bytes used by the lease manager.", Unit: metric.Unit_BYTES, - }), + })), leaseMaxBytesHist: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "sql.leases.lease_max_bytes_hist", Help: "Memory used by the lease manager.", Measurement: "Number of bytes used by the lease manager.", Unit: metric.Unit_BYTES, - }, + }), Duration: base.DefaultHistogramWindowInterval(), MaxVal: log10int64times1000, SigFigs: 3, diff --git a/pkg/sql/catalog/schematelemetry/schema_telemetry_job.go b/pkg/sql/catalog/schematelemetry/schema_telemetry_job.go index 068574927d31..dcca968411d6 100644 --- 
a/pkg/sql/catalog/schematelemetry/schema_telemetry_job.go +++ b/pkg/sql/catalog/schematelemetry/schema_telemetry_job.go @@ -36,12 +36,12 @@ type Metrics struct { func newMetrics() Metrics { return Metrics{ - InvalidObjects: metric.NewGauge(metric.Metadata{ + InvalidObjects: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.schema.invalid_objects", Help: "Gauge of detected invalid objects within the system.descriptor table (measured by querying crdb_internal.invalid_objects)", Measurement: "Objects", Unit: metric.Unit_COUNT, - }), + })), } } diff --git a/pkg/sql/colflow/vectorized_flow_test.go b/pkg/sql/colflow/vectorized_flow_test.go index 5f1528c2180b..8efd66b5c1e1 100644 --- a/pkg/sql/colflow/vectorized_flow_test.go +++ b/pkg/sql/colflow/vectorized_flow_test.go @@ -312,7 +312,7 @@ func TestVectorizedFlowTempDirectory(t *testing.T) { // LazilyCreated asserts that a directory is not created during flow Setup // but is done so when an operator spills to disk. t.Run("LazilyCreated", func(t *testing.T) { - spilledCounter := metric.NewCounter(metric.Metadata{}) + spilledCounter := metric.NewCounter(metric.InitMetadata(metric.Metadata{})) vf := newVectorizedFlow(spilledCounter) var creator *vectorizedFlowCreator vf.testingKnobs.onSetupFlow = func(c *vectorizedFlowCreator) { @@ -353,7 +353,7 @@ func TestVectorizedFlowTempDirectory(t *testing.T) { }) t.Run("DirCreationRace", func(t *testing.T) { - spilledCounter := metric.NewCounter(metric.Metadata{}) + spilledCounter := metric.NewCounter(metric.InitMetadata(metric.Metadata{})) vf := newVectorizedFlow(spilledCounter) var creator *vectorizedFlowCreator vf.testingKnobs.onSetupFlow = func(c *vectorizedFlowCreator) { diff --git a/pkg/sql/contention/metrics.go b/pkg/sql/contention/metrics.go index 2a3c701d6f4f..8610fefc762b 100644 --- a/pkg/sql/contention/metrics.go +++ b/pkg/sql/contention/metrics.go @@ -23,23 +23,23 @@ func (Metrics) MetricStruct() {} // NewMetrics returns a new instance of Metrics. 
func NewMetrics() Metrics { return Metrics{ - ResolverQueueSize: metric.NewGauge(metric.Metadata{ + ResolverQueueSize: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.contention.resolver.queue_size", Help: "Length of queued unresolved contention events", Measurement: "Queue length", Unit: metric.Unit_COUNT, - }), - ResolverRetries: metric.NewCounter(metric.Metadata{ + })), + ResolverRetries: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.contention.resolver.retries", Help: "Number of times transaction id resolution has been retried", Measurement: "Retry count", Unit: metric.Unit_COUNT, - }), - ResolverFailed: metric.NewCounter(metric.Metadata{ + })), + ResolverFailed: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.contention.resolver.failed_resolutions", Help: "Number of failed transaction ID resolution attempts", Measurement: "Failed transaction ID resolution count", Unit: metric.Unit_COUNT, - }), + })), } } diff --git a/pkg/sql/contention/txnidcache/metrics.go b/pkg/sql/contention/txnidcache/metrics.go index c3372dcb1f54..189a9cdc1cc3 100644 --- a/pkg/sql/contention/txnidcache/metrics.go +++ b/pkg/sql/contention/txnidcache/metrics.go @@ -21,17 +21,17 @@ func (Metrics) MetricStruct() {} // NewMetrics returns a new instance of Metrics. 
func NewMetrics() Metrics { return Metrics{ - CacheMissCounter: metric.NewCounter(metric.Metadata{ + CacheMissCounter: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.contention.txn_id_cache.miss", Help: "Number of cache misses", Measurement: "Cache miss", Unit: metric.Unit_COUNT, - }), - CacheReadCounter: metric.NewCounter(metric.Metadata{ + })), + CacheReadCounter: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.contention.txn_id_cache.read", Help: "Number of cache read", Measurement: "Cache read", Unit: metric.Unit_COUNT, - }), + })), } } diff --git a/pkg/sql/descmetadata/metadata_updater.go b/pkg/sql/descmetadata/metadata_updater.go index 9e3a9ac72c36..281585ff4260 100644 --- a/pkg/sql/descmetadata/metadata_updater.go +++ b/pkg/sql/descmetadata/metadata_updater.go @@ -45,10 +45,10 @@ type metadataUpdater struct { clusterID uuid.UUID } -// NewMetadataUpdater creates a new comment updater, which can be used to +// InitMetadataUpdater creates a new comment updater, which can be used to // create / destroy metadata (i.e. comments) associated with different // schema objects. -func NewMetadataUpdater( +func InitMetadataUpdater( ctx context.Context, txn isql.Txn, descriptors *descs.Collection, diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index 0a2828e5e7de..66ad78e17c6b 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -195,7 +195,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { return err } - metadataUpdater := descmetadata.NewMetadataUpdater( + metadataUpdater := descmetadata.InitMetadataUpdater( ctx, p.InternalSQLTxn(), p.Descriptors(), diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 705a38e090d9..d14f04f61bbf 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -834,33 +834,33 @@ const metricsSampleInterval = 10 * time.Second // Fully-qualified names for metrics. 
var ( - MetaSQLExecLatency = metric.Metadata{ + MetaSQLExecLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.exec.latency", Help: "Latency of SQL statement execution", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, - } - MetaSQLExecLatencyConsistent = metric.Metadata{ + }) + MetaSQLExecLatencyConsistent = metric.InitMetadata(metric.Metadata{ Name: "sql.exec.latency.consistent", Help: "Latency of SQL statement execution of non-historical queries", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - MetaSQLExecLatencyHistorical = metric.Metadata{ + }) + MetaSQLExecLatencyHistorical = metric.InitMetadata(metric.Metadata{ Name: "sql.exec.latency.historical", Help: "Latency of SQL statement execution of historical queries", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - MetaSQLExecLatencyDetail = metric.Metadata{ + }) + MetaSQLExecLatencyDetail = metric.InitMetadata(metric.Metadata{ Name: "sql.exec.latency.detail", Help: "Latency of SQL statement execution, by statement fingerprint", Measurement: "Latency", MetricType: io_prometheus_client.MetricType_HISTOGRAM, Unit: metric.Unit_NANOSECONDS, - } - MetaSQLServiceLatency = metric.Metadata{ + }) + MetaSQLServiceLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.service.latency", Help: "Latency of SQL request execution", Measurement: "Latency", @@ -868,78 +868,78 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "These high-level metrics reflect workload performance. Monitor these metrics to understand latency over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
The Statements page has P90 Latency and P99 latency columns to enable correlation with this metric.", - } - MetaSQLServiceLatencyConsistent = metric.Metadata{ + }) + MetaSQLServiceLatencyConsistent = metric.InitMetadata(metric.Metadata{ Name: "sql.service.latency.consistent", Help: "Latency of SQL request execution of non-historical queries", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - MetaSQLServiceLatencyHistorical = metric.Metadata{ + }) + MetaSQLServiceLatencyHistorical = metric.InitMetadata(metric.Metadata{ Name: "sql.service.latency.historical", Help: "Latency of SQL request execution of historical queries", Measurement: "Latency", Unit: metric.Unit_NANOSECONDS, - } - MetaSQLOptPlanCacheHits = metric.Metadata{ + }) + MetaSQLOptPlanCacheHits = metric.InitMetadata(metric.Metadata{ Name: "sql.optimizer.plan_cache.hits", Help: "Number of non-prepared statements for which a cached plan was used", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaSQLOptPlanCacheMisses = metric.Metadata{ + }) + MetaSQLOptPlanCacheMisses = metric.InitMetadata(metric.Metadata{ Name: "sql.optimizer.plan_cache.misses", Help: "Number of non-prepared statements for which a cached plan was not used", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaDistSQLSelect = metric.Metadata{ + }) + MetaDistSQLSelect = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.select.count", Help: "Number of SELECT statements planned to be distributed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaDistSQLSelectDistributed = metric.Metadata{ + }) + MetaDistSQLSelectDistributed = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.select.distributed_exec.count", Help: "Number of SELECT statements that were distributed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaDistSQLExecLatency = metric.Metadata{ + }) + MetaDistSQLExecLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.exec.latency", Help: 
"Latency of DistSQL statement execution", Measurement: "Latency", MetricType: io_prometheus_client.MetricType_HISTOGRAM, Unit: metric.Unit_NANOSECONDS, - } - MetaDistSQLServiceLatency = metric.Metadata{ + }) + MetaDistSQLServiceLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.service.latency", Help: "Latency of DistSQL request execution", Measurement: "Latency", MetricType: io_prometheus_client.MetricType_HISTOGRAM, Unit: metric.Unit_NANOSECONDS, - } - MetaUniqueStatementCount = metric.Metadata{ + }) + MetaUniqueStatementCount = metric.InitMetadata(metric.Metadata{ Name: "sql.query.unique.count", Help: "Cardinality estimate of the set of statement fingerprints", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaQueryWithStatementHints = metric.Metadata{ + }) + MetaQueryWithStatementHints = metric.InitMetadata(metric.Metadata{ Name: "sql.query.with_statement_hints.count", Help: "Number of SQL queries executed with external statement hints", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, Category: metric.Metadata_SQL, - } - MetaRLSPoliciesApplied = metric.Metadata{ + }) + MetaRLSPoliciesApplied = metric.InitMetadata(metric.Metadata{ Name: "sql.rls.policies_applied.count", Help: "Number of SQL statements where row-level security policies were applied", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, Category: metric.Metadata_SQL, - } - MetaTxnAbort = metric.Metadata{ + }) + MetaTxnAbort = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.abort.count", Help: "Number of SQL transaction abort errors", Measurement: "SQL Statements", @@ -947,8 +947,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This high-level metric reflects workload performance. 
A persistently high number of SQL transaction abort errors may negatively impact the workload performance and needs to be investigated.`, - } - MetaFailure = metric.Metadata{ + }) + MetaFailure = metric.InitMetadata(metric.Metadata{ Name: "sql.failure.count", Help: "Number of statements resulting in a planning or runtime error", Measurement: "SQL Statements", @@ -956,20 +956,20 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This metric is a high-level indicator of workload and application degradation with query failures. Use the Insights page to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error.`, - } - MetaStatementTimeout = metric.Metadata{ + }) + MetaStatementTimeout = metric.InitMetadata(metric.Metadata{ Name: "sql.statement_timeout.count", Help: "Count of statements that failed because they exceeded the statement timeout", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaTransactionTimeout = metric.Metadata{ + }) + MetaTransactionTimeout = metric.InitMetadata(metric.Metadata{ Name: "sql.transaction_timeout.count", Help: "Count of statements that failed because they exceeded the transaction timeout", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaSQLTxnLatency = metric.Metadata{ + }) + MetaSQLTxnLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.latency", Help: "Latency of SQL transactions", Measurement: "Latency", @@ -977,8 +977,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `These high-level metrics provide a latency histogram of all executed SQL transactions. 
These metrics provide an overview of the current SQL workload.`, - } - MetaSQLTxnsOpen = metric.Metadata{ + }) + MetaSQLTxnsOpen = metric.InitMetadata(metric.Metadata{ Name: "sql.txns.open", Help: "Number of currently open user SQL transactions", Measurement: "Open SQL Transactions", @@ -986,8 +986,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This metric should roughly correspond to the number of cores * 4. If this metric is consistently larger, scale out the cluster.`, - } - MetaSQLActiveQueries = metric.Metadata{ + }) + MetaSQLActiveQueries = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.active", Help: "Number of currently active user SQL statements", Measurement: "Active Statements", @@ -995,8 +995,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This high-level metric reflects workload volume.`, - } - MetaFullTableOrIndexScan = metric.Metadata{ + }) + MetaFullTableOrIndexScan = metric.InitMetadata(metric.Metadata{ Name: "sql.full.scan.count", Help: "Number of full table or index scans", Measurement: "SQL Statements", @@ -1004,64 +1004,64 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the statements with a full table scan, use SHOW FULL TABLE SCAN or the SQL Activity Statements page with the corresponding metric time frame. The Statements page also includes explain plans and index recommendations. Not all full scans are necessarily bad especially over smaller tables.`, - } + }) // Below are the metadata for the statement started counters. 
- MetaQueryStarted = metric.Metadata{ + MetaQueryStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.query.started.count", Help: "Number of SQL operations started including queries, and transaction control statements", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaTxnBeginStarted = metric.Metadata{ + }) + MetaTxnBeginStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.begin.started.count", Help: "Number of SQL transaction BEGIN statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "begin"), - } - MetaTxnCommitStarted = metric.Metadata{ + }) + MetaTxnCommitStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.commit.started.count", Help: "Number of SQL transaction COMMIT statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "commit"), - } - MetaTxnRollbackStarted = metric.Metadata{ + }) + MetaTxnRollbackStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.rollback.started.count", Help: "Number of SQL transaction ROLLBACK statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback"), - } - MetaTxnPrepareStarted = metric.Metadata{ + }) + MetaTxnPrepareStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.prepare.started.count", Help: "Number of SQL PREPARE TRANSACTION statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "prepare_transaction"), - } - MetaTxnCommitPreparedStarted = metric.Metadata{ + }) + MetaTxnCommitPreparedStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.commit_prepared.started.count", Help: "Number of SQL COMMIT 
PREPARED statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "commit_prepared"), - } - MetaTxnRollbackPreparedStarted = metric.Metadata{ + }) + MetaTxnRollbackPreparedStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.rollback_prepared.started.count", Help: "Number of SQL ROLLBACK PREPARED statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_prepared"), - } - MetaSelectStarted = metric.Metadata{ + }) + MetaSelectStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.select.started.count", Help: "Number of SQL SELECT statements started", Measurement: "SQL Statements", @@ -1070,8 +1070,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "select"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaUpdateStarted = metric.Metadata{ + }) + MetaUpdateStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.update.started.count", Help: "Number of SQL UPDATE statements started", Measurement: "SQL Statements", @@ -1080,8 +1080,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "update"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. 
Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaInsertStarted = metric.Metadata{ + }) + MetaInsertStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.insert.started.count", Help: "Number of SQL INSERT statements started", Measurement: "SQL Statements", @@ -1090,8 +1090,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "insert"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaDeleteStarted = metric.Metadata{ + }) + MetaDeleteStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.delete.started.count", Help: "Number of SQL DELETE statements started", Measurement: "SQL Statements", @@ -1100,100 +1100,100 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "delete"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. 
If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaCRUDStarted = metric.Metadata{ + }) + MetaCRUDStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.crud_query.started.count", Help: "Number of SQL SELECT, INSERT, UPDATE, DELETE statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaSavepointStarted = metric.Metadata{ + }) + MetaSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.started.count", Help: "Number of SQL SAVEPOINT statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "savepoint"), - } - MetaReleaseSavepointStarted = metric.Metadata{ + }) + MetaReleaseSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.release.started.count", Help: "Number of `RELEASE SAVEPOINT` statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "release_savepoint"), - } - MetaRollbackToSavepointStarted = metric.Metadata{ + }) + MetaRollbackToSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.rollback.started.count", Help: "Number of `ROLLBACK TO SAVEPOINT` statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_to_savepoint"), - } - MetaRestartSavepointStarted = metric.Metadata{ + }) + MetaRestartSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: 
"sql.restart_savepoint.started.count", Help: "Number of `SAVEPOINT cockroach_restart` statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "restart_savepoint"), - } - MetaReleaseRestartSavepointStarted = metric.Metadata{ + }) + MetaReleaseRestartSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.restart_savepoint.release.started.count", Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "release_restart_savepoint"), - } - MetaRollbackToRestartSavepointStarted = metric.Metadata{ + }) + MetaRollbackToRestartSavepointStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.restart_savepoint.rollback.started.count", Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_to_restart_savepoint"), - } - MetaDdlStarted = metric.Metadata{ + }) + MetaDdlStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.ddl.started.count", Help: "Number of SQL DDL statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "ddl"), - } - MetaCopyStarted = metric.Metadata{ + }) + MetaCopyStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.copy.started.count", Help: "Number of COPY SQL statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "copy"), - } - MetaCopyNonAtomicStarted = metric.Metadata{ + }) + MetaCopyNonAtomicStarted = metric.InitMetadata(metric.Metadata{ Name: 
"sql.copy.nonatomic.started.count", Help: "Number of non-atomic COPY SQL statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaCallStoredProcStarted = metric.Metadata{ + }) + MetaCallStoredProcStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.call_stored_proc.started.count", Help: "Number of invocation of stored procedures via CALL statements", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "call"), - } - MetaMiscStarted = metric.Metadata{ + }) + MetaMiscStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.misc.started.count", Help: "Number of other SQL statements started", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.started.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "misc"), - } - MetaRoutineSelectStarted = metric.Metadata{ + }) + MetaRoutineSelectStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.select.started.count", Help: "Number of SQL SELECT statements started within routine invocation", Measurement: "SQL Statements", @@ -1202,8 +1202,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "routine_started_select"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. 
Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineUpdateStarted = metric.Metadata{ + }) + MetaRoutineUpdateStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.update.started.count", Help: "Number of SQL UPDATE statements started within routine invocation", Measurement: "SQL Statements", @@ -1212,8 +1212,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "routine_started_update"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineInsertStarted = metric.Metadata{ + }) + MetaRoutineInsertStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.insert.started.count", Help: "Number of SQL INSERT statements started within routine invocation", Measurement: "SQL Statements", @@ -1222,8 +1222,8 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "routine_started_insert"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. 
Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineDeleteStarted = metric.Metadata{ + }) + MetaRoutineDeleteStarted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.delete.started.count", Help: "Number of SQL DELETE statements started within routine invocation", Measurement: "SQL Statements", @@ -1232,17 +1232,17 @@ var ( StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "routine_started_delete"), Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } + }) // Below are the metadata for the statement executed counters. - MetaQueryExecuted = metric.Metadata{ + MetaQueryExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.query.count", Help: "Number of SQL operations started including queries, and transaction control statements", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, Visibility: metric.Metadata_SUPPORT, - } - MetaTxnBeginExecuted = metric.Metadata{ + }) + MetaTxnBeginExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.begin.count", Help: "Number of SQL transaction BEGIN statements successfully executed", Measurement: "SQL Statements", @@ -1252,8 +1252,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric reflects workload volume by counting explicit transactions. 
Use this metric to determine whether explicit transactions can be refactored as implicit transactions (individual statements).", - } - MetaTxnCommitExecuted = metric.Metadata{ + }) + MetaTxnCommitExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.commit.count", Help: "Number of SQL transaction COMMIT statements successfully executed", Measurement: "SQL Statements", @@ -1263,8 +1263,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric shows the number of transactions that completed successfully. This metric can be used as a proxy to measure the number of successful explicit transactions.", - } - MetaTxnRollbackExecuted = metric.Metadata{ + }) + MetaTxnRollbackExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.rollback.count", Help: "Number of SQL transaction ROLLBACK statements successfully executed", Measurement: "SQL Statements", @@ -1274,32 +1274,32 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric shows the number of orderly transaction rollbacks. 
A persistently high number of rollbacks may negatively impact the workload performance and needs to be investigated.", - } - MetaTxnPrepareExecuted = metric.Metadata{ + }) + MetaTxnPrepareExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.prepare.count", Help: "Number of SQL PREPARE TRANSACTION statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "prepare_transaction"), - } - MetaTxnCommitPreparedExecuted = metric.Metadata{ + }) + MetaTxnCommitPreparedExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.commit_prepared.count", Help: "Number of SQL COMMIT PREPARED statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "commit_prepared"), - } - MetaTxnRollbackPreparedExecuted = metric.Metadata{ + }) + MetaTxnRollbackPreparedExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.rollback_prepared.count", Help: "Number of SQL ROLLBACK PREPARED statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_prepared"), - } - MetaSelectExecuted = metric.Metadata{ + }) + MetaSelectExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.select.count", Help: "Number of SQL SELECT statements successfully executed", Measurement: "SQL Statements", @@ -1309,8 +1309,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. 
For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaUpdateExecuted = metric.Metadata{ + }) + MetaUpdateExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.update.count", Help: "Number of SQL UPDATE statements successfully executed", Measurement: "SQL Statements", @@ -1320,8 +1320,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaInsertExecuted = metric.Metadata{ + }) + MetaInsertExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.insert.count", Help: "Number of SQL INSERT statements successfully executed", Measurement: "SQL Statements", @@ -1331,8 +1331,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. 
Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaDeleteExecuted = metric.Metadata{ + }) + MetaDeleteExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.delete.count", Help: "Number of SQL DELETE statements successfully executed", Measurement: "SQL Statements", @@ -1342,62 +1342,62 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaCRUDExecuted = metric.Metadata{ + }) + MetaCRUDExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.crud_query.count", Help: "Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaSavepointExecuted = metric.Metadata{ + }) + MetaSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.count", Help: "Number of SQL SAVEPOINT statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "savepoint"), - } - MetaReleaseSavepointExecuted = metric.Metadata{ + }) + MetaReleaseSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.release.count", Help: "Number of `RELEASE SAVEPOINT` statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, 
"release_savepoint"), - } - MetaRollbackToSavepointExecuted = metric.Metadata{ + }) + MetaRollbackToSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.savepoint.rollback.count", Help: "Number of `ROLLBACK TO SAVEPOINT` statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_to_savepoint"), - } - MetaRestartSavepointExecuted = metric.Metadata{ + }) + MetaRestartSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.restart_savepoint.count", Help: "Number of `SAVEPOINT cockroach_restart` statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "restart_savepoint"), - } - MetaReleaseRestartSavepointExecuted = metric.Metadata{ + }) + MetaReleaseRestartSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.restart_savepoint.release.count", Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "release_restart_savepoint"), - } - MetaRollbackToRestartSavepointExecuted = metric.Metadata{ + }) + MetaRollbackToRestartSavepointExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.restart_savepoint.rollback.count", Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "rollback_to_restart_savepoint"), - } - MetaDdlExecuted = metric.Metadata{ + }) + MetaDdlExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.ddl.count", Help: "Number of SQL DDL statements successfully executed", Measurement: "SQL Statements", @@ -1407,38 +1407,38 
@@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaCopyExecuted = metric.Metadata{ + }) + MetaCopyExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.copy.count", Help: "Number of COPY SQL statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "copy"), - } - MetaCopyNonAtomicExecuted = metric.Metadata{ + }) + MetaCopyNonAtomicExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.copy.nonatomic.count", Help: "Number of non-atomic COPY SQL statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaCallStoredProcExecuted = metric.Metadata{ + }) + MetaCallStoredProcExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.call_stored_proc.count", Help: "Number of successfully executed stored procedure calls", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "call"), - } - MetaMiscExecuted = metric.Metadata{ + }) + MetaMiscExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.misc.count", Help: "Number of other SQL statements successfully executed", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, LabeledName: "sql.count", StaticLabels: metric.MakeLabelPairs(metric.LabelQueryType, "misc"), - } - 
MetaRoutineSelectExecuted = metric.Metadata{ + }) + MetaRoutineSelectExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.select.count", Help: "Number of SQL SELECT statements successfully executed within routine invocation", Measurement: "SQL Statements", @@ -1448,8 +1448,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineUpdateExecuted = metric.Metadata{ + }) + MetaRoutineUpdateExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.update.count", Help: "Number of SQL UPDATE statements successfully executed within routine invocation", Measurement: "SQL Statements", @@ -1459,8 +1459,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. 
Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineInsertExecuted = metric.Metadata{ + }) + MetaRoutineInsertExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.insert.count", Help: "Number of SQL INSERT statements successfully executed within routine invocation", Measurement: "SQL Statements", @@ -1470,8 +1470,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } - MetaRoutineDeleteExecuted = metric.Metadata{ + }) + MetaRoutineDeleteExecuted = metric.InitMetadata(metric.Metadata{ Name: "sql.routine.delete.count", Help: "Number of SQL DELETE statements successfully executed within routine invocation", Measurement: "SQL Statements", @@ -1481,161 +1481,161 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the SQL Activity pages to investigate interesting outliers or patterns. For example, on the Transactions page and the Statements page, sort on the Execution Count column. To find problematic sessions, on the Sessions page, sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application.", - } + }) // Miscellaneous metrics. 
- MetaSQLTxnContended = metric.Metadata{ + MetaSQLTxnContended = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.contended.count", Help: "Number of SQL transactions experienced contention", Measurement: "Contention", Unit: metric.Unit_COUNT, - } - MetaTxnUpgradedFromWeakIsolation = metric.Metadata{ + }) + MetaTxnUpgradedFromWeakIsolation = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.upgraded_iso_level.count", Help: "Number of times a weak isolation level was automatically upgraded to a stronger one", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaSQLStatsMemMaxBytes = metric.Metadata{ + }) + MetaSQLStatsMemMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.mem.max", Help: "Memory usage for fingerprint storage", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - MetaSQLStatsMemCurBytes = metric.Metadata{ + }) + MetaSQLStatsMemCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.mem.current", Help: "Current memory usage for fingerprint storage", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - MetaReportedSQLStatsMemMaxBytes = metric.Metadata{ + }) + MetaReportedSQLStatsMemMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.reported.mem.max", Help: "Memory usage for reported fingerprint storage", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - MetaReportedSQLStatsMemCurBytes = metric.Metadata{ + }) + MetaReportedSQLStatsMemCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.reported.mem.current", Help: "Current memory usage for reported fingerprint storage", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - MetaDiscardedSQLStats = metric.Metadata{ + }) + MetaDiscardedSQLStats = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.discarded.current", Help: "Number of fingerprint statistics being discarded", Measurement: "Discarded SQL Stats", Unit: metric.Unit_COUNT, - } - MetaSQLStatsFlushesSuccessful = metric.Metadata{ + }) + MetaSQLStatsFlushesSuccessful = 
metric.InitMetadata(metric.Metadata{ Name: "sql.stats.flushes.successful", Help: "Number of times SQL Stats are flushed successfully to persistent storage", Measurement: "successful flushes", Unit: metric.Unit_COUNT, - } - MetaSQLStatsFlushFingerprintCount = metric.Metadata{ + }) + MetaSQLStatsFlushFingerprintCount = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.flush.fingerprint.count", Help: "The number of unique statement and transaction fingerprints included in the SQL Stats flush", Measurement: "statement & transaction fingerprints", Unit: metric.Unit_COUNT, - } - MetaSQLStatsFlushDoneSignalsIgnored = metric.Metadata{ + }) + MetaSQLStatsFlushDoneSignalsIgnored = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.flush.done_signals.ignored", Help: "Number of times the SQL Stats activity update job ignored the signal sent to it indicating " + "a flush has completed", Measurement: "flush done signals ignored", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - MetaSQLStatsFlushesFailed = metric.Metadata{ + }) + MetaSQLStatsFlushesFailed = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.flushes.failed", Help: "Number of attempted SQL Stats flushes that failed with errors", Measurement: "failed flushes", Unit: metric.Unit_COUNT, - } - MetaSQLStatsFlushLatency = metric.Metadata{ + }) + MetaSQLStatsFlushLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.flush.latency", Help: "The latency of SQL Stats flushes to persistent storage. 
Includes failed flush attempts", Measurement: "nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - MetaSQLStatsRemovedRows = metric.Metadata{ + }) + MetaSQLStatsRemovedRows = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.cleanup.rows_removed", Help: "Number of stale statistics rows that are removed", Measurement: "SQL Stats Cleanup", Unit: metric.Unit_COUNT, - } - MetaSQLTxnStatsCollectionOverhead = metric.Metadata{ + }) + MetaSQLTxnStatsCollectionOverhead = metric.InitMetadata(metric.Metadata{ Name: "sql.stats.txn_stats_collection.duration", Help: "Time took in nanoseconds to collect transaction stats", Measurement: "SQL Transaction Stats Collection Overhead", Unit: metric.Unit_NANOSECONDS, - } - MetaTxnRowsWrittenLog = metric.Metadata{ + }) + MetaTxnRowsWrittenLog = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.transaction_rows_written_log.count", Help: "Number of transactions logged because of transaction_rows_written_log guardrail", Measurement: "Logged transactions", Unit: metric.Unit_COUNT, - } - MetaTxnRowsWrittenErr = metric.Metadata{ + }) + MetaTxnRowsWrittenErr = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.transaction_rows_written_err.count", Help: "Number of transactions errored because of transaction_rows_written_err guardrail", Measurement: "Errored transactions", Unit: metric.Unit_COUNT, - } - MetaTxnRowsReadLog = metric.Metadata{ + }) + MetaTxnRowsReadLog = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.transaction_rows_read_log.count", Help: "Number of transactions logged because of transaction_rows_read_log guardrail", Measurement: "Logged transactions", Unit: metric.Unit_COUNT, - } - MetaTxnRowsReadErr = metric.Metadata{ + }) + MetaTxnRowsReadErr = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.transaction_rows_read_err.count", Help: "Number of transactions errored because of transaction_rows_read_err guardrail", Measurement: "Errored transactions", Unit: metric.Unit_COUNT, - } - 
MetaFullTableOrIndexScanRejected = metric.Metadata{ + }) + MetaFullTableOrIndexScanRejected = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.full_scan_rejected.count", Help: "Number of full table or index scans that have been rejected because of `disallow_full_table_scans` guardrail", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaTxnRetry = metric.Metadata{ + }) + MetaTxnRetry = metric.InitMetadata(metric.Metadata{ Name: "sql.txn.auto_retry.count", Help: "Number of SQL transaction automatic retries", Measurement: "SQL Transactions", Unit: metric.Unit_COUNT, - } - MetaStatementRetry = metric.Metadata{ + }) + MetaStatementRetry = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.auto_retry.count", Help: "Number of SQL statement automatic retries", Measurement: "SQL Statements", Unit: metric.Unit_COUNT, - } - MetaStatementRowsRead = metric.Metadata{ + }) + MetaStatementRowsRead = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.rows_read.count", Help: "Number of rows read by SQL statements", Measurement: "Rows", Unit: metric.Unit_COUNT, - } - MetaStatementBytesRead = metric.Metadata{ + }) + MetaStatementBytesRead = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.bytes_read.count", Help: "Number of bytes read by SQL statements", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } - MetaStatementIndexRowsWritten = metric.Metadata{ + }) + MetaStatementIndexRowsWritten = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.index_rows_written.count", Help: "Number of primary and secondary index rows modified by SQL statements", Measurement: "Rows", Unit: metric.Unit_COUNT, - } - MetaStatementIndexBytesWritten = metric.Metadata{ + }) + MetaStatementIndexBytesWritten = metric.InitMetadata(metric.Metadata{ Name: "sql.statements.index_bytes_written.count", Help: "Number of primary and secondary index bytes modified by SQL statements", Measurement: "Bytes", Unit: metric.Unit_BYTES, - } + }) ) func 
getMetricMeta(meta metric.Metadata, internal bool) metric.Metadata { diff --git a/pkg/sql/execinfra/merge_metrics.go b/pkg/sql/execinfra/merge_metrics.go index 28b0ccb80943..67f7d930f045 100644 --- a/pkg/sql/execinfra/merge_metrics.go +++ b/pkg/sql/execinfra/merge_metrics.go @@ -27,40 +27,40 @@ func (BulkMergeMetrics) MetricStruct() {} var _ metric.Struct = BulkMergeMetrics{} var ( - metaDistMergeIndexCount = metric.Metadata{ + metaDistMergeIndexCount = metric.InitMetadata(metric.Metadata{ Name: "sql.dist_merge.index.count", Help: "Number of distributed merge operations for index backfill", Measurement: "Operations", Unit: metric.Unit_COUNT, LabeledName: "sql.dist_merge.count", StaticLabels: metric.MakeLabelPairs(metric.LabelType, "index"), - } - metaDistMergeImportCount = metric.Metadata{ + }) + metaDistMergeImportCount = metric.InitMetadata(metric.Metadata{ Name: "sql.dist_merge.import.count", Help: "Number of distributed merge operations for import", Measurement: "Operations", Unit: metric.Unit_COUNT, LabeledName: "sql.dist_merge.count", StaticLabels: metric.MakeLabelPairs(metric.LabelType, "import"), - } - metaDistMergeRPCMemory = metric.Metadata{ + }) + metaDistMergeRPCMemory = metric.InitMetadata(metric.Metadata{ Name: "sql.dist_merge.rpc_memory_reserved.bytes", Help: "Memory reserved for RPC transport buffers in the final merge iteration", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaDistMergeMapPhaseSSTs = metric.Metadata{ + }) + metaDistMergeMapPhaseSSTs = metric.InitMetadata(metric.Metadata{ Name: "sql.dist_merge.map_phase.sst_count", Help: "Number of SSTs from the map phase input to the first merge iteration", Measurement: "SSTs", Unit: metric.Unit_COUNT, - } - metaDistMergeFirstIterOutputSSTs = metric.Metadata{ + }) + metaDistMergeFirstIterOutputSSTs = metric.InitMetadata(metric.Metadata{ Name: "sql.dist_merge.first_iter.output_sst_count", Help: "Number of SSTs produced by the first (local) merge iteration", Measurement: "SSTs", Unit: 
metric.Unit_COUNT, - } + }) ) // mergeMetricsLog10Int64Times1000 duplicates log10int64times1000 from diff --git a/pkg/sql/execinfra/metrics.go b/pkg/sql/execinfra/metrics.go index b8fba07f6f78..dd8eba2305b3 100644 --- a/pkg/sql/execinfra/metrics.go +++ b/pkg/sql/execinfra/metrics.go @@ -42,25 +42,25 @@ func (DistSQLMetrics) MetricStruct() {} var _ metric.Struct = DistSQLMetrics{} var ( - metaQueriesActive = metric.Metadata{ + metaQueriesActive = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.queries.active", Help: "Number of invocations of the execution engine currently active (multiple of which may occur for a single SQL statement)", Measurement: "DistSQL runs", Unit: metric.Unit_COUNT, - } - metaQueriesTotal = metric.Metadata{ + }) + metaQueriesTotal = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.queries.total", Help: "Number of invocations of the execution engine executed (multiple of which may occur for a single SQL statement)", Measurement: "DistSQL runs", Unit: metric.Unit_COUNT, - } - metaDistributedCount = metric.Metadata{ + }) + metaDistributedCount = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.distributed_exec.count", Help: "Number of invocations of the execution engine executed with full or partial distribution (multiple of which may occur for a single SQL statement)", Measurement: "DistSQL runs", Unit: metric.Unit_COUNT, - } - metaContendedQueriesCount = metric.Metadata{ + }) + metaContendedQueriesCount = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.contended_queries.count", Help: "Number of SQL queries that experienced contention", Measurement: "Queries", @@ -68,97 +68,97 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This metric is incremented whenever there is a non-trivial amount of contention experienced by a statement whether read-write or write-write conflicts. 
Monitor this metric to correlate possible workload performance issues to contention conflicts.`, - } - metaCumulativeContentionNanos = metric.Metadata{ + }) + metaCumulativeContentionNanos = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.cumulative_contention_nanos", Help: "Cumulative contention across all queries (in nanoseconds)", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - metaFlowsActive = metric.Metadata{ + }) + metaFlowsActive = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.flows.active", Help: "Number of distributed SQL flows currently active", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaFlowsTotal = metric.Metadata{ + }) + metaFlowsTotal = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.flows.total", Help: "Number of distributed SQL flows executed", Measurement: "Flows", Unit: metric.Unit_COUNT, - } - metaRunnerReqParallelCount = metric.Metadata{ + }) + metaRunnerReqParallelCount = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.parallel_runner.count", Help: "Number of SetupFlowRequest RPCs executed concurrently via DistSQL runners", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaRunnerReqSequentialCount = metric.Metadata{ + }) + metaRunnerReqSequentialCount = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.sequential_runner.count", Help: "Number of SetupFlowRequest RPCs executed sequentially via the main gateway goroutine", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - metaMemMaxBytes = metric.Metadata{ + }) + metaMemMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.distsql.max", Help: "Memory usage per sql statement for distsql", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaMemCurBytes = metric.Metadata{ + }) + metaMemCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.mem.distsql.current", Help: "Current sql statement memory usage for distsql", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - metaVecOpenFDs = 
metric.Metadata{ + }) + metaVecOpenFDs = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.vec.openfds", Help: "Current number of open file descriptors used by vectorized external storage", Measurement: "Files", Unit: metric.Unit_COUNT, - } - metaDiskCurBytes = metric.Metadata{ + }) + metaDiskCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.disk.distsql.current", Help: "Current sql statement disk usage for distsql", Measurement: "Disk", Unit: metric.Unit_BYTES, - } - metaDiskMaxBytes = metric.Metadata{ + }) + metaDiskMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.disk.distsql.max", Help: "Disk usage per sql statement for distsql", Measurement: "Disk", Unit: metric.Unit_BYTES, - } - metaQueriesSpilled = metric.Metadata{ + }) + metaQueriesSpilled = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.queries.spilled", Help: "Number of queries that have spilled to disk", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaSpilledBytesWritten = metric.Metadata{ + }) + metaSpilledBytesWritten = metric.InitMetadata(metric.Metadata{ Name: "sql.disk.distsql.spilled.bytes.written", Help: "Number of bytes written to temporary disk storage as a result of spilling", Measurement: "Disk", Unit: metric.Unit_BYTES, - } - metaSpilledBytesRead = metric.Metadata{ + }) + metaSpilledBytesRead = metric.InitMetadata(metric.Metadata{ Name: "sql.disk.distsql.spilled.bytes.read", Help: "Number of bytes read from temporary disk storage as a result of spilling", Measurement: "Disk", Unit: metric.Unit_BYTES, - } - metaDistErrorLocalRetryAttempts = metric.Metadata{ + }) + metaDistErrorLocalRetryAttempts = metric.InitMetadata(metric.Metadata{ Name: "sql.distsql.dist_query_rerun_locally.count", Help: "Total number of cases when distributed query error resulted in a local rerun", Measurement: "Queries", Unit: metric.Unit_COUNT, - } - metaDistErrorLocalRetryFailures = metric.Metadata{ + }) + metaDistErrorLocalRetryFailures = metric.InitMetadata(metric.Metadata{ 
Name: "sql.distsql.dist_query_rerun_locally.failure_count", Help: "Total number of cases when the local rerun of a distributed query resulted in an error", Measurement: "Queries", Unit: metric.Unit_COUNT, - } + }) ) // See pkg/sql/mem_metrics.go diff --git a/pkg/sql/inspect/inspect_metrics.go b/pkg/sql/inspect/inspect_metrics.go index fe6124bfbd5b..947eeb9e306c 100644 --- a/pkg/sql/inspect/inspect_metrics.go +++ b/pkg/sql/inspect/inspect_metrics.go @@ -27,36 +27,36 @@ var _ metric.Struct = (*InspectMetrics)(nil) func (InspectMetrics) MetricStruct() {} var ( - metaInspectRuns = metric.Metadata{ + metaInspectRuns = metric.InitMetadata(metric.Metadata{ Name: "jobs.inspect.runs", Help: "Number of INSPECT jobs executed", Measurement: "Jobs", Unit: metric.Unit_COUNT, - } - metaInspectRunsWithIssues = metric.Metadata{ + }) + metaInspectRunsWithIssues = metric.InitMetadata(metric.Metadata{ Name: "jobs.inspect.runs_with_issues", Help: "Number of INSPECT jobs that found at least one issue", Measurement: "Jobs", Unit: metric.Unit_COUNT, - } - metaInspectIssuesFound = metric.Metadata{ + }) + metaInspectIssuesFound = metric.InitMetadata(metric.Metadata{ Name: "jobs.inspect.issues_found", Help: "Total count of issues found by INSPECT jobs", Measurement: "Issues", Unit: metric.Unit_COUNT, - } - metaInspectSpansProcessed = metric.Metadata{ + }) + metaInspectSpansProcessed = metric.InitMetadata(metric.Metadata{ Name: "jobs.inspect.spans_processed", Help: "Number of spans processed by INSPECT jobs", Measurement: "Spans", Unit: metric.Unit_COUNT, - } - metaInspectNumActiveSpans = metric.Metadata{ + }) + metaInspectNumActiveSpans = metric.InitMetadata(metric.Metadata{ Name: "jobs.inspect.num_active_spans", Help: "Number of spans currently being processed by INSPECT jobs", Measurement: "Spans", Unit: metric.Unit_COUNT, - } + }) ) // MakeInspectMetrics instantiates the metrics for INSPECT jobs. 
diff --git a/pkg/sql/mem_metrics.go b/pkg/sql/mem_metrics.go index f7157ce567c7..f05fd6b4a2ef 100644 --- a/pkg/sql/mem_metrics.go +++ b/pkg/sql/mem_metrics.go @@ -60,12 +60,12 @@ var _ metric.Struct = MemoryMetrics{} const log10int64times1000 = 19 * 1000 func makeMemMetricMetadata(name, help string) metric.Metadata { - return metric.Metadata{ + return metric.InitMetadata(metric.Metadata{ Name: name, Help: help, Measurement: "Memory", Unit: metric.Unit_BYTES, - } + }) } func makeMemMetricHistogram( diff --git a/pkg/sql/mvcc_statistics_update_job.go b/pkg/sql/mvcc_statistics_update_job.go index 1563117a5c60..b780f075262d 100644 --- a/pkg/sql/mvcc_statistics_update_job.go +++ b/pkg/sql/mvcc_statistics_update_job.go @@ -168,12 +168,12 @@ func init() { jobspb.TypeMVCCStatisticsUpdate, func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer { exporter := &mvccStatisticsUpdateJob{job: job, st: settings} - exporter.dynamicMetrics.livebytes = metric.NewGauge(metric.Metadata{ + exporter.dynamicMetrics.livebytes = metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.aggregated_livebytes", Help: "Aggregated number of bytes of live data (keys plus values)", Measurement: "Storage", Unit: metric.Unit_BYTES, - }) + })) return exporter }, jobs.DisablesTenantCostControl, diff --git a/pkg/sql/pgwire/pre_serve.go b/pkg/sql/pgwire/pre_serve.go index 78117ea4ecc8..c6b5c3219d5b 100644 --- a/pkg/sql/pgwire/pre_serve.go +++ b/pkg/sql/pgwire/pre_serve.go @@ -33,42 +33,42 @@ import ( // Fully-qualified names for metrics. 
var ( - MetaPreServeNewConns = metric.Metadata{ + MetaPreServeNewConns = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.new_conns", Help: "Number of SQL connections created prior to routing the connection to the target SQL server", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaPreServeBytesIn = metric.Metadata{ + }) + MetaPreServeBytesIn = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.bytesin", Help: "Number of SQL bytes received prior to routing the connection to the target SQL server", Measurement: "SQL Bytes", Unit: metric.Unit_BYTES, - } - MetaPreServeBytesOut = metric.Metadata{ + }) + MetaPreServeBytesOut = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.bytesout", Help: "Number of SQL bytes sent prior to routing the connection to the target SQL server", Measurement: "SQL Bytes", Unit: metric.Unit_BYTES, - } - MetaPreServeConnFailures = metric.Metadata{ + }) + MetaPreServeConnFailures = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.conn.failures", Help: "Number of SQL connection failures prior to routing the connection to the target SQL server", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaPreServeMaxBytes = metric.Metadata{ + }) + MetaPreServeMaxBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.mem.max", Help: "Memory usage for SQL connections prior to routing the connection to the target SQL server", Measurement: "Memory", Unit: metric.Unit_BYTES, - } - MetaPreServeCurBytes = metric.Metadata{ + }) + MetaPreServeCurBytes = metric.InitMetadata(metric.Metadata{ Name: "sql.pre_serve.mem.cur", Help: "Current memory usage for SQL connections prior to routing the connection to the target SQL server", Measurement: "Memory", Unit: metric.Unit_BYTES, - } + }) ) // PreServeConnHandler implements the early initialization of an incoming diff --git a/pkg/sql/pgwire/server.go b/pkg/sql/pgwire/server.go index 60d9d65655f8..94dcef817e17 100644 --- a/pkg/sql/pgwire/server.go +++ 
b/pkg/sql/pgwire/server.go @@ -119,7 +119,7 @@ const ( // Fully-qualified names for metrics. var ( - MetaConns = metric.Metadata{ + MetaConns = metric.InitMetadata(metric.Metadata{ Name: "sql.conns", Help: "Number of open SQL connections", Measurement: "Connections", @@ -127,8 +127,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `This metric shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. Review Connection Pooling.`, - } - MetaNewConns = metric.Metadata{ + }) + MetaNewConns = metric.InitMetadata(metric.Metadata{ Name: "sql.new_conns", Help: "Number of SQL connections created", Measurement: "Connections", @@ -136,26 +136,26 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application.`, - } - MetaConnsWaitingToHash = metric.Metadata{ + }) + MetaConnsWaitingToHash = metric.InitMetadata(metric.Metadata{ Name: "sql.conns_waiting_to_hash", Help: "Number of SQL connection attempts that are being throttled in order to limit password hashing concurrency", Measurement: "Connections", Unit: metric.Unit_COUNT, - } - MetaBytesIn = metric.Metadata{ + }) + MetaBytesIn = metric.InitMetadata(metric.Metadata{ Name: "sql.bytesin", Help: "Number of SQL bytes received", Measurement: "SQL Bytes", Unit: metric.Unit_BYTES, - } - MetaBytesOut = metric.Metadata{ + }) + MetaBytesOut = metric.InitMetadata(metric.Metadata{ Name: "sql.bytesout", Help: "Number of SQL bytes sent", Measurement: "SQL Bytes", Unit: metric.Unit_BYTES, - } - MetaConnLatency = metric.Metadata{ + }) + MetaConnLatency = metric.InitMetadata(metric.Metadata{ Name: "sql.conn.latency", Help: "Latency to 
establish and authenticate a SQL connection", Measurement: "Nanoseconds", @@ -163,8 +163,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "These metrics characterize the database connection latency which can affect the application performance, for example, by having slow startup times. Connection failures are not recorded in these metrics.", - } - MetaConnFailures = metric.Metadata{ + }) + MetaConnFailures = metric.InitMetadata(metric.Metadata{ Name: "sql.conn.failures", Help: "Number of SQL connection failures", Measurement: "Connections", @@ -172,33 +172,33 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric is incremented whenever a connection attempt fails for any reason, including timeouts.", - } - MetaPGWireCancelTotal = metric.Metadata{ + }) + MetaPGWireCancelTotal = metric.InitMetadata(metric.Metadata{ Name: "sql.pgwire_cancel.total", Help: "Number of pgwire query cancel requests", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - MetaPGWireCancelIgnored = metric.Metadata{ + }) + MetaPGWireCancelIgnored = metric.InitMetadata(metric.Metadata{ Name: "sql.pgwire_cancel.ignored", Help: "Number of pgwire query cancel requests that were ignored due to rate limiting", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - MetaPGWireCancelSuccessful = metric.Metadata{ + }) + MetaPGWireCancelSuccessful = metric.InitMetadata(metric.Metadata{ Name: "sql.pgwire_cancel.successful", Help: "Number of pgwire query cancel requests that were successful", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - MetaPGWirePipelineCount = metric.Metadata{ + }) + MetaPGWirePipelineCount = metric.InitMetadata(metric.Metadata{ Name: "sql.pgwire.pipeline.count", Help: "Number of pgwire commands received by the server that have not yet begun processing", Measurement: "Commands", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } - AuthJWTConnLatency = 
metric.Metadata{ + }) + AuthJWTConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.jwt.conn.latency", Help: "Latency to establish and authenticate a SQL connection using JWT Token", Measurement: "Nanoseconds", @@ -206,8 +206,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthCertConnLatency = metric.Metadata{ + }) + AuthCertConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.cert.conn.latency", Help: "Latency to establish and authenticate a SQL connection using certificate", Measurement: "Nanoseconds", @@ -215,8 +215,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthPassConnLatency = metric.Metadata{ + }) + AuthPassConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.password.conn.latency", Help: "Latency to establish and authenticate a SQL connection using password", Measurement: "Nanoseconds", @@ -224,8 +224,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthLDAPConnLatency = metric.Metadata{ + }) + AuthLDAPConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.ldap.conn.latency", Help: "Latency to establish and authenticate a SQL connection using LDAP", Measurement: "Nanoseconds", @@ -233,8 +233,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthGSSConnLatency = metric.Metadata{ + }) + AuthGSSConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.gss.conn.latency", Help: "Latency to establish and authenticate a SQL connection using GSS", Measurement: "Nanoseconds", @@ -242,8 +242,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthScramConnLatency = metric.Metadata{ + }) + AuthScramConnLatency = metric.InitMetadata(metric.Metadata{ Name: "auth.scram.conn.latency", Help: 
"Latency to establish and authenticate a SQL connection using SCRAM", Measurement: "Nanoseconds", @@ -251,8 +251,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthLDAPConnLatencyInternal = metric.Metadata{ + }) + AuthLDAPConnLatencyInternal = metric.InitMetadata(metric.Metadata{ Name: "auth.ldap.conn.latency.internal", Help: "Internal Auth Latency to establish and authenticate a SQL connection using LDAP(excludes external LDAP calls)", Measurement: "Nanoseconds", @@ -260,8 +260,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: `See Description.`, - } - AuthCertSANConnTotal = metric.Metadata{ + }) + AuthCertSANConnTotal = metric.InitMetadata(metric.Metadata{ Name: "auth.cert.san.conn.total", Help: "Total number of SQL connection attempts using SAN-based certificate authentication", Measurement: "Connections", @@ -269,8 +269,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric tracks all authentication attempts when SAN-based certificate validation is enabled. Compare with auth.cert.san.conn.success to calculate failure rate.", - } - AuthCertSANConnSuccess = metric.Metadata{ + }) + AuthCertSANConnSuccess = metric.InitMetadata(metric.Metadata{ Name: "auth.cert.san.conn.success", Help: "Number of successful SQL connections using SAN-based certificate authentication", Measurement: "Connections", @@ -278,7 +278,7 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_SQL, HowToUse: "This metric tracks successful authentications when SAN-based certificate validation is enabled. Use this to monitor adoption and success rate of SAN authentication. 
Failure rate = auth.cert.san.conn.total - auth.cert.san.conn.success.", - } + }) ) const ( diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index ac0b349f84d7..f16ab791ba4c 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -230,7 +230,7 @@ func (n *renameTableNode) startExec(params runParams) error { return err } - metadataUpdater := descmetadata.NewMetadataUpdater( + metadataUpdater := descmetadata.InitMetadataUpdater( ctx, p.InternalSQLTxn(), p.Descriptors(), diff --git a/pkg/sql/rowinfra/metrics.go b/pkg/sql/rowinfra/metrics.go index 7311725382ec..718aa67d5a27 100644 --- a/pkg/sql/rowinfra/metrics.go +++ b/pkg/sql/rowinfra/metrics.go @@ -10,20 +10,20 @@ import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( // MetaMaxRowSizeLog is metadata for the // sql.guardrails.max_row_size_log.count{.internal} metrics. - MetaMaxRowSizeLog = metric.Metadata{ + MetaMaxRowSizeLog = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.max_row_size_log.count", Help: "Number of rows observed violating sql.guardrails.max_row_size_log", Measurement: "Rows", Unit: metric.Unit_COUNT, - } + }) // MetaMaxRowSizeErr is metadata for the // sql.guardrails.max_row_size_err.count{.internal} metrics. 
- MetaMaxRowSizeErr = metric.Metadata{ + MetaMaxRowSizeErr = metric.InitMetadata(metric.Metadata{ Name: "sql.guardrails.max_row_size_err.count", Help: "Number of rows observed violating sql.guardrails.max_row_size_err", Measurement: "Rows", Unit: metric.Unit_COUNT, - } + }) ) // Metrics holds metrics measuring calls into the KV layer by various parts of diff --git a/pkg/sql/schema_change_plan_node.go b/pkg/sql/schema_change_plan_node.go index 7a4d7948599b..c83e8c6edc12 100644 --- a/pkg/sql/schema_change_plan_node.go +++ b/pkg/sql/schema_change_plan_node.go @@ -290,7 +290,7 @@ func newSchemaChangerTxnRunDependencies( schemaChangerJobID jobspb.JobID, stmts []string, ) scexec.Dependencies { - metaDataUpdater := descmetadata.NewMetadataUpdater( + metaDataUpdater := descmetadata.InitMetadataUpdater( ctx, txn, descriptors, diff --git a/pkg/sql/schema_changer_metrics.go b/pkg/sql/schema_changer_metrics.go index 0e214562a2d8..36d67825c7bc 100644 --- a/pkg/sql/schema_changer_metrics.go +++ b/pkg/sql/schema_changer_metrics.go @@ -18,12 +18,12 @@ import ( // TODO(ajwerner): Add many more metrics. var ( - metaObjects = metric.Metadata{ + metaObjects = metric.InitMetadata(metric.Metadata{ Name: "sql.schema_changer.object_count", Help: "Counter of the number of objects in the cluster", Measurement: "Objects", Unit: metric.Unit_COUNT, - } + }) ) // SchemaChangerMetrics are metrics corresponding to the schema changer. 
diff --git a/pkg/sql/schemachanger/scjob/job.go b/pkg/sql/schemachanger/scjob/job.go index 1c13e70e6fe5..4c86ff89872a 100644 --- a/pkg/sql/schemachanger/scjob/job.go +++ b/pkg/sql/schemachanger/scjob/job.go @@ -134,7 +134,7 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) execCfg.Settings, execCfg.Validator, func(ctx context.Context, descriptors *descs.Collection, txn isql.Txn) scexec.DescriptorMetadataUpdater { - return descmetadata.NewMetadataUpdater(ctx, + return descmetadata.InitMetadataUpdater(ctx, txn, descriptors, &execCfg.Settings.SV, diff --git a/pkg/sql/sql_activity_update_job.go b/pkg/sql/sql_activity_update_job.go index a49999cb42e7..e9e5ef6a97c5 100644 --- a/pkg/sql/sql_activity_update_job.go +++ b/pkg/sql/sql_activity_update_job.go @@ -139,28 +139,28 @@ func (m ActivityUpdaterMetrics) MetricStruct() {} func newActivityUpdaterMetrics() metric.Struct { return ActivityUpdaterMetrics{ - NumFailedUpdates: metric.NewCounter(metric.Metadata{ + NumFailedUpdates: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.stats.activity.updates.failed", Help: "Number of update attempts made by the SQL activity updater job that failed with errors", Measurement: "failed updates", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), - NumSuccessfulUpdates: metric.NewCounter(metric.Metadata{ + })), + NumSuccessfulUpdates: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.stats.activity.updates.successful", Help: "Number of successful updates made by the SQL activity updater job", Measurement: "successful updates", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), + })), UpdateLatency: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "sql.stats.activity.update.latency", Help: "The latency of updates made by the SQL activity updater job. 
Includes failed update attempts", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, MetricType: io_prometheus_client.MetricType_HISTOGRAM, - }, + }), Duration: base.DefaultHistogramWindowInterval(), BucketConfig: metric.LongRunning60mLatencyBuckets, Mode: metric.HistogramModePrometheus, diff --git a/pkg/sql/sqlliveness/slstorage/metrics.go b/pkg/sql/sqlliveness/slstorage/metrics.go index d1c5c6bb055e..5b3c9c6b0839 100644 --- a/pkg/sql/sqlliveness/slstorage/metrics.go +++ b/pkg/sql/sqlliveness/slstorage/metrics.go @@ -11,48 +11,48 @@ import ( ) var ( - metaIsAliveCacheHits = metric.Metadata{ + metaIsAliveCacheHits = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.is_alive.cache_hits", Help: "Number of calls to IsAlive that return from the cache", Measurement: "Calls", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaIsAliveCacheMisses = metric.Metadata{ + }) + metaIsAliveCacheMisses = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.is_alive.cache_misses", Help: "Number of calls to IsAlive that do not return from the cache", Measurement: "Calls", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaSessionsDeleted = metric.Metadata{ + }) + metaSessionsDeleted = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.sessions_deleted", Help: "Number of expired sessions which have been deleted", Measurement: "Sessions", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaSessionDeletionRuns = metric.Metadata{ + }) + metaSessionDeletionRuns = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.sessions_deletion_runs", Help: "Number of calls to delete sessions which have been performed", Measurement: "Sessions", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaWriteSuccesses = metric.Metadata{ + }) + metaWriteSuccesses = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.write_successes", 
Help: "Number of update or insert calls successfully performed", Measurement: "Writes", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - metaWriteFailures = metric.Metadata{ + }) + metaWriteFailures = metric.InitMetadata(metric.Metadata{ Name: "sqlliveness.write_failures", Help: "Number of update or insert calls which have failed", Measurement: "Writes", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) // Metrics is a metric.Struct which holds metrics for slstorage. diff --git a/pkg/sql/sqlstats/aggregate.go b/pkg/sql/sqlstats/aggregate.go index 224b707dbe51..af1ee8f43559 100644 --- a/pkg/sql/sqlstats/aggregate.go +++ b/pkg/sql/sqlstats/aggregate.go @@ -72,11 +72,11 @@ func NewStmtStatsAggregator( func() *StmtStatistics { return &StmtStatistics{ ServiceLatency: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "stmt.svc.latency", Measurement: "Aggregate service latency of statement executions", Unit: metric.Unit_NANOSECONDS, - }, + }), Duration: base.DefaultHistogramWindowInterval(), BucketConfig: metric.IOLatencyBuckets, }), diff --git a/pkg/sql/sqlstats/insights/insights.go b/pkg/sql/sqlstats/insights/insights.go index 562122efa6b0..8ecbe9a00126 100644 --- a/pkg/sql/sqlstats/insights/insights.go +++ b/pkg/sql/sqlstats/insights/insights.go @@ -101,27 +101,27 @@ var _ metric.Struct = Metrics{} // NewMetrics builds a new instance of our Metrics struct. 
func NewMetrics() Metrics { return Metrics{ - Fingerprints: metric.NewGauge(metric.Metadata{ + Fingerprints: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.insights.anomaly_detection.fingerprints", Help: "Current number of statement fingerprints being monitored for anomaly detection", Measurement: "Fingerprints", Unit: metric.Unit_COUNT, MetricType: prometheus.MetricType_GAUGE, - }), - Memory: metric.NewGauge(metric.Metadata{ + })), + Memory: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.insights.anomaly_detection.memory", Help: "Current memory used to support anomaly detection", Measurement: "Memory", Unit: metric.Unit_BYTES, MetricType: prometheus.MetricType_GAUGE, - }), - Evictions: metric.NewCounter(metric.Metadata{ + })), + Evictions: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.insights.anomaly_detection.evictions", Help: "Evictions of fingerprint latency summaries due to memory pressure", Measurement: "Evictions", Unit: metric.Unit_COUNT, MetricType: prometheus.MetricType_COUNTER, - }), + })), } } diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go index 33a44eeb7138..cfd976d302d5 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go @@ -56,7 +56,7 @@ func TestSQLStatsCompactorNilTestingKnobCheck(t *testing.T) { statsCompactor := persistedsqlstats.NewStatsCompactor( server.ClusterSettings(), server.InternalDB().(isql.DB), - metric.NewCounter(metric.Metadata{}), + metric.NewCounter(metric.InitMetadata(metric.Metadata{})), nil, /* knobs */ ) @@ -211,7 +211,7 @@ func TestSQLStatsCompactor(t *testing.T) { statsCompactor := persistedsqlstats.NewStatsCompactor( server.ClusterSettings(), server.InternalDB().(isql.DB), - metric.NewCounter(metric.Metadata{}), + metric.NewCounter(metric.InitMetadata(metric.Metadata{})), sqlStatsKnobs, ) diff --git 
a/pkg/sql/sqlstats/sslocal/sql_stats_ingestor.go b/pkg/sql/sqlstats/sslocal/sql_stats_ingestor.go index e690646f3672..a2acbb33bc63 100644 --- a/pkg/sql/sqlstats/sslocal/sql_stats_ingestor.go +++ b/pkg/sql/sqlstats/sslocal/sql_stats_ingestor.go @@ -46,20 +46,20 @@ var _ metric.Struct = Metrics{} // NewIngesterMetrics builds a new instance of our Metrics struct. func NewIngesterMetrics() Metrics { return Metrics{ - NumProcessed: metric.NewCounter(metric.Metadata{ + NumProcessed: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "sql.stats.ingester.num_processed", Help: "Number of items processed by the SQL stats ingester", Measurement: "Items", Unit: metric.Unit_COUNT, MetricType: prometheus.MetricType_COUNTER, - }), - QueueSize: metric.NewGauge(metric.Metadata{ + })), + QueueSize: metric.NewGauge(metric.InitMetadata(metric.Metadata{ Name: "sql.stats.ingester.queue_size", Help: "Current number of items queued in the SQL stats ingester", Measurement: "Items", Unit: metric.Unit_COUNT, MetricType: prometheus.MetricType_GAUGE, - }), + })), } } diff --git a/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go index 95efd502aab0..ff003a86ad3b 100644 --- a/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go +++ b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go @@ -208,33 +208,33 @@ func (TableMetadataUpdateJobMetrics) MetricStruct() {} func newTableMetadataUpdateJobMetrics() metric.Struct { return TableMetadataUpdateJobMetrics{ - NumRuns: metric.NewCounter(metric.Metadata{ + NumRuns: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "obs.tablemetadata.update_job.runs", Help: "The total number of runs of the update table metadata job.", Measurement: "Executions", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), - UpdatedTables: metric.NewCounter(metric.Metadata{ + })), + UpdatedTables: 
metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "obs.tablemetadata.update_job.table_updates", Help: "The total number of rows that have been updated in system.table_metadata", Measurement: "Rows Updated", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), - Errors: metric.NewCounter(metric.Metadata{ + })), + Errors: metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "obs.tablemetadata.update_job.errors", Help: "The total number of errors that have been emitted from the update table metadata job.", Measurement: "Errors", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - }), + })), Duration: metric.NewHistogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "obs.tablemetadata.update_job.duration", Help: "Time spent running the update table metadata job.", Measurement: "Duration", - Unit: metric.Unit_NANOSECONDS}, + Unit: metric.Unit_NANOSECONDS}), Duration: base.DefaultHistogramWindowInterval(), BucketConfig: metric.LongRunning60mLatencyBuckets, Mode: metric.HistogramModePrometheus, diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 240c6601a818..1f5b26e1c049 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -74,34 +74,34 @@ var TempObjectCleanupBatchSize = settings.RegisterIntSetting( settings.WithPublic) var ( - temporaryObjectCleanerActiveCleanersMetric = metric.Metadata{ + temporaryObjectCleanerActiveCleanersMetric = metric.InitMetadata(metric.Metadata{ Name: "sql.temp_object_cleaner.active_cleaners", Help: "number of cleaner tasks currently running on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_GAUGE, - } - temporaryObjectCleanerSchemasToDeleteMetric = metric.Metadata{ + }) + temporaryObjectCleanerSchemasToDeleteMetric = metric.InitMetadata(metric.Metadata{ Name: 
"sql.temp_object_cleaner.schemas_to_delete", Help: "number of schemas to be deleted by the temp object cleaner on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - temporaryObjectCleanerSchemasDeletionErrorMetric = metric.Metadata{ + }) + temporaryObjectCleanerSchemasDeletionErrorMetric = metric.InitMetadata(metric.Metadata{ Name: "sql.temp_object_cleaner.schemas_deletion_error", Help: "number of errored schema deletions by the temp object cleaner on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - temporaryObjectCleanerSchemasDeletionSuccessMetric = metric.Metadata{ + }) + temporaryObjectCleanerSchemasDeletionSuccessMetric = metric.InitMetadata(metric.Metadata{ Name: "sql.temp_object_cleaner.schemas_deletion_success", Help: "number of successful schema deletions by the temp object cleaner on this node", Measurement: "Count", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) func (p *planner) InsertTemporarySchema( diff --git a/pkg/sql/ttl/ttljob/ttljob_metrics.go b/pkg/sql/ttl/ttljob/ttljob_metrics.go index e92a221976d7..3fb02d6fc814 100644 --- a/pkg/sql/ttl/ttljob/ttljob_metrics.go +++ b/pkg/sql/ttl/ttljob/ttljob_metrics.go @@ -97,7 +97,7 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str b := aggmetric.MakeBuilder("relation") ret := &RowLevelTTLAggMetrics{ SpanTotalDuration: b.Histogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.span_total_duration", Help: "Duration for processing a span during row level TTL.", Measurement: "nanoseconds", @@ -106,14 +106,14 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), MaxVal: 
time.Hour.Nanoseconds(), SigFigs: sigFigs, Duration: histogramWindowInterval, BucketConfig: metric.LongRunning60mLatencyBuckets, }), SelectDuration: b.Histogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.select_duration", Help: "Duration for select requests during row level TTL.", Measurement: "nanoseconds", @@ -122,14 +122,14 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), MaxVal: time.Minute.Nanoseconds(), SigFigs: sigFigs, Duration: histogramWindowInterval, BucketConfig: metric.BatchProcessLatencyBuckets, }), DeleteDuration: b.Histogram(metric.HistogramOptions{ - Metadata: metric.Metadata{ + Metadata: metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.delete_duration", Help: "Duration for delete requests during row level TTL.", Measurement: "nanoseconds", @@ -138,14 +138,14 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), MaxVal: time.Minute.Nanoseconds(), SigFigs: sigFigs, Duration: histogramWindowInterval, BucketConfig: metric.BatchProcessLatencyBuckets, }), RowSelections: b.Counter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.rows_selected", Help: "Number of rows selected for deletion by the row level TTL job.", Measurement: "num_rows", @@ -154,10 +154,10 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `Correlate this metric with the metric jobs.row_level_ttl.rows_deleted to ensure all the rows that should be deleted are actually getting deleted.`, - }, + }), ), RowDeletions: b.Counter( - metric.Metadata{ + 
metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.rows_deleted", Help: "Number of rows deleted by the row level TTL job.", Measurement: "num_rows", @@ -166,18 +166,18 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `Correlate this metric with the metric jobs.row_level_ttl.rows_selected to ensure all the rows that should be deleted are actually getting deleted.`, - }, + }), ), NumDeleteBatchRetries: b.Counter( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.num_delete_batch_retries", Help: "Number of times the row level TTL job had to reduce the delete batch size and retry.", Measurement: "num_retries", Unit: metric.Unit_COUNT, - }, + }), ), NumActiveSpans: b.Gauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.num_active_spans", Help: "Number of active spans the TTL job is deleting from.", Measurement: "num_active_spans", @@ -185,10 +185,10 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), ), TotalRows: b.Gauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.total_rows", Help: "Approximate number of rows on the TTL table.", Measurement: "total_rows", @@ -196,10 +196,10 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), ), TotalExpiredRows: b.Gauge( - metric.Metadata{ + metric.InitMetadata(metric.Metadata{ Name: "jobs.row_level_ttl.total_expired_rows", Help: "Approximate number of rows that have expired the TTL on the TTL table.", Measurement: "total_expired_rows", @@ -207,7 +207,7 @@ func makeRowLevelTTLAggMetrics(histogramWindowInterval 
time.Duration) metric.Str Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_TTL, HowToUse: `See Description.`, - }, + }), ), } ret.defaultRowLevelMetrics = ret.metricsWithChildren("default") diff --git a/pkg/sql/vecindex/metrics.go b/pkg/sql/vecindex/metrics.go index fa24e96a2c5f..660913d30434 100644 --- a/pkg/sql/vecindex/metrics.go +++ b/pkg/sql/vecindex/metrics.go @@ -8,19 +8,19 @@ package vecindex import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( - metaSuccessfulSplits = metric.Metadata{ + metaSuccessfulSplits = metric.InitMetadata(metric.Metadata{ Name: "sql.vecindex.successful_splits", Help: "Total number of vector index partitions split without error", Measurement: "Splits", Unit: metric.Unit_COUNT, - } + }) - metaPendingSplitsMerges = metric.Metadata{ + metaPendingSplitsMerges = metric.InitMetadata(metric.Metadata{ Name: "sql.vecindex.pending_splits_merges", Help: "Total number of vector index splits and merges waiting to be processed", Measurement: "Pending Splits/Merges", Unit: metric.Unit_COUNT, - } + }) ) // Metrics contain useful metrics for building and mantaining vector indexes. 
diff --git a/pkg/testutils/lint/passes/metricmetadatainit/BUILD.bazel b/pkg/testutils/lint/passes/metricmetadatainit/BUILD.bazel new file mode 100644 index 000000000000..3857f52fbbeb --- /dev/null +++ b/pkg/testutils/lint/passes/metricmetadatainit/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "metricmetadatainit", + srcs = ["metricmetadatainit.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/metricmetadatainit", + visibility = ["//visibility:public"], + deps = [ + "//pkg/testutils/lint/passes/passesutil", + "@org_golang_x_tools//go/analysis", + "@org_golang_x_tools//go/analysis/passes/inspect", + "@org_golang_x_tools//go/ast/inspector", + ], +) diff --git a/pkg/testutils/lint/passes/metricmetadatainit/metricmetadatainit.go b/pkg/testutils/lint/passes/metricmetadatainit/metricmetadatainit.go new file mode 100644 index 000000000000..c8c2144983d7 --- /dev/null +++ b/pkg/testutils/lint/passes/metricmetadatainit/metricmetadatainit.go @@ -0,0 +1,111 @@ +// Copyright 2026 The Cockroach Authors. +// +// Use of this software is governed by the CockroachDB Software License +// included in the /LICENSE file. + +// Package metricmetadatainit provides an Analyzer that flags direct +// construction of metric.Metadata composite literals that are not wrapped +// in a metric.InitMetadata() or metric.NewMetadata() call. The +// constructors record the caller's source file, which is needed to +// resolve metric ownership via CODEOWNERS at generation time. 
+package metricmetadatainit + +import ( + "go/ast" + "go/types" + + "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/passesutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const metricPkgPath = "github.com/cockroachdb/cockroach/pkg/util/metric" + +// Analyzer flags metric.Metadata{...} composite literals that are not +// wrapped in a metric.InitMetadata() call. Note that metric.NewMetadata() +// does not use composite literals, so it is not affected by this check. +var Analyzer = &analysis.Analyzer{ + Name: "metricmetadatainit", + Doc: "checks that metric.Metadata is constructed via metric.InitMetadata() or metric.NewMetadata()", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // Build a set of CompositeLit nodes that are arguments to + // InitMetadata calls, so we can skip them. + wrappedLits := make(map[ast.Node]bool) + insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) { + call := n.(*ast.CallExpr) + if !isInitMetadataCall(call, pass) { + return + } + for _, arg := range call.Args { + wrappedLits[arg] = true + } + }) + + insp.Preorder([]ast.Node{ + (*ast.CompositeLit)(nil), + }, func(n ast.Node) { + lit := n.(*ast.CompositeLit) + if lit.Type == nil { + return + } + if !isMetadataType(pass.TypesInfo.TypeOf(lit)) { + return + } + // Allow if this literal is an argument to InitMetadata(). 
+ if wrappedLits[lit] { + return + } + if passesutil.HasNolintComment(pass, n, "metricmetadatainit") { + return + } + pass.Report(analysis.Diagnostic{ + Pos: n.Pos(), + Message: "direct metric.Metadata{} literal must be wrapped " + + "in metric.InitMetadata() or use metric.NewMetadata() to " + + "record the source file for CODEOWNERS resolution", + }) + }) + return nil, nil +} + +// isInitMetadataCall returns true if call is to metric.InitMetadata. +func isInitMetadataCall(call *ast.CallExpr, pass *analysis.Pass) bool { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok || sel.Sel.Name != "InitMetadata" { + return false + } + obj, ok := pass.TypesInfo.Uses[sel.Sel] + if !ok { + return false + } + fn, ok := obj.(*types.Func) + if !ok { + return false + } + return fn.Pkg() != nil && fn.Pkg().Path() == metricPkgPath +} + +// isMetadataType returns true if t is metric.Metadata (or *metric.Metadata). +func isMetadataType(t types.Type) bool { + if t == nil { + return false + } + if ptr, ok := t.(*types.Pointer); ok { + t = ptr.Elem() + } + named, ok := t.(*types.Named) + if !ok { + return false + } + obj := named.Obj() + return obj.Pkg() != nil && + obj.Pkg().Path() == metricPkgPath && + obj.Name() == "Metadata" +} diff --git a/pkg/ts/metrics.go b/pkg/ts/metrics.go index 198f2c793dbf..0396e7481492 100644 --- a/pkg/ts/metrics.go +++ b/pkg/ts/metrics.go @@ -9,24 +9,24 @@ import "github.com/cockroachdb/cockroach/pkg/util/metric" var ( // Storage metrics. 
- metaWriteSamples = metric.Metadata{ + metaWriteSamples = metric.InitMetadata(metric.Metadata{ Name: "timeseries.write.samples", Help: "Total number of metric samples written to disk", Measurement: "Metric Samples", Unit: metric.Unit_COUNT, - } - metaWriteBytes = metric.Metadata{ + }) + metaWriteBytes = metric.InitMetadata(metric.Metadata{ Name: "timeseries.write.bytes", Help: "Total size in bytes of metric samples written to disk", Measurement: "Storage", Unit: metric.Unit_BYTES, - } - metaWriteErrors = metric.Metadata{ + }) + metaWriteErrors = metric.InitMetadata(metric.Metadata{ Name: "timeseries.write.errors", Help: "Total errors encountered while attempting to write metrics to disk", Measurement: "Errors", Unit: metric.Unit_COUNT, - } + }) ) // TimeSeriesMetrics contains metrics relevant to the time series system. diff --git a/pkg/util/admission/cpu_time_token_granter_test.go b/pkg/util/admission/cpu_time_token_granter_test.go index 4171772abdbd..3c96bf11c76c 100644 --- a/pkg/util/admission/cpu_time_token_granter_test.go +++ b/pkg/util/admission/cpu_time_token_granter_test.go @@ -218,7 +218,7 @@ func TestExhaustedDuration(t *testing.T) { defer log.Scope(t).Close(t) t0 := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) - c := metric.NewCounter(metric.Metadata{Name: "test_exhausted_nanos"}) + c := metric.NewCounter(metric.InitMetadata(metric.Metadata{Name: "test_exhausted_nanos"})) tb := tokenBucket{ tokens: 0, exhaustedStart: t0, diff --git a/pkg/util/admission/cpu_time_token_metrics.go b/pkg/util/admission/cpu_time_token_metrics.go index a6740d5d6d90..c67c339242f5 100644 --- a/pkg/util/admission/cpu_time_token_metrics.go +++ b/pkg/util/admission/cpu_time_token_metrics.go @@ -14,7 +14,7 @@ import ( ) var ( - cpuTimeTokenMultiplierMeta = metric.Metadata{ + cpuTimeTokenMultiplierMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.multiplier", Help: crstrings.UnwrapText(` The token-to-CPU-time multiplier used by the CPU time token @@ -22,18 
+22,18 @@ var ( total CPU time to tracked CPU time`), Measurement: "Multiplier", Unit: metric.Unit_COUNT, - } + }) - cpuTimeTokensConsumedMeta = metric.Metadata{ + cpuTimeTokensConsumedMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.usage.consumed", Help: crstrings.UnwrapText(` Cumulative number of CPU time tokens consumed (deducted from buckets) by admitted work`), Measurement: "Tokens", Unit: metric.Unit_COUNT, - } + }) - cpuTimeTokensReturnedMeta = metric.Metadata{ + cpuTimeTokensReturnedMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.usage.returned", Help: crstrings.UnwrapText(` Cumulative number of CPU time tokens returned (credited back to @@ -41,13 +41,13 @@ var ( initial estimate`), Measurement: "Tokens", Unit: metric.Unit_COUNT, - } + }) // NB: The per-tenant metric metadata templates below are used to create // one AggCounter per resource tier (system_tenant / app_tenant). The tier // suffix is appended in makeCPUTimeTokenMetrics. See the comment on // cpuTimeTokenMetrics.AdmittedCountPerTenant for the rationale. 
- cpuTimeTokenAdmittedCountPerTenantMetaBase = metric.Metadata{ + cpuTimeTokenAdmittedCountPerTenantMetaBase = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.per_tenant.admitted_count.%s", Help: crstrings.UnwrapText(` Cumulative number of requests admitted per tenant by CPU time @@ -55,9 +55,9 @@ var ( mean wait time via rate(wait_time) / rate(admitted_count)`), Measurement: "Requests", Unit: metric.Unit_COUNT, - } + }) - cpuTimeTokenWaitTimeNanosPerTenantMetaBase = metric.Metadata{ + cpuTimeTokenWaitTimeNanosPerTenantMetaBase = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.per_tenant.wait_time_nanos.%s", Help: crstrings.UnwrapText(` Cumulative nanoseconds of admission queue wait time per tenant @@ -65,18 +65,18 @@ var ( compute mean wait time via rate(wait_time) / rate(admitted_count)`), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - cpuTimeTokensUsedPerTenantMetaBase = metric.Metadata{ + cpuTimeTokensUsedPerTenantMetaBase = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.per_tenant.tokens_used.%s", Help: crstrings.UnwrapText(` Cumulative CPU time tokens consumed per tenant by admitted work; rate() gives the per-tenant token consumption rate`), Measurement: "Tokens", Unit: metric.Unit_COUNT, - } + }) - cpuTimeTokensReturnedPerTenantMetaBase = metric.Metadata{ + cpuTimeTokensReturnedPerTenantMetaBase = metric.InitMetadata(metric.Metadata{ Name: "admission.cpu_time_tokens.per_tenant.tokens_returned.%s", Help: crstrings.UnwrapText(` Cumulative CPU time tokens returned per tenant, for example @@ -84,7 +84,7 @@ var ( rate() gives the per-tenant token return rate`), Measurement: "Tokens", Unit: metric.Unit_COUNT, - } + }) ) // cpuTimeTokenMetrics tracks metrics for the CPU time token admission @@ -177,7 +177,7 @@ func makeCPUTimeTokenMetrics() *cpuTimeTokenMetrics { for tier := resourceTier(0); tier < numResourceTiers; tier++ { for qual := burstQualification(0); qual < 
numBurstQualifications; qual++ { idx := perBucketIdx(tier, qual) - m.ExhaustedDurationNanos[idx] = metric.NewCounter(metric.Metadata{ + m.ExhaustedDurationNanos[idx] = metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf( "admission.cpu_time_tokens.exhausted_duration_nanos.%s.%s", tier, qual), @@ -188,8 +188,8 @@ func makeCPUTimeTokenMetrics() *cpuTimeTokenMetrics { tier, qual), Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - }) - m.RefillAdded[idx] = metric.NewCounter(metric.Metadata{ + })) + m.RefillAdded[idx] = metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf( "admission.cpu_time_tokens.refill.added.%s.%s", tier, qual), @@ -199,8 +199,8 @@ func makeCPUTimeTokenMetrics() *cpuTimeTokenMetrics { tier, qual), Measurement: "Tokens", Unit: metric.Unit_COUNT, - }) - m.RefillRemoved[idx] = metric.NewCounter(metric.Metadata{ + })) + m.RefillRemoved[idx] = metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: fmt.Sprintf( "admission.cpu_time_tokens.refill.removed.%s.%s", tier, qual), @@ -210,7 +210,7 @@ func makeCPUTimeTokenMetrics() *cpuTimeTokenMetrics { tier, qual), Measurement: "Tokens", Unit: metric.Unit_COUNT, - }) + })) } } return m diff --git a/pkg/util/admission/elastic_cpu_granter.go b/pkg/util/admission/elastic_cpu_granter.go index 2887c04936fd..9e699864fe78 100644 --- a/pkg/util/admission/elastic_cpu_granter.go +++ b/pkg/util/admission/elastic_cpu_granter.go @@ -283,37 +283,37 @@ func (e *elasticCPUGranter) RecordYieldDelay(d time.Duration) { // Can this value be auto-estimated? 
var ( // granter-side metrics (some of these have parallels on the requester side, but are still useful to have) - elasticCPUAcquiredNanos = metric.Metadata{ + elasticCPUAcquiredNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.acquired_nanos", Help: "Total CPU nanoseconds acquired by elastic work", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - elasticCPUReturnedNanos = metric.Metadata{ + elasticCPUReturnedNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.returned_nanos", Help: "Total CPU nanoseconds returned by elastic work", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - elasticCPUPreWorkNanos = metric.Metadata{ + elasticCPUPreWorkNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.pre_work_nanos", Help: "Total CPU nanoseconds spent doing pre-work, before doing elastic work", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) // elasticCPUMaxAvailableNanos is a static metric, useful for computing the // % utilization: (acquired - returned)/max available. - elasticCPUMaxAvailableNanos = metric.Metadata{ + elasticCPUMaxAvailableNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.max_available_nanos", Help: "Maximum available CPU nanoseconds per second ignoring utilization limit", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - elasticCPUNanosExhaustedDuration = metric.Metadata{ + elasticCPUNanosExhaustedDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.nanos_exhausted_duration", Help: "Total duration when elastic CPU tokens (tokens measured in nanoseconds) " + "were exhausted, as observed by the token granter (not waiters). 
This is reported in " + @@ -323,49 +323,49 @@ var ( // granter-side metrics (some of these have parallels on the requester sid Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric indicates when elastic CPU tokens are exhausted. Extended periods of elastic CPU token exhaustion may indicate high CPU utilization affecting elastic workloads.", - } + }) - elasticCPUOverLimitDurations = metric.Metadata{ + elasticCPUOverLimitDurations = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.over_limit_durations", Help: "Measurement of how much over the prescribed limit elastic requests ran (not recorded if requests don't run over)", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - elasticCPUAvailableNanos = metric.Metadata{ + elasticCPUAvailableNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.available_nanos", Help: "Instantaneous available CPU nanoseconds per second ignoring utilization limit", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) - elasticCPUGranterUtilization = metric.Metadata{ + elasticCPUGranterUtilization = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.utilization", Help: "CPU utilization by elastic work", Measurement: "CPU Time", Unit: metric.Unit_PERCENT, - } + }) - elasticCPUGranterBypassedUtilization = metric.Metadata{ + elasticCPUGranterBypassedUtilization = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu_bypassed.utilization", Help: "CPU utilization by elastic work that bypassed admission", Measurement: "CPU Time", Unit: metric.Unit_PERCENT, - } + }) - elasticCPUGranterUtilizationLimit = metric.Metadata{ + elasticCPUGranterUtilizationLimit = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.utilization_limit", Help: "Utilization limit set for the elastic CPU work", Measurement: "CPU Time", Unit: metric.Unit_PERCENT, - } + }) - elasticCPUYieldDelayNanos = metric.Metadata{ + 
elasticCPUYieldDelayNanos = metric.InitMetadata(metric.Metadata{ Name: "admission.elastic_cpu.yield_delay_nanos", Help: "Total nanoseconds goroutines were delayed by runtime.Yield", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) ) // elasticCPUGranterMetrics are the metrics associated with an instance of the diff --git a/pkg/util/admission/granter.go b/pkg/util/admission/granter.go index bb5d9a43ab0c..6d6eb39c1a06 100644 --- a/pkg/util/admission/granter.go +++ b/pkg/util/admission/granter.go @@ -1030,22 +1030,22 @@ type DiskStats struct { } var ( - totalSlots = metric.Metadata{ + totalSlots = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.total_slots.kv", Help: "Total slots for kv work", Measurement: "Slots", Unit: metric.Unit_COUNT, - } - usedSlots = metric.Metadata{ + }) + usedSlots = metric.InitMetadata(metric.Metadata{ // Note: we append a WorkKind string to this name. Name: "admission.granter.used_slots.", Help: "Used slots", Measurement: "Slots", Unit: metric.Unit_COUNT, - } + }) // NB: this metric is independent of whether slots enforcement is happening // or not. - kvSlotsExhaustedDuration = metric.Metadata{ + kvSlotsExhaustedDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.slots_exhausted_duration.kv", Help: "Total duration when KV slots were exhausted, as observed by the slot " + "granter (not waiters). This is reported in nanoseconds from 26.1 onwards, " + @@ -1055,38 +1055,38 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric indicates when KV slots are exhausted. Extended periods of slot exhaustion may indicate insufficient slot allocation or high request concurrency requiring attention.", - } + }) // We have a metric for both short and long period. These metrics use the // period provided in CPULoad and not wall time. 
So if the sum of the rate // of these two is < 1sec/sec, the CPULoad ticks are not happening at the // expected frequency (this could happen due to CPU overload). - kvCPULoadShortPeriodDuration = metric.Metadata{ + kvCPULoadShortPeriodDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.cpu_load_short_period_duration.kv", Help: "Total duration when CPULoad was being called with a short period. This is " + "reported in nanoseconds from 26.1 onwards, and was microseconds before that.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - kvCPULoadLongPeriodDuration = metric.Metadata{ + }) + kvCPULoadLongPeriodDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.cpu_load_long_period_duration.kv", Help: "Total duration when CPULoad was being called with a long period. This is " + "reported in nanoseconds from 26.1 onwards, and was microseconds before that.", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - kvSlotAdjusterIncrements = metric.Metadata{ + }) + kvSlotAdjusterIncrements = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.slot_adjuster_increments.kv", Help: "Number of increments of the total KV slots", Measurement: "Slots", Unit: metric.Unit_COUNT, - } - kvSlotAdjusterDecrements = metric.Metadata{ + }) + kvSlotAdjusterDecrements = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.slot_adjuster_decrements.kv", Help: "Number of decrements of the total KV slots", Measurement: "Slots", Unit: metric.Unit_COUNT, - } - kvIOTokensExhaustedDuration = metric.Metadata{ + }) + kvIOTokensExhaustedDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.io_tokens_exhausted_duration.kv", Help: "Total duration when IO tokens were exhausted, as observed by the token granter " + "(not waiters). 
This is reported in nanoseconds from 26.1 onwards, and was " + @@ -1096,8 +1096,8 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric indicates when I/O tokens are exhausted. Extended periods of token exhaustion may indicate I/O bandwidth saturation or high disk utilization requiring attention.", - } - kvElasticIOTokensExhaustedDuration = metric.Metadata{ + }) + kvElasticIOTokensExhaustedDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.elastic_io_tokens_exhausted_duration.kv", Help: "Total duration when Elastic IO tokens were exhausted, as observed by the token " + "granter (not waiters). This is reported in nanoseconds from 26.1 onwards, and was " + @@ -1107,56 +1107,56 @@ var ( Visibility: metric.Metadata_ESSENTIAL, Category: metric.Metadata_OVERLOAD, HowToUse: "This metric indicates when elastic I/O tokens are exhausted. Extended periods of elastic token exhaustion may indicate I/O bandwidth saturation affecting elastic workloads.", - } - kvIOTokensTaken = metric.Metadata{ + }) + kvIOTokensTaken = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.io_tokens_taken.kv", Help: "Total number of tokens taken", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - kvIOTokensReturned = metric.Metadata{ + }) + kvIOTokensReturned = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.io_tokens_returned.kv", Help: "Total number of tokens returned", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - kvIOTokensBypassed = metric.Metadata{ + }) + kvIOTokensBypassed = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.io_tokens_bypassed.kv", Help: "Total number of tokens taken by work bypassing admission control (for example, follower writes without flow control)", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - kvIOTokensAvailable = metric.Metadata{ + }) + kvIOTokensAvailable = metric.InitMetadata(metric.Metadata{ Name: 
"admission.granter.io_tokens_available.kv", Help: "Number of tokens available", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - kvElasticIOTokensAvailable = metric.Metadata{ + }) + kvElasticIOTokensAvailable = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.elastic_io_tokens_available.kv", Help: "Number of tokens available", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - kvDiskWriteByteTokensExhaustedDuration = metric.Metadata{ + }) + kvDiskWriteByteTokensExhaustedDuration = metric.InitMetadata(metric.Metadata{ Name: "admission.granter.disk_write_byte_tokens_exhausted_duration.kv", Help: "Total duration (in nanos) when disk write byte tokens were exhausted, as observed by " + "the token granter (not waiters)", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } - l0CompactedBytes = metric.Metadata{ + }) + l0CompactedBytes = metric.InitMetadata(metric.Metadata{ Name: "admission.l0_compacted_bytes.kv", Help: "Total bytes compacted out of L0 (used to generate IO tokens)", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } - l0TokensProduced = metric.Metadata{ + }) + l0TokensProduced = metric.InitMetadata(metric.Metadata{ Name: "admission.l0_tokens_produced.kv", Help: "Total bytes produced for L0 writes", Measurement: "Tokens", Unit: metric.Unit_COUNT, - } + }) ) // TODO(irfansharif): we are lacking metrics for IO tokens and load, including diff --git a/pkg/util/admission/replicated_write_admission_test.go b/pkg/util/admission/replicated_write_admission_test.go index 322523880a25..085505ae114e 100644 --- a/pkg/util/admission/replicated_write_admission_test.go +++ b/pkg/util/admission/replicated_write_admission_test.go @@ -118,7 +118,7 @@ func TestReplicatedWriteAdmission(t *testing.T) { tg[admissionpb.RegularWorkClass], tg[admissionpb.ElasticWorkClass], }, - st, workQueueMetrics, opts, knobs, &noopOnLogEntryAdmitted{}, metric.NewCounter(metric.Metadata{}), &mockCoordMu, + st, workQueueMetrics, opts, knobs, 
&noopOnLogEntryAdmitted{}, metric.NewCounter(metric.InitMetadata(metric.Metadata{})), &mockCoordMu, ).(*StoreWorkQueue) tg[admissionpb.RegularWorkClass].r = storeWorkQueue.getRequesters()[admissionpb.RegularWorkClass] tg[admissionpb.ElasticWorkClass].r = storeWorkQueue.getRequesters()[admissionpb.ElasticWorkClass] diff --git a/pkg/util/admission/scheduler_latency_listener.go b/pkg/util/admission/scheduler_latency_listener.go index a393012912b3..e2f37e0e070b 100644 --- a/pkg/util/admission/scheduler_latency_listener.go +++ b/pkg/util/admission/scheduler_latency_listener.go @@ -258,12 +258,12 @@ var ( // cluster settings to control how elastic CPU % is adjusted ) var ( - p99SchedulerLatency = metric.Metadata{ + p99SchedulerLatency = metric.InitMetadata(metric.Metadata{ Name: "admission.scheduler_latency_listener.p99_nanos", Help: "The scheduling latency at p99 as observed by the scheduler latency listener", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, - } + }) ) // schedulerLatencyListenerMetrics are the metrics associated with an instance diff --git a/pkg/util/admission/snapshot_queue.go b/pkg/util/admission/snapshot_queue.go index 7bf3959d4443..a2f1c94bbdaa 100644 --- a/pkg/util/admission/snapshot_queue.go +++ b/pkg/util/admission/snapshot_queue.go @@ -73,12 +73,12 @@ var DiskBandwidthForSnapshotIngestMinRateEnabled = settings.RegisterBoolSetting( settings.WithPublic, ) -var snapshotWaitDur = metric.Metadata{ +var snapshotWaitDur = metric.InitMetadata(metric.Metadata{ Name: "admission.wait_durations.snapshot_ingest", Help: "Wait time for snapshot ingest requests that waited", Measurement: "Wait time Duration", Unit: metric.Unit_NANOSECONDS, -} +}) type SnapshotMetrics struct { WaitDurations metric.IHistogram @@ -93,12 +93,12 @@ func makeSnapshotQueueMetrics(registry *metric.Registry) *SnapshotMetrics { Duration: base.DefaultHistogramWindowInterval(), BucketConfig: metric.IOLatencyBuckets, }), - AdmittedSnapshotBytes: 
*metric.NewCounter(metric.Metadata{ + AdmittedSnapshotBytes: *metric.NewCounter(metric.InitMetadata(metric.Metadata{ Name: "admission.admitted_snapshot_bytes", Help: "Number of bytes admitted for snapshot ingests when provisioned bandwidth AC is enabled", Measurement: "Bytes", Unit: metric.Unit_BYTES, - }), + })), } registry.AddMetricStruct(m) return m diff --git a/pkg/util/admission/work_queue.go b/pkg/util/admission/work_queue.go index d8cb107b0843..8892f1d4329a 100644 --- a/pkg/util/admission/work_queue.go +++ b/pkg/util/admission/work_queue.go @@ -2135,54 +2135,54 @@ func (oeh *openEpochsHeap) Pop() interface{} { } var ( - requestedMeta = metric.Metadata{ + requestedMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.requested.", Help: "Number of requests", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - admittedMeta = metric.Metadata{ + }) + admittedMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.admitted.", Help: "Number of requests admitted", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - erroredMeta = metric.Metadata{ + }) + erroredMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.errored.", Help: "Number of requests not admitted due to error", Measurement: "Requests", Unit: metric.Unit_COUNT, - } - waitDurationsMeta = metric.Metadata{ + }) + waitDurationsMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.wait_durations.", Help: "Wait time durations for requests that waited", Measurement: "Wait time Duration", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_OVERLOAD, HowToUse: "This is a latency histogram of wait time in the admission control queue. 
Non-zero wait times are expected when the corresponding resource is saturated.", - } - kvWaitDurationsMeta = metric.Metadata{ + }) + kvWaitDurationsMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.wait_durations.", Help: "Wait time durations for requests that waited", Measurement: "Wait time Duration", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_OVERLOAD, HowToUse: "This is a latency histogram of wait time in the CPU utilization-based admission control queue. Non-zero wait times are expected when CPU is saturated.", - } - kvStoresWaitDurationsMeta = metric.Metadata{ + }) + kvStoresWaitDurationsMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.wait_durations.", Help: "Wait time durations for requests that waited", Measurement: "Wait time Duration", Unit: metric.Unit_NANOSECONDS, Category: metric.Metadata_OVERLOAD, HowToUse: "This is a latency histogram of wait time in the I/O utilization-based admission control queue. Non-zero wait times are expected when I/O is saturated.", - } - waitQueueLengthMeta = metric.Metadata{ + }) + waitQueueLengthMeta = metric.InitMetadata(metric.Metadata{ Name: "admission.wait_queue_length.", Help: "Length of wait queue", Measurement: "Requests", Unit: metric.Unit_COUNT, - } + }) ) func addName(name string, meta metric.Metadata) metric.Metadata { diff --git a/pkg/util/admission/work_queue_test.go b/pkg/util/admission/work_queue_test.go index ec7ea6b740b8..128b45e08fcb 100644 --- a/pkg/util/admission/work_queue_test.go +++ b/pkg/util/admission/work_queue_test.go @@ -1089,7 +1089,7 @@ func TestStoreWorkQueueBasic(t *testing.T) { tg[admissionpb.RegularWorkClass], tg[admissionpb.ElasticWorkClass], }, - st, workQueueMetrics, opts, nil /* testing knobs */, &noopOnLogEntryAdmitted{}, metric.NewCounter(metric.Metadata{}), &mockCoordMu).(*StoreWorkQueue) + st, workQueueMetrics, opts, nil /* testing knobs */, &noopOnLogEntryAdmitted{}, metric.NewCounter(metric.InitMetadata(metric.Metadata{})), 
&mockCoordMu).(*StoreWorkQueue) tg[admissionpb.RegularWorkClass].r = q.getRequesters()[admissionpb.RegularWorkClass] tg[admissionpb.ElasticWorkClass].r = q.getRequesters()[admissionpb.ElasticWorkClass] wrkMap.resetMap() diff --git a/pkg/util/cidr/cidr_test.go b/pkg/util/cidr/cidr_test.go index 65a2043aef7c..bd68b75d3034 100644 --- a/pkg/util/cidr/cidr_test.go +++ b/pkg/util/cidr/cidr_test.go @@ -171,18 +171,18 @@ func TestRefresh(t *testing.T) { }) } -var writeBytes = metric.Metadata{ +var writeBytes = metric.InitMetadata(metric.Metadata{ Name: "write_bytes", Help: "Number of bytes written", Measurement: "Bytes", Unit: metric.Unit_BYTES, -} -var readBytes = metric.Metadata{ +}) +var readBytes = metric.InitMetadata(metric.Metadata{ Name: "read_bytes", Help: "Number of bytes read", Measurement: "Bytes", Unit: metric.Unit_BYTES, -} +}) // TestWrapHTTP validates the metrics for a HTTP connections. func TestWrapHTTP(t *testing.T) { diff --git a/pkg/util/log/logmetrics/metrics.go b/pkg/util/log/logmetrics/metrics.go index 9a1c13ffb305..af4a8b65096c 100644 --- a/pkg/util/log/logmetrics/metrics.go +++ b/pkg/util/log/logmetrics/metrics.go @@ -15,69 +15,69 @@ import ( var ( // logMetricsReg is a singleton instance of the logMetricsRegistry. 
logMetricsReg = newLogMetricsRegistry() - fluentSinkConnAttempts = metric.Metadata{ + fluentSinkConnAttempts = metric.InitMetadata(metric.Metadata{ Name: "log.fluent.sink.conn.attempts", Help: "Number of connection attempts experienced by fluent-server logging sinks", Measurement: "Attempts", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - fluentSinkConnErrors = metric.Metadata{ + }) + fluentSinkConnErrors = metric.InitMetadata(metric.Metadata{ Name: "log.fluent.sink.conn.errors", Help: "Number of connection errors experienced by fluent-server logging sinks", Measurement: "Errors", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - fluentSinkWriteAttempts = metric.Metadata{ + }) + fluentSinkWriteAttempts = metric.InitMetadata(metric.Metadata{ Name: "log.fluent.sink.write.attempts", Help: "Number of write attempts experienced by fluent-server logging sinks", Measurement: "Attempts", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - fluentSinkWriteErrors = metric.Metadata{ + }) + fluentSinkWriteErrors = metric.InitMetadata(metric.Metadata{ Name: "log.fluent.sink.write.errors", Help: "Number of write errors experienced by fluent-server logging sinks", Measurement: "Errors", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - otlpSinkWriteAttempts = metric.Metadata{ + }) + otlpSinkWriteAttempts = metric.InitMetadata(metric.Metadata{ Name: "log.otlp.sink.write.attempts", Help: "Number of write attempts experienced by otlp-server logging sinks", Measurement: "Attempts", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - otlpSinkWriteErrors = metric.Metadata{ + }) + otlpSinkWriteErrors = metric.InitMetadata(metric.Metadata{ Name: "log.otlp.sink.write.errors", Help: "Number of write errors experienced by otlp-server logging sinks", Measurement: "Errors", Unit: metric.Unit_COUNT, MetricType: 
io_prometheus_client.MetricType_COUNTER, - } - otlpSinkGRPCTransparentRetries = metric.Metadata{ + }) + otlpSinkGRPCTransparentRetries = metric.InitMetadata(metric.Metadata{ Name: "log.otlp.sink.grpc.transparent_retries", Help: "Number of transparent retries done by otlp-server logging sinks when using GRPC", Measurement: "Retries", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - bufferedSinkMessagesDropped = metric.Metadata{ + }) + bufferedSinkMessagesDropped = metric.InitMetadata(metric.Metadata{ Name: "log.buffered.messages.dropped", Help: "Count of log messages that are dropped by buffered log sinks. When CRDB attempts to buffer a log message in a buffered log sink whose buffer is already full, it drops the oldest buffered messages to make space for the new message", Measurement: "Messages", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } - logMessageCount = metric.Metadata{ + }) + logMessageCount = metric.InitMetadata(metric.Metadata{ Name: "log.messages.count", Help: "Count of messages logged on the node since startup. 
Note that this does not measure the fan-out of single log messages to the various configured logging sinks.", Measurement: "Messages", Unit: metric.Unit_COUNT, MetricType: io_prometheus_client.MetricType_COUNTER, - } + }) ) // Inject our singleton logMetricsRegistry into the logging diff --git a/pkg/util/metric/BUILD.bazel b/pkg/util/metric/BUILD.bazel index baaea3a4e872..9bfe96b6235e 100644 --- a/pkg/util/metric/BUILD.bazel +++ b/pkg/util/metric/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "histogram_buckets.go", "histogram_snapshot.go", "label_slice_cache.go", + "metadata.go", "metric.go", "prometheus_exporter.go", "prometheus_rule_exporter.go", @@ -53,6 +54,7 @@ go_test( srcs = [ "histogram_buckets_test.go", "label_slice_cache_test.go", + "metadata_test.go", "metric_ext_test.go", "metric_test.go", "prometheus_exporter_test.go", diff --git a/pkg/util/metric/metadata.go b/pkg/util/metric/metadata.go new file mode 100644 index 000000000000..9db56a709ec9 --- /dev/null +++ b/pkg/util/metric/metadata.go @@ -0,0 +1,73 @@ +// Copyright 2026 The Cockroach Authors. +// +// Use of this software is governed by the CockroachDB Software License +// included in the /LICENSE file. + +package metric + +import ( + "runtime" + "strings" +) + +const modulePrefix = "github.com/cockroachdb/cockroach/" + +// NewMetadata creates a Metadata with the four fields that almost every +// metric needs, and automatically sets SourceFile to the caller's +// source file. Use this for the common case where no labels, visibility, +// or other optional fields are required. 
+// +// Usage: +// +// var myMeta = metric.NewMetadata( +// "sql.conns", "Number of connections.", +// "Connections", metric.Unit_COUNT, +// ) +func NewMetadata(name, help, measurement string, unit Unit) Metadata { + return Metadata{ + Name: name, + Help: help, + Measurement: measurement, + Unit: unit, + SourceFile: callerSourceFile(1), + } +} + +// InitMetadata returns a copy of m with SourceFile automatically set to +// the caller's source file. Use this when you need to set optional +// fields beyond Name, Help, Measurement, and Unit (e.g. Labels, +// Visibility, Category, HowToUse). For the common case, prefer +// NewMetadata. +// +// Usage: +// +// var myMeta = metric.InitMetadata(metric.Metadata{ +// Name: "sql.conns", +// Help: "Number of connections.", +// Measurement: "Connections", +// Unit: metric.Unit_COUNT, +// Visibility: metric.Metadata_ESSENTIAL, +// }) +func InitMetadata(m Metadata) Metadata { + m.SourceFile = callerSourceFile(1) + return m +} + +// callerSourceFile returns the repo-relative source file of the caller +// at the given depth (0 = callerSourceFile's caller). Go records +// module-relative paths in frame.File for code within a module, so +// the result is stable across local builds, Bazel sandboxes, and CI. +// +// For example, a call from pkg/kv/kvserver/metrics.go returns +// "pkg/kv/kvserver/metrics.go". +func callerSourceFile(skip int) string { + var pcs [1]uintptr + if runtime.Callers(skip+2, pcs[:]) == 0 { + return "" + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + if frame.File == "" { + return "" + } + return strings.TrimPrefix(frame.File, modulePrefix) +} diff --git a/pkg/util/metric/metadata_test.go b/pkg/util/metric/metadata_test.go new file mode 100644 index 000000000000..2ee2cd0c2038 --- /dev/null +++ b/pkg/util/metric/metadata_test.go @@ -0,0 +1,68 @@ +// Copyright 2026 The Cockroach Authors. +// +// Use of this software is governed by the CockroachDB Software License +// included in the /LICENSE file. 
+ +package metric + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCallerSourceFile(t *testing.T) { + // callerSourceFile(0) should return this test file's repo-relative path. + got := callerSourceFile(0) + require.True(t, strings.HasSuffix(got, "pkg/util/metric/metadata_test.go"), + "expected path ending in pkg/util/metric/metadata_test.go, got %q", got) +} + +func TestCallerSourceFileFromMethod(t *testing.T) { + // Verify that callerSourceFile works correctly when called from a + // method receiver, which produces a fully-qualified function name + // like "pkg.(*Type).Method" containing multiple dots. + h := &metadataTestHelper{} + got := h.sourceFile() + require.True(t, strings.HasSuffix(got, "pkg/util/metric/metadata_test.go"), + "expected path ending in pkg/util/metric/metadata_test.go, got %q", got) +} + +type metadataTestHelper struct{} + +func (h *metadataTestHelper) sourceFile() string { + return callerSourceFile(0) +} + +func TestCallerSourceFileFromClosure(t *testing.T) { + // Closures produce function names like "pkg.TestFoo.func1". 
+ var got string + func() { + got = callerSourceFile(0) + }() + require.True(t, strings.HasSuffix(got, "pkg/util/metric/metadata_test.go"), + "expected path ending in pkg/util/metric/metadata_test.go, got %q", got) +} + +func TestNewMetadata(t *testing.T) { + m := NewMetadata("test.metric", "A test metric.", "Tests", Unit_COUNT) + require.Equal(t, "test.metric", m.Name) + require.Equal(t, "A test metric.", m.Help) + require.Equal(t, "Tests", m.Measurement) + require.Equal(t, Unit_COUNT, m.Unit) + require.True(t, strings.HasSuffix(m.SourceFile, "pkg/util/metric/metadata_test.go"), + "expected SourceFile ending in pkg/util/metric/metadata_test.go, got %q", m.SourceFile) +} + +func TestInitMetadata(t *testing.T) { + m := InitMetadata(Metadata{ + Name: "test.metric", + Help: "A test metric.", + Measurement: "Tests", + Unit: Unit_COUNT, + }) + require.Equal(t, "test.metric", m.Name) + require.True(t, strings.HasSuffix(m.SourceFile, "pkg/util/metric/metadata_test.go"), + "expected SourceFile ending in pkg/util/metric/metadata_test.go, got %q", m.SourceFile) +} diff --git a/pkg/util/metric/metric.proto b/pkg/util/metric/metric.proto index 73af8f4956a4..a8eea5f9d900 100644 --- a/pkg/util/metric/metric.proto +++ b/pkg/util/metric/metric.proto @@ -153,5 +153,11 @@ message Metadata { // labels will be recorded in the time series database. optional bool tsdb_record_labeled = 13; - // Next ID: 14. + // source_file is the repo-relative path of the Go source file where this + // metric is defined (e.g., "pkg/kv/kvserver/metrics.go"). It is set + // automatically by NewMetadata via runtime.Caller and used at generation + // time to resolve the owning team via CODEOWNERS. + optional string source_file = 14 [(gogoproto.nullable) = false]; + + // Next ID: 15. 
} diff --git a/pkg/util/metric/registry.go b/pkg/util/metric/registry.go index d76ea0432a93..f21d30132933 100644 --- a/pkg/util/metric/registry.go +++ b/pkg/util/metric/registry.go @@ -142,6 +142,12 @@ func (r *Registry) AddMetric(metric Iterable) { "skipping non-exportable metric %s (%T)", name, metric) return } + if meta := metric.GetMetadata(); meta.SourceFile == "" { + name := metric.GetName(false /* useStaticLabels */) + log.Dev.Warningf(context.TODO(), + "metric %s (%T) is missing SourceFile; use metric.InitMetadata() to construct metric metadata", + name, metric) + } r.Lock() defer r.Unlock() r.tracked[metric.GetName(false /* useStaticLabels */)] = metric diff --git a/pkg/util/mon/bytes_usage_test.go b/pkg/util/mon/bytes_usage_test.go index 318f8d5598bc..1b0e2cdc544e 100644 --- a/pkg/util/mon/bytes_usage_test.go +++ b/pkg/util/mon/bytes_usage_test.go @@ -363,7 +363,7 @@ func TestMemoryAllocationEdgeCases(t *testing.T) { func TestMultiSharedGauge(t *testing.T) { ctx := context.Background() - resourceGauge := metric.NewGauge(metric.Metadata{}) + resourceGauge := metric.NewGauge(metric.InitMetadata(metric.Metadata{})) minAllocation := int64(1000) parent := NewMonitor(Options{ diff --git a/pkg/util/schedulerlatency/histogram_test.go b/pkg/util/schedulerlatency/histogram_test.go index 8ecc63acbe4c..e29279945e92 100644 --- a/pkg/util/schedulerlatency/histogram_test.go +++ b/pkg/util/schedulerlatency/histogram_test.go @@ -77,7 +77,7 @@ func TestRuntimeHistogram(t *testing.T) { switch d.Cmd { case "init": buckets := parseBuckets(t, d.Input) - rh = newRuntimeHistogram(metric.Metadata{}, buckets) + rh = newRuntimeHistogram(metric.InitMetadata(metric.Metadata{}), buckets) rh.mult = 1.0 return "" diff --git a/pkg/util/schedulerlatency/sampler.go b/pkg/util/schedulerlatency/sampler.go index b0a38cfd09a7..540fd2e2a1ab 100644 --- a/pkg/util/schedulerlatency/sampler.go +++ b/pkg/util/schedulerlatency/sampler.go @@ -44,13 +44,13 @@ var sampleDuration = 
settings.RegisterDurationSetting( settings.DurationWithMinimum(100*time.Millisecond), ) -var schedulerLatency = metric.Metadata{ +var schedulerLatency = metric.InitMetadata(metric.Metadata{ Name: "go.scheduler_latency", Help: "Go scheduling latency", Measurement: "Nanoseconds", Unit: metric.Unit_NANOSECONDS, Visibility: metric.Metadata_SUPPORT, -} +}) // StartSampler spawn a goroutine to periodically sample the scheduler latencies // and invoke all registered callbacks. diff --git a/pkg/util/schedulerlatency/scheduler_latency_test.go b/pkg/util/schedulerlatency/scheduler_latency_test.go index c968b5067d88..efb3c6921ec5 100644 --- a/pkg/util/schedulerlatency/scheduler_latency_test.go +++ b/pkg/util/schedulerlatency/scheduler_latency_test.go @@ -177,7 +177,7 @@ func TestComputeSchedulerPercentileAgainstPrometheus(t *testing.T) { // Compare values against metric.Histogram (prometheus-based implementation) promhist := metric.NewHistogram(metric.HistogramOptions{ Mode: metric.HistogramModePrometheus, - Metadata: metric.Metadata{}, + Metadata: metric.InitMetadata(metric.Metadata{}), Duration: time.Hour, Buckets: hist.Buckets, }) @@ -277,7 +277,7 @@ func TestCloneHistogram(t *testing.T) { // - Replace windowedCounts with latest delta for TSDB percentiles (10 → 6) func TestRuntimeHistogramCumulativeVsWindowed(t *testing.T) { buckets := []float64{0, 10, 20, 30, 40, 50} - rh := newRuntimeHistogram(metric.Metadata{Name: "test.histogram"}, buckets) + rh := newRuntimeHistogram(metric.InitMetadata(metric.Metadata{Name: "test.histogram"}), buckets) rh.mult = 1.0 // Simulate first statsInterval (e.g., 0s-10s): 10 accumulated observations.