Skip to content

Commit af392df

Browse files
authored
Merge pull request #20 from AstroHQ/jfro/eng-5091
fix: upgrade to Rust 2024 edition (ENG-5091)
2 parents 416b9c6 + 3cfb964 commit af392df

8 files changed

Lines changed: 28 additions & 28 deletions

File tree

Cargo.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
[package]
22
name = "metrics-sqlite"
3-
version = "0.5.3"
3+
version = "0.6.0"
44
authors = ["Jeremy Knope <jeremy@astropad.com>"]
55
description = "Library for providing SQLite backend for metrics"
66
keywords = ["metrics", "sqlite"]
77
categories = ["development-tools::debugging"]
8-
edition = "2018"
8+
edition = "2024"
9+
rust-version = "1.85"
910
license = "MIT OR Apache-2.0"
1011
readme = "README.md"
1112
documentation = "https://docs.rs/metrics-sqlite"

examples/import_csv.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
use metrics_sqlite::MetricsDb;
2-
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
2+
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
33

44
fn main() {
55
let fmt_layer = fmt::layer();

examples/large-file.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ use metrics::{counter, gauge};
22
use metrics_sqlite::SqliteExporter;
33
use std::time::Duration;
44
use tracing_subscriber::prelude::*;
5-
use tracing_subscriber::{fmt, EnvFilter};
5+
use tracing_subscriber::{EnvFilter, fmt};
66

77
fn setup_metrics() {
88
let exporter = SqliteExporter::new(

examples/simple.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
use metrics::{counter, gauge};
22
use metrics_sqlite::SqliteExporter;
33
use std::time::Duration;
4-
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
4+
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
55

66
fn setup_metrics() {
77
let exporter = SqliteExporter::new(

examples/summary.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use std::time::Duration;
33
use chrono::{DateTime, Local, TimeZone};
44
use clap::Parser;
55
use metrics_sqlite::MetricsDb;
6-
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
6+
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
77

88
#[derive(Parser)]
99
struct Args {

src/lib.rs

Lines changed: 17 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -352,18 +352,20 @@ fn run_worker(
352352
// Check if we need to flush based on elapsed time
353353
let time_based_flush = state.last_flush.elapsed() >= flush_duration;
354354

355-
let (should_flush, should_exit) = match receiver.recv_timeout(flush_duration) {
355+
let mut should_flush = false;
356+
let mut should_exit = false;
357+
match receiver.recv_timeout(flush_duration) {
356358
Ok(Event::Stop) => {
357359
info!("Stopping SQLiteExporter worker, flushing & exiting");
358-
(true, true)
360+
should_flush = true;
361+
should_exit = true;
359362
}
360363
Ok(Event::SetHousekeeping {
361364
retention_period,
362365
housekeeping_period,
363366
record_limit,
364367
}) => {
365368
state.set_housekeeping(retention_period, housekeeping_period, record_limit);
366-
(false, false)
367369
}
368370
Ok(Event::DescribeKey(_key_type, key, unit, desc)) => {
369371
info!("Describing key {:?}", key);
@@ -375,11 +377,9 @@ fn run_worker(
375377
) {
376378
error!("Failed to create key entry: {:?}", e);
377379
}
378-
(false, false)
379380
}
380381
Ok(Event::RegisterKey(_key_type, _key, _handle)) => {
381382
// we currently don't do anything with register...
382-
(false, false)
383383
}
384384
Ok(Event::IncrementCounter(timestamp, key, value)) => {
385385
let key_name = key.name();
@@ -391,16 +391,15 @@ fn run_worker(
391391
if let Err(e) = state.queue_metric(timestamp, key_name, value as _) {
392392
error!("Error queueing metric: {:?}", e);
393393
}
394-
395-
(state.should_flush(), false)
394+
should_flush = state.should_flush();
396395
}
397396
Ok(Event::AbsoluteCounter(timestamp, key, value)) => {
398397
let key_name = key.name();
399398
state.counters.insert(key.clone(), value);
400399
if let Err(e) = state.queue_metric(timestamp, key_name, value as _) {
401400
error!("Error queueing metric: {:?}", e);
402401
}
403-
(state.should_flush(), false)
402+
should_flush = state.should_flush();
404403
}
405404
Ok(Event::UpdateGauge(timestamp, key, value)) => {
406405
let key_name = key.name();
@@ -422,15 +421,14 @@ fn run_worker(
422421
if let Err(e) = state.queue_metric(timestamp, key_name, value) {
423422
error!("Error queueing metric: {:?}", e);
424423
}
425-
(state.should_flush(), false)
424+
should_flush = state.should_flush();
426425
}
427426
Ok(Event::UpdateHistogram(timestamp, key, value)) => {
428427
let key_name = key.name();
429428
if let Err(e) = state.queue_metric(timestamp, key_name, value) {
430429
error!("Error queueing metric: {:?}", e);
431430
}
432-
433-
(state.should_flush(), false)
431+
should_flush = state.should_flush();
434432
}
435433
Ok(Event::RequestSummaryFromSignpost {
436434
signpost_key,
@@ -468,16 +466,16 @@ fn run_worker(
468466
}
469467
}
470468
}
471-
(false, false)
472469
}
473470
Err(RecvTimeoutError::Timeout) => {
474-
(true, false)
471+
should_flush = true;
475472
}
476473
Err(RecvTimeoutError::Disconnected) => {
477474
warn!("SQLiteExporter channel disconnected, exiting worker");
478-
(true, true)
475+
should_flush = true;
476+
should_exit = true;
479477
}
480-
};
478+
}
481479

482480
// Flush if time-based flush is triggered OR if event-based flush is triggered
483481
if time_based_flush || should_flush {
@@ -587,11 +585,11 @@ impl SqliteExporter {
587585
let excess = records - record_limit + (record_limit / 4); // delete excess + 25% of limit
588586
trace!(
589587
"Exceeded limit! {} > {}, deleting {} oldest",
590-
records,
591-
record_limit,
592-
excess
588+
records, record_limit, excess
589+
);
590+
let query = format!(
591+
"DELETE FROM metrics WHERE id IN (SELECT id FROM metrics ORDER BY timestamp ASC LIMIT {excess});"
593592
);
594-
let query = format!("DELETE FROM metrics WHERE id IN (SELECT id FROM metrics ORDER BY timestamp ASC LIMIT {excess});");
595593
if let Err(e) = sql_query(query).execute(db) {
596594
error!("Failed to delete excessive records: {:?}", e);
597595
}

src/metrics_db.rs

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
//! Metrics DB, to use/query/etc metrics SQLite databases
22
use super::{
3+
MetricsError, Result,
34
models::{Metric, MetricKey},
4-
setup_db, MetricsError, Result,
5+
setup_db,
56
};
67
use diesel::prelude::*;
78
#[cfg(feature = "import_csv")]
@@ -205,7 +206,7 @@ impl MetricsDb {
205206
error!("Skipping record due to error reading CSV record: {:?}", e);
206207
}
207208
}
208-
if flush_counter.is_multiple_of(200) {
209+
if flush_counter % 200 == 0 {
209210
trace!("Flushing");
210211
inner.flush()?;
211212
}

src/recorder.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ use metrics::{
44
Recorder, SharedString, Unit,
55
};
66
use std::{
7-
sync::{mpsc::SyncSender, Arc},
7+
sync::{Arc, mpsc::SyncSender},
88
time::SystemTime,
99
};
1010
use tracing::error;

0 commit comments

Comments (0)