11use std:: borrow:: Cow ;
2+ use std:: collections:: HashSet ;
3+ use std:: time:: Duration ;
24
35use anyhow:: Result ;
46use clap:: Args ;
57
6- use crate :: api:: { Api , Dataset , FetchEventsOptions } ;
8+ use crate :: api:: { Api , Dataset , FetchEventsOptions , LogEntry } ;
79use crate :: config:: Config ;
810use crate :: utils:: formatting:: Table ;
911
@@ -55,6 +57,14 @@ pub(super) struct ListLogsArgs {
5557 #[ arg( long = "query" , default_value = "" ) ]
5658 #[ arg( help = "Query to filter logs. Example: \" level:error\" " ) ]
5759 query : String ,
60+
61+ #[ arg( long = "live" ) ]
62+ #[ arg( help = "Live stream logs." ) ]
63+ live : bool ,
64+
65+ #[ arg( long = "poll-interval" , default_value = "2" ) ]
66+ #[ arg( help = "Poll interval in seconds. Only used when --live is specified." ) ]
67+ poll_interval : u64 ,
5868}
5969
6070pub ( super ) fn execute ( args : ListLogsArgs ) -> Result < ( ) > {
@@ -90,7 +100,11 @@ pub(super) fn execute(args: ListLogsArgs) -> Result<()> {
90100 ( Cow :: Owned ( query) , None )
91101 } ;
92102
93- execute_single_fetch ( & api, org, project_id, & query, LOG_FIELDS , & args)
103+ if args. live {
104+ execute_live_streaming ( & api, org, project_id, & query, LOG_FIELDS , & args)
105+ } else {
106+ execute_single_fetch ( & api, org, project_id, & query, LOG_FIELDS , & args)
107+ }
94108}
95109
96110fn execute_single_fetch (
@@ -153,6 +167,8 @@ struct LogDeduplicator {
153167 max_size : usize ,
154168}
155169
/// Upper bound on the number of log IDs the live-streaming deduplicator
/// retains, capping memory growth during long-running sessions.
const MAX_DEDUP_BUFFER_SIZE: usize = 10_000;
171+
156172impl LogDeduplicator {
157173 fn new ( max_size : usize ) -> Self {
158174 Self {
@@ -187,15 +203,15 @@ impl LogDeduplicator {
187203fn execute_live_streaming (
188204 api : & Api ,
189205 org : & str ,
190- project : & str ,
191- query : Option < & str > ,
206+ project : Option < & str > ,
207+ query : & str ,
192208 fields : & [ & str ] ,
193209 args : & ListLogsArgs ,
194210) -> Result < ( ) > {
195211 let mut deduplicator = LogDeduplicator :: new ( MAX_DEDUP_BUFFER_SIZE ) ;
196212 let poll_duration = Duration :: from_secs ( args. poll_interval ) ;
197213 let mut consecutive_new_only_count = 0 ;
198- const WARNING_THRESHOLD : usize = 3 ; // Show warning after 3 consecutive new-only responses
214+ const WARNING_THRESHOLD : usize = 3 ; // Show message every 3 consecutive empty polls
199215
200216 println ! ( "Starting live log streaming..." ) ;
201217 println ! (
@@ -217,14 +233,14 @@ fn execute_live_streaming(
217233
218234 loop {
219235 let options = FetchEventsOptions {
220- dataset : Dataset :: OurLogs ,
236+ dataset : Dataset :: Logs ,
221237 fields,
222- project_id : Some ( project) ,
238+ project_id : project,
223239 cursor : None ,
224240 query,
225- per_page : Some ( args. max_rows ) ,
226- stats_period : Some ( "1h" ) ,
227- sort : Some ( "-timestamp" ) ,
241+ per_page : args. max_rows ,
242+ stats_period : "10m" ,
243+ sort : "-timestamp" ,
228244 } ;
229245
230246 match api
@@ -237,12 +253,17 @@ fn execute_live_streaming(
237253 if unique_logs. is_empty ( ) {
238254 consecutive_new_only_count += 1 ;
239255
240- if consecutive_new_only_count >= WARNING_THRESHOLD && args. query . is_empty ( ) {
241- eprintln ! (
242- "\n ⚠️ Warning: No new logs found for {consecutive_new_only_count} consecutive polls."
243- ) ;
244-
245- // Reset counter to avoid spam
256+ if consecutive_new_only_count >= WARNING_THRESHOLD {
257+ if args. query . trim ( ) . is_empty ( ) {
258+ eprintln ! ( "\n No logs found in the last {WARNING_THRESHOLD} polls." ) ;
259+ } else {
260+ eprintln ! (
261+ "\n No logs found in the last {WARNING_THRESHOLD} polls. Consider adjusting your query filter: \" {}\" " ,
262+ args. query
263+ ) ;
264+ }
265+
266+ // Reset counter to show again after the next threshold
246267 consecutive_new_only_count = 0 ;
247268 }
248269 } else {
@@ -390,30 +411,31 @@ mod tests {
390411
391412 assert_eq ! ( deduplicator. seen_ids. len( ) , 4 ) ;
392413 assert_eq ! ( deduplicator. buffer. len( ) , 4 ) ;
393- #[ test]
394- fn test_is_numeric_project_id_purely_numeric ( ) {
395- assert ! ( is_numeric_project_id( "123456" ) ) ;
396- assert ! ( is_numeric_project_id( "1" ) ) ;
397- assert ! ( is_numeric_project_id( "999999999" ) ) ;
398- }
414+ }
399415
400- #[ test]
401- fn test_is_numeric_project_id_alphanumeric ( ) {
402- assert ! ( ! is_numeric_project_id( "abc123 " ) ) ;
403- assert ! ( ! is_numeric_project_id( "123abc " ) ) ;
404- assert ! ( ! is_numeric_project_id( "my-project " ) ) ;
405- }
#[test]
fn test_is_numeric_project_id_purely_numeric() {
    // Purely numeric strings must be recognized as project IDs.
    // NOTE(review): removed stray trailing spaces inside the literals
    // ("123456 ", "1 ", "999999999 ") introduced by the diff rendering —
    // with the space the strings are not purely numeric and these
    // positive asserts would fail.
    assert!(is_numeric_project_id("123456"));
    assert!(is_numeric_project_id("1"));
    assert!(is_numeric_project_id("999999999"));
}
406422
407- #[ test]
408- fn test_is_numeric_project_id_numeric_with_dash ( ) {
409- assert ! ( !is_numeric_project_id( "123-45 " ) ) ;
410- assert ! ( !is_numeric_project_id( "1-2-3 " ) ) ;
411- assert ! ( !is_numeric_project_id( "999-888 " ) ) ;
412- }
#[test]
fn test_is_numeric_project_id_alphanumeric() {
    // Mixed alphanumeric strings and slug-style names are not numeric IDs.
    // NOTE(review): removed stray trailing spaces inside the literals
    // ("abc123 " etc.) — rendering artifacts; the removed hunk shows the
    // literals without them. Outcome is unaffected but the inputs should
    // test the intended values.
    assert!(!is_numeric_project_id("abc123"));
    assert!(!is_numeric_project_id("123abc"));
    assert!(!is_numeric_project_id("my-project"));
}
413429
414- #[ test]
415- fn test_is_numeric_project_id_empty_string ( ) {
416- assert ! ( !is_numeric_project_id( "" ) ) ;
417- }
#[test]
fn test_is_numeric_project_id_numeric_with_dash() {
    // Dash-separated digit groups must not be mistaken for numeric
    // project IDs, regardless of how many groups there are.
    for candidate in ["123-45", "1-2-3", "999-888"] {
        assert!(!is_numeric_project_id(candidate));
    }
}
436+
#[test]
fn test_is_numeric_project_id_empty_string() {
    // The empty string contains no digits, so it is not a numeric ID.
    let empty_is_numeric = is_numeric_project_id("");
    assert!(!empty_is_numeric);
}
419441}
0 commit comments