33import argparse
44import sys
55import json
6+ import pandas as pd
67from tmll .tmll_client import TMLLClient
78from tmll .common .models .experiment import Experiment
89from tmll .ml .modules .anomaly_detection .anomaly_detection_module import AnomalyDetection
1112from tmll .ml .modules .root_cause .correlation_module import CorrelationAnalysis
1213from tmll .ml .modules .resource_optimization .idle_resource_detection_module import IdleResourceDetection
1314from tmll .ml .modules .predictive_maintenance .capacity_planning_module import CapacityPlanning
14- from tmll .ml .unsupervised .clustering import Clustering
1515
1616
1717def get_experiment (client , exp_uuid ):
@@ -30,7 +30,10 @@ def create_experiment(args):
3030 client = TMLLClient (args .host , args .port , verbose = args .verbose )
3131 traces = [{"path" : os .path .expanduser (path )} for path in args .traces ]
3232 experiment = client .create_experiment (traces = traces , experiment_name = args .name )
33- print (f"Created experiment: { experiment .name } (UUID: { experiment .UUID } )" )
33+ if not experiment :
34+ print ("Failed to create experiment" )
35+ return
36+ print (f"Created experiment: { experiment .name } (UUID: { experiment .uuid } )" )
3437
3538
3639def list_outputs (args ):
@@ -65,11 +68,22 @@ def fetch_data_cmd(args):
6568 data = client .fetch_data (experiment , outputs_with_tree )
6669
6770 if args .output :
68- for key , df in data .items ():
69- df .to_csv (f"{ args .output } _{ key } .csv" , index = False )
71+ for key , value in data .items ():
72+ if isinstance (value , pd .DataFrame ):
73+ value .to_csv (f"{ args .output } _{ key } .csv" , index = False )
74+ elif isinstance (value , dict ):
75+ for sub_key , df in value .items ():
76+ if isinstance (df , pd .DataFrame ):
77+ df .to_csv (f"{ args .output } _{ key } _{ sub_key } .csv" , index = False )
7078 print (f"Data exported to { args .output } _*.csv" )
7179 else :
72- print (json .dumps ({k : v .to_dict () for k , v in data .items ()}, indent = 2 ))
80+ result = {}
81+ for k , v in data .items ():
82+ if isinstance (v , pd .DataFrame ):
83+ result [k ] = v .to_dict ()
84+ elif isinstance (v , dict ):
85+ result [k ] = {sk : sv .to_dict () for sk , sv in v .items () if isinstance (sv , pd .DataFrame )}
86+ print (json .dumps (result , indent = 2 , default = str ))
7387
7488
7589def detect_anomalies (args ):
@@ -106,15 +120,9 @@ def detect_memory_leak(args):
106120 print ("Experiment not found" )
107121 return
108122
109- outputs = experiment .find_outputs (keyword = args .keywords , type = ['xy' ])
110-
111- if not outputs :
112- print ("No outputs found" )
113- return
114-
115- mld = MemoryLeakDetection (client , experiment , outputs )
116- result = mld .detect_memory_leak ()
117- print (f"Memory leak detected: { result } " )
123+ mld = MemoryLeakDetection (client , experiment )
124+ result = mld .analyze_memory_leaks ()
125+ print (f"Memory leak analysis: { result } " )
118126
119127
120128def detect_changepoints (args ):
@@ -133,12 +141,12 @@ def detect_changepoints(args):
133141 return
134142
135143 cpa = ChangePointAnalysis (client , experiment , outputs )
136- changepoints = cpa .get_change_points (method = args .method )
144+ changepoints = cpa .get_change_points (methods = args .methods )
137145
138146 if args .plot :
139147 cpa .plot_change_points (changepoints )
140148 else :
141- print (f"Found { len (changepoints ) } change points " )
149+ print (f"Found { len (changepoints . metrics ) if changepoints else 0 } change point metrics " )
142150
143151
144152def analyze_correlation (args ):
@@ -157,10 +165,10 @@ def analyze_correlation(args):
157165 return
158166
159167 ca = CorrelationAnalysis (client , experiment , outputs )
160- correlations = ca .analyze_correlation (method = args .method )
168+ correlations = ca .analyze_correlations (method = args .method )
161169
162170 if args .plot :
163- ca .plot_correlation (correlations )
171+ ca .plot_correlation_matrix (correlations )
164172 else :
165173 print (f"Correlation results: { correlations } " )
166174
@@ -181,7 +189,11 @@ def detect_idle_resources(args):
181189 return
182190
183191 ird = IdleResourceDetection (client , experiment , outputs )
184- idle = ird .detect_idle_resources (threshold = args .threshold )
192+ idle = ird .analyze_idle_resources (
193+ cpu_idle_threshold = args .cpu_idle_threshold ,
194+ memory_idle_threshold = args .memory_idle_threshold ,
195+ disk_idle_threshold = args .disk_idle_threshold ,
196+ )
185197 print (f"Idle resources: { idle } " )
186198
187199
@@ -201,8 +213,8 @@ def plan_capacity(args):
201213 return
202214
203215 cp = CapacityPlanning (client , experiment , outputs )
204- plan = cp .plan_capacity ( horizon = args .horizon )
205- print (f"Capacity plan : { plan } " )
216+ plan = cp .forecast_capacity ( forecast_steps = args .horizon )
217+ print (f"Capacity forecast : { plan } " )
206218
207219
208220def list_experiments (args ):
@@ -225,28 +237,6 @@ def delete_experiment(args):
225237 print (f"Deleted experiment: { args .experiment } " )
226238
227239
228- def cluster_data (args ):
229- """Perform clustering analysis"""
230- client = TMLLClient (args .host , args .port , verbose = args .verbose )
231- experiment = get_experiment (client , args .experiment )
232-
233- if not experiment :
234- print ("Experiment not found" )
235- return
236-
237- outputs = experiment .find_outputs (keyword = args .keywords , type = ['xy' ])
238-
239- if not outputs :
240- print ("No outputs found" )
241- return
242-
243- outputs_with_tree = client .fetch_outputs_with_tree (experiment , [o .id for o in outputs ])
244- data = client .fetch_data (experiment , outputs_with_tree )
245- clustering = Clustering (data , n_clusters = args .n_clusters , model = args .method )
246- clusters = clustering .get_clusters ()
247- print (f"Clustering results:\n { clusters } " )
248-
249-
250240def main ():
251241 parser = argparse .ArgumentParser (description = "TMLL CLI - Trace-Server Machine Learning Library" )
252242 parser .add_argument ("--host" , default = "localhost" , help = "Trace server host" )
@@ -287,56 +277,51 @@ def main():
287277
288278 # anomaly command
289279 anomaly_parser = subparsers .add_parser ("anomaly" , help = "Detect anomalies" )
290- anomaly_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
280+ anomaly_parser .add_argument ("experiment" , help = "Experiment UUID" )
291281 anomaly_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu usage" ], help = "Output keywords" )
292282 anomaly_parser .add_argument ("-m" , "--method" , default = "iforest" , help = "Detection method" )
293283 anomaly_parser .add_argument ("-p" , "--plot" , action = "store_true" , help = "Plot anomalies" )
294284 anomaly_parser .set_defaults (func = detect_anomalies )
295285
296286 # memory-leak command
297287 memleak_parser = subparsers .add_parser ("memory-leak" , help = "Detect memory leaks" )
298- memleak_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
288+ memleak_parser .add_argument ("experiment" , help = "Experiment UUID" )
299289 memleak_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["memory" ], help = "Output keywords" )
300290 memleak_parser .set_defaults (func = detect_memory_leak )
301291
302292 # changepoint command
303293 cp_parser = subparsers .add_parser ("changepoint" , help = "Detect change points" )
304- cp_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
294+ cp_parser .add_argument ("experiment" , help = "Experiment UUID" )
305295 cp_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu usage" ], help = "Output keywords" )
306- cp_parser .add_argument ("-m" , "--method" , default = "pelt" , help = "Detection method" )
296+ cp_parser .add_argument ("-m" , "--methods" , nargs = "+" , default = ["single" , "zscore" , "voting" , "pca" ],
297+ help = "Analysis methods (single, zscore, voting, pca)" )
307298 cp_parser .add_argument ("-p" , "--plot" , action = "store_true" , help = "Plot change points" )
308299 cp_parser .set_defaults (func = detect_changepoints )
309300
310301 # correlation command
311302 corr_parser = subparsers .add_parser ("correlation" , help = "Analyze correlation" )
312- corr_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
303+ corr_parser .add_argument ("experiment" , help = "Experiment UUID" )
313304 corr_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu" , "memory" ], help = "Output keywords" )
314305 corr_parser .add_argument ("-m" , "--method" , default = "pearson" , help = "Correlation method" )
315306 corr_parser .add_argument ("-p" , "--plot" , action = "store_true" , help = "Plot correlation" )
316307 corr_parser .set_defaults (func = analyze_correlation )
317308
318309 # idle-resources command
319310 idle_parser = subparsers .add_parser ("idle-resources" , help = "Detect idle resources" )
320- idle_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
311+ idle_parser .add_argument ("experiment" , help = "Experiment UUID" )
321312 idle_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu usage" ], help = "Output keywords" )
322- idle_parser .add_argument ("-t" , "--threshold" , type = float , default = 5.0 , help = "Idle threshold" )
313+ idle_parser .add_argument ("--cpu-idle-threshold" , type = float , default = 5.0 , help = "CPU idle threshold percentage" )
314+ idle_parser .add_argument ("--memory-idle-threshold" , type = float , default = 5.0 , help = "Memory idle threshold percentage" )
315+ idle_parser .add_argument ("--disk-idle-threshold" , type = float , default = 5.0 , help = "Disk idle threshold percentage" )
323316 idle_parser .set_defaults (func = detect_idle_resources )
324317
325318 # capacity command
326319 capacity_parser = subparsers .add_parser ("capacity" , help = "Perform capacity planning" )
327- capacity_parser .add_argument ("experiment" , help = "Experiment UUID or name " )
320+ capacity_parser .add_argument ("experiment" , help = "Experiment UUID" )
328321 capacity_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu usage" ], help = "Output keywords" )
329- capacity_parser .add_argument ("-H" , "--horizon" , type = int , default = 30 , help = "Planning horizon (days) " )
322+ capacity_parser .add_argument ("-H" , "--horizon" , type = int , default = 100 , help = "Forecast steps " )
330323 capacity_parser .set_defaults (func = plan_capacity )
331324
332- # cluster command
333- cluster_parser = subparsers .add_parser ("cluster" , help = "Perform clustering analysis" )
334- cluster_parser .add_argument ("experiment" , help = "Experiment UUID or name" )
335- cluster_parser .add_argument ("-k" , "--keywords" , nargs = "+" , default = ["cpu usage" ], help = "Output keywords" )
336- cluster_parser .add_argument ("-n" , "--n-clusters" , type = int , default = 3 , help = "Number of clusters" )
337- cluster_parser .add_argument ("-m" , "--method" , default = "kmeans" , help = "Clustering method" )
338- cluster_parser .set_defaults (func = cluster_data )
339-
340325 args = parser .parse_args ()
341326
342327 if args .log_stderr :