Skip to content

Commit afffa12

Browse files
Add MCP to TMLL
Add the following MCP mcp.json { "mcpServers": { "tmll": { "command": "/usr/bin/python3", "args": ["path-to/tmll/mcp_server_cli.py"], "env": { "PYTHONPATH": "path-to/tmll" } } } } This code creation was assisted by claude-sonnet-4.5 Signed-off-by: Matthew Khouzam <matthew.khouzam@ericsson.com>
1 parent 406c444 commit afffa12

5 files changed

Lines changed: 317 additions & 62 deletions

File tree

requirements.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,4 +10,5 @@ matplotlib>=3.8.0
1010
seaborn==0.13.2
1111
statsmodels>=0.14.1
1212
ruptures==1.1.9
13-
rich==13.9.4
13+
rich==13.9.4
14+
mcp==1.27.0

tmll/mcp/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+

tmll_cli.py renamed to tmll/mcp/cli.py

Lines changed: 45 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import argparse
44
import sys
55
import json
6+
import pandas as pd
67
from tmll.tmll_client import TMLLClient
78
from tmll.common.models.experiment import Experiment
89
from tmll.ml.modules.anomaly_detection.anomaly_detection_module import AnomalyDetection
@@ -11,7 +12,6 @@
1112
from tmll.ml.modules.root_cause.correlation_module import CorrelationAnalysis
1213
from tmll.ml.modules.resource_optimization.idle_resource_detection_module import IdleResourceDetection
1314
from tmll.ml.modules.predictive_maintenance.capacity_planning_module import CapacityPlanning
14-
from tmll.ml.unsupervised.clustering import Clustering
1515

1616

1717
def get_experiment(client, exp_uuid):
@@ -30,7 +30,10 @@ def create_experiment(args):
3030
client = TMLLClient(args.host, args.port, verbose=args.verbose)
3131
traces = [{"path": os.path.expanduser(path)} for path in args.traces]
3232
experiment = client.create_experiment(traces=traces, experiment_name=args.name)
33-
print(f"Created experiment: {experiment.name} (UUID: {experiment.UUID})")
33+
if not experiment:
34+
print("Failed to create experiment")
35+
return
36+
print(f"Created experiment: {experiment.name} (UUID: {experiment.uuid})")
3437

3538

3639
def list_outputs(args):
@@ -65,11 +68,22 @@ def fetch_data_cmd(args):
6568
data = client.fetch_data(experiment, outputs_with_tree)
6669

6770
if args.output:
68-
for key, df in data.items():
69-
df.to_csv(f"{args.output}_{key}.csv", index=False)
71+
for key, value in data.items():
72+
if isinstance(value, pd.DataFrame):
73+
value.to_csv(f"{args.output}_{key}.csv", index=False)
74+
elif isinstance(value, dict):
75+
for sub_key, df in value.items():
76+
if isinstance(df, pd.DataFrame):
77+
df.to_csv(f"{args.output}_{key}_{sub_key}.csv", index=False)
7078
print(f"Data exported to {args.output}_*.csv")
7179
else:
72-
print(json.dumps({k: v.to_dict() for k, v in data.items()}, indent=2))
80+
result = {}
81+
for k, v in data.items():
82+
if isinstance(v, pd.DataFrame):
83+
result[k] = v.to_dict()
84+
elif isinstance(v, dict):
85+
result[k] = {sk: sv.to_dict() for sk, sv in v.items() if isinstance(sv, pd.DataFrame)}
86+
print(json.dumps(result, indent=2, default=str))
7387

7488

7589
def detect_anomalies(args):
@@ -106,15 +120,9 @@ def detect_memory_leak(args):
106120
print("Experiment not found")
107121
return
108122

109-
outputs = experiment.find_outputs(keyword=args.keywords, type=['xy'])
110-
111-
if not outputs:
112-
print("No outputs found")
113-
return
114-
115-
mld = MemoryLeakDetection(client, experiment, outputs)
116-
result = mld.detect_memory_leak()
117-
print(f"Memory leak detected: {result}")
123+
mld = MemoryLeakDetection(client, experiment)
124+
result = mld.analyze_memory_leaks()
125+
print(f"Memory leak analysis: {result}")
118126

119127

120128
def detect_changepoints(args):
@@ -133,12 +141,12 @@ def detect_changepoints(args):
133141
return
134142

135143
cpa = ChangePointAnalysis(client, experiment, outputs)
136-
changepoints = cpa.get_change_points(method=args.method)
144+
changepoints = cpa.get_change_points(methods=args.methods)
137145

138146
if args.plot:
139147
cpa.plot_change_points(changepoints)
140148
else:
141-
print(f"Found {len(changepoints)} change points")
149+
print(f"Found {len(changepoints.metrics) if changepoints else 0} change point metrics")
142150

143151

144152
def analyze_correlation(args):
@@ -157,10 +165,10 @@ def analyze_correlation(args):
157165
return
158166

159167
ca = CorrelationAnalysis(client, experiment, outputs)
160-
correlations = ca.analyze_correlation(method=args.method)
168+
correlations = ca.analyze_correlations(method=args.method)
161169

162170
if args.plot:
163-
ca.plot_correlation(correlations)
171+
ca.plot_correlation_matrix(correlations)
164172
else:
165173
print(f"Correlation results: {correlations}")
166174

@@ -181,7 +189,11 @@ def detect_idle_resources(args):
181189
return
182190

183191
ird = IdleResourceDetection(client, experiment, outputs)
184-
idle = ird.detect_idle_resources(threshold=args.threshold)
192+
idle = ird.analyze_idle_resources(
193+
cpu_idle_threshold=args.cpu_idle_threshold,
194+
memory_idle_threshold=args.memory_idle_threshold,
195+
disk_idle_threshold=args.disk_idle_threshold,
196+
)
185197
print(f"Idle resources: {idle}")
186198

187199

@@ -201,8 +213,8 @@ def plan_capacity(args):
201213
return
202214

203215
cp = CapacityPlanning(client, experiment, outputs)
204-
plan = cp.plan_capacity(horizon=args.horizon)
205-
print(f"Capacity plan: {plan}")
216+
plan = cp.forecast_capacity(forecast_steps=args.horizon)
217+
print(f"Capacity forecast: {plan}")
206218

207219

208220
def list_experiments(args):
@@ -225,28 +237,6 @@ def delete_experiment(args):
225237
print(f"Deleted experiment: {args.experiment}")
226238

227239

228-
def cluster_data(args):
229-
"""Perform clustering analysis"""
230-
client = TMLLClient(args.host, args.port, verbose=args.verbose)
231-
experiment = get_experiment(client, args.experiment)
232-
233-
if not experiment:
234-
print("Experiment not found")
235-
return
236-
237-
outputs = experiment.find_outputs(keyword=args.keywords, type=['xy'])
238-
239-
if not outputs:
240-
print("No outputs found")
241-
return
242-
243-
outputs_with_tree = client.fetch_outputs_with_tree(experiment, [o.id for o in outputs])
244-
data = client.fetch_data(experiment, outputs_with_tree)
245-
clustering = Clustering(data, n_clusters=args.n_clusters, model=args.method)
246-
clusters = clustering.get_clusters()
247-
print(f"Clustering results:\n{clusters}")
248-
249-
250240
def main():
251241
parser = argparse.ArgumentParser(description="TMLL CLI - Trace-Server Machine Learning Library")
252242
parser.add_argument("--host", default="localhost", help="Trace server host")
@@ -287,56 +277,51 @@ def main():
287277

288278
# anomaly command
289279
anomaly_parser = subparsers.add_parser("anomaly", help="Detect anomalies")
290-
anomaly_parser.add_argument("experiment", help="Experiment UUID or name")
280+
anomaly_parser.add_argument("experiment", help="Experiment UUID")
291281
anomaly_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu usage"], help="Output keywords")
292282
anomaly_parser.add_argument("-m", "--method", default="iforest", help="Detection method")
293283
anomaly_parser.add_argument("-p", "--plot", action="store_true", help="Plot anomalies")
294284
anomaly_parser.set_defaults(func=detect_anomalies)
295285

296286
# memory-leak command
297287
memleak_parser = subparsers.add_parser("memory-leak", help="Detect memory leaks")
298-
memleak_parser.add_argument("experiment", help="Experiment UUID or name")
288+
memleak_parser.add_argument("experiment", help="Experiment UUID")
299289
memleak_parser.add_argument("-k", "--keywords", nargs="+", default=["memory"], help="Output keywords")
300290
memleak_parser.set_defaults(func=detect_memory_leak)
301291

302292
# changepoint command
303293
cp_parser = subparsers.add_parser("changepoint", help="Detect change points")
304-
cp_parser.add_argument("experiment", help="Experiment UUID or name")
294+
cp_parser.add_argument("experiment", help="Experiment UUID")
305295
cp_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu usage"], help="Output keywords")
306-
cp_parser.add_argument("-m", "--method", default="pelt", help="Detection method")
296+
cp_parser.add_argument("-m", "--methods", nargs="+", default=["single", "zscore", "voting", "pca"],
297+
help="Analysis methods (single, zscore, voting, pca)")
307298
cp_parser.add_argument("-p", "--plot", action="store_true", help="Plot change points")
308299
cp_parser.set_defaults(func=detect_changepoints)
309300

310301
# correlation command
311302
corr_parser = subparsers.add_parser("correlation", help="Analyze correlation")
312-
corr_parser.add_argument("experiment", help="Experiment UUID or name")
303+
corr_parser.add_argument("experiment", help="Experiment UUID")
313304
corr_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu", "memory"], help="Output keywords")
314305
corr_parser.add_argument("-m", "--method", default="pearson", help="Correlation method")
315306
corr_parser.add_argument("-p", "--plot", action="store_true", help="Plot correlation")
316307
corr_parser.set_defaults(func=analyze_correlation)
317308

318309
# idle-resources command
319310
idle_parser = subparsers.add_parser("idle-resources", help="Detect idle resources")
320-
idle_parser.add_argument("experiment", help="Experiment UUID or name")
311+
idle_parser.add_argument("experiment", help="Experiment UUID")
321312
idle_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu usage"], help="Output keywords")
322-
idle_parser.add_argument("-t", "--threshold", type=float, default=5.0, help="Idle threshold")
313+
idle_parser.add_argument("--cpu-idle-threshold", type=float, default=5.0, help="CPU idle threshold percentage")
314+
idle_parser.add_argument("--memory-idle-threshold", type=float, default=5.0, help="Memory idle threshold percentage")
315+
idle_parser.add_argument("--disk-idle-threshold", type=float, default=5.0, help="Disk idle threshold percentage")
323316
idle_parser.set_defaults(func=detect_idle_resources)
324317

325318
# capacity command
326319
capacity_parser = subparsers.add_parser("capacity", help="Perform capacity planning")
327-
capacity_parser.add_argument("experiment", help="Experiment UUID or name")
320+
capacity_parser.add_argument("experiment", help="Experiment UUID")
328321
capacity_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu usage"], help="Output keywords")
329-
capacity_parser.add_argument("-H", "--horizon", type=int, default=30, help="Planning horizon (days)")
322+
capacity_parser.add_argument("-H", "--horizon", type=int, default=100, help="Forecast steps")
330323
capacity_parser.set_defaults(func=plan_capacity)
331324

332-
# cluster command
333-
cluster_parser = subparsers.add_parser("cluster", help="Perform clustering analysis")
334-
cluster_parser.add_argument("experiment", help="Experiment UUID or name")
335-
cluster_parser.add_argument("-k", "--keywords", nargs="+", default=["cpu usage"], help="Output keywords")
336-
cluster_parser.add_argument("-n", "--n-clusters", type=int, default=3, help="Number of clusters")
337-
cluster_parser.add_argument("-m", "--method", default="kmeans", help="Clustering method")
338-
cluster_parser.set_defaults(func=cluster_data)
339-
340325
args = parser.parse_args()
341326

342327
if args.log_stderr:

0 commit comments

Comments
 (0)