Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/robusta/core/model/base_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,6 @@ class ResourceInfo(BaseModel):


class HolmesParams(ActionParams):

holmes_url: Optional[str]
model: Optional[str]
@validator("holmes_url", allow_reuse=True)
Expand Down Expand Up @@ -190,6 +189,7 @@ class HolmesChatParams(HolmesParams):

ask: str
conversation_history: Optional[list[dict]] = None
render_graph_images: bool = False


class HolmesIssueChatParams(HolmesChatParams):
Expand Down
52 changes: 47 additions & 5 deletions src/robusta/core/playbooks/internal/ai_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,26 @@
import logging

import requests
from prometrix import PrometheusQueryResult

from robusta.core.model.base_params import (
AIInvestigateParams,
ChartValuesFormat,
HolmesChatParams,
HolmesConversationParams,
HolmesIssueChatParams,
HolmesWorkloadHealthChatParams,
HolmesWorkloadHealthParams,
ResourceInfo,
HolmesWorkloadHealthChatParams
)
from robusta.core.model.events import ExecutionBaseEvent
from robusta.core.playbooks.actions_registry import action
from robusta.core.playbooks.prometheus_enrichment_utils import build_chart_from_prometheus_result
from robusta.core.reporting import Finding, FindingSubject
from robusta.core.reporting.base import EnrichmentType
from robusta.core.reporting.consts import FindingSubjectType, FindingType
from robusta.core.reporting.holmes import (
FileBlock,
HolmesChatRequest,
HolmesChatResult,
HolmesChatResultsBlock,
Expand All @@ -29,6 +33,7 @@
HolmesResultsBlock,
HolmesWorkloadHealthRequest,
)
from robusta.core.reporting.utils import convert_svg_to_png
from robusta.core.schedule.model import FixedDelayRepeat
from robusta.integrations.kubernetes.autogenerated.events import KubernetesAnyChangeEvent
from robusta.integrations.prometheus.utils import HolmesDiscovery
Expand Down Expand Up @@ -65,12 +70,18 @@ def ask_holmes(event: ExecutionBaseEvent, params: AIInvestigateParams):
)

if params.stream:
with requests.post(f"{holmes_url}/api/stream/investigate", data=holmes_req.json(), stream=True, headers={"Connection": "keep-alive"}) as resp:
with requests.post(
f"{holmes_url}/api/stream/investigate",
data=holmes_req.json(),
stream=True,
headers={"Connection": "keep-alive"},
) as resp:
resp.raise_for_status()
for line in resp.iter_content(chunk_size=None, decode_unicode=True): # Avoid streaming chunks from holmes. send them as they arrive.
for line in resp.iter_content(
chunk_size=None, decode_unicode=True
): # Avoid streaming chunks from holmes. send them as they arrive.
if line:
event.ws(data=line)

return

else:
Expand Down Expand Up @@ -182,7 +193,9 @@ def build_conversation_title(params: HolmesConversationParams) -> str:


def add_labels_to_ask(params: "HolmesConversationParams") -> str:
    """Append the alert's labels (if any) to the user's ask string.

    Args:
        params: Conversation parameters; ``params.context`` is a dict that may
            carry a "labels" entry, and ``params.ask`` is the user's question.

    Returns:
        ``params.ask`` unchanged when the context has no labels, otherwise
        "<ask>, the alert has the following labels: <labels>".
    """
    # Fetch once instead of the duplicated lookup/assignment in the original.
    labels = params.context.get("labels")
    label_string = f"the alert has the following labels: {labels}" if labels else ""
    ask = f"{params.ask}, {label_string}" if label_string else params.ask
    logging.debug(f"holmes ask query: {ask}")
    return ask
Expand Down Expand Up @@ -342,6 +355,34 @@ def holmes_chat(event: ExecutionBaseEvent, params: HolmesChatParams):
result = requests.post(f"{holmes_url}/api/chat", data=holmes_req.json())
result.raise_for_status()
holmes_result = HolmesChatResult(**json.loads(result.text))
holmes_result.files = []
if params.render_graph_images:
try:
for tool in holmes_result.tool_calls:
if tool.tool_name != "execute_prometheus_range_query":
continue

json_content = json.loads(tool.result)
query_result = PrometheusQueryResult(data=json_content.get("data", {}))
try:
output_type_str = json_content.get("output_type", "Plain")
output_type = ChartValuesFormat[output_type_str]
except KeyError:
output_type = ChartValuesFormat.Plain # fallback in case of an invalid string

chart = build_chart_from_prometheus_result(
query_result, json_content.get("description", "graph"), values_format=output_type
)
contents = convert_svg_to_png(chart.render())
name = json_content.get("description", "graph").replace(" ", "_")
holmes_result.files.append(FileBlock(f"{name}.png", contents))

holmes_result.tool_calls = [
tool for tool in holmes_result.tool_calls if tool.tool_name != "execute_prometheus_range_query"
]

except Exception:
logging.exception(f"Failed to convert tools to images")

finding = Finding(
title="AI Ask Chat",
Expand All @@ -352,6 +393,7 @@ def holmes_chat(event: ExecutionBaseEvent, params: HolmesChatParams):
finding_type=FindingType.AI_ANALYSIS,
failure=False,
)

finding.add_enrichment(
[HolmesChatResultsBlock(holmes_result=holmes_result)], enrichment_type=EnrichmentType.ai_analysis
)
Expand Down
135 changes: 135 additions & 0 deletions src/robusta/core/playbooks/prometheus_enrichment_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,6 +401,141 @@ def create_chart_from_prometheus_query(
)


def build_chart_from_prometheus_result(
    prometheus_query_result: PrometheusQueryResult,
    chart_title: Optional[str] = "Prometheus Chart",
    values_format: Optional[ChartValuesFormat] = None,
) -> pygal.Graph:
    """Render a Prometheus range-query ("matrix") result as a pygal XY line chart.

    Args:
        prometheus_query_result: Parsed query result; ``result_type`` must be "matrix".
        chart_title: Title drawn above the chart.
        values_format: Controls y-axis value/label formatting
            (Plain/Bytes/Percentage/CPUUsage); ``None`` falls back to ``str``.

    Returns:
        A configured ``pygal.XY`` graph with one line series per metric series.

    Raises:
        ValueError: If the result is not a matrix, or it contains no data points.
    """
    if prometheus_query_result.result_type != "matrix":
        raise ValueError(f"Expected 'matrix' result_type, got '{prometheus_query_result.result_type}'")

    # Sentinel bounds used to detect "no data point seen yet".
    # NOTE(review): timestamps appear to be epoch seconds (datetime.fromtimestamp
    # is applied below); HIGHEST_END is a far-future sentinel — confirm units.
    HIGHEST_END = 32536799999
    LOWEST_START = 0

    min_time = HIGHEST_END
    max_time = LOWEST_START  # NOTE(review): updated in the loop but never read afterwards
    max_y_value = 0

    plot_data_list: List[PlotData] = []
    series_list_result = prometheus_query_result.series_list_result
    # Fixed palette, cycled modulo its length so every series gets a distinct color.
    COLOR_PALETTE = [
        "#1f77b4",
        "#ff7f0e",
        "#2ca02c",
        "#d62728",
        "#9467bd",
        "#8c564b",
        "#e377c2",
        "#7f7f7f",
        "#bcbd22",
        "#17becf",
        "#393b79",
        "#637939",
        "#8c6d31",
        "#843c39",
        "#7b4173",
        "#5254a3",
        "#9c9ede",
        "#6b6ecf",
        "#b5cf6b",
        "#e7ba52",
        "#e7969c",
        "#de9ed6",
        "#9edae5",
        "#c7c7c7",
        "#c49c94",
        "#f7b6d2",
        "#dbdb8d",
        "#aec7e8",
        "#ffbb78",
        "#98df8a",
    ]
    for i, series in enumerate(series_list_result):
        label = get_target_name(series)
        if not label:
            # Fall back to the metric's label values, dropping the noisy "job" label.
            label = "\n".join([v for (key, v) in series["metric"].items() if key != "job"])

        # Pair each timestamp with its rounded sample value; track the global y max
        # so the chart range can be sized below.
        values = []
        for idx, timestamp in enumerate(series["timestamps"]):
            val = round(float(series["values"][idx]), FLOAT_PRECISION_LIMIT)
            values.append((timestamp, val))
            max_y_value = max(max_y_value, val)

        # NOTE(review): min()/max() raise on an empty "timestamps" list — presumably
        # Prometheus never returns an empty series inside a matrix; confirm.
        min_time = min(min_time, min(series["timestamps"]))
        max_time = max(max_time, max(series["timestamps"]))

        plot_data = PlotData(
            plot=(label, values),
            color=COLOR_PALETTE[i % len(COLOR_PALETTE)],
            show_dots=False,
            stroke_style={"width": 8, "dasharray": "8", "linecap": "round", "linejoin": "round"},
        )
        plot_data_list.append(plot_data)

    # min_time still at the sentinel means no series contributed any timestamps.
    if min_time == HIGHEST_END:
        raise ValueError("No valid data points found in time series.")

    config = pygal.Config()
    custom_css = PlotCustomCSS().get_css_file_path()
    config.css.append(f"file://{custom_css}")

    # Per-series colors plus two extra entries.
    # NOTE(review): the two appended colors look like headroom for pygal's style
    # beyond the per-series palette — confirm they are required.
    graph_colors = [plot_data.color for plot_data in plot_data_list]
    graph_colors.extend(["#1e0047", "#2a0065"])

    chart = pygal.XY(
        config,
        show_dots=True,
        style=charts_style(graph_colors=tuple(graph_colors)),
        truncate_legend=15,
        include_x_axis=True,
        width=1280,
        height=500,
        show_legend=True,
    )

    # Pad the y-range 20% above the observed maximum, then draw 5 evenly spaced
    # y labels from 0 up to the range top.
    chart.range = (0, max_y_value + (max_y_value * 0.2))
    interval = chart.range[1] / 4
    if values_format == ChartValuesFormat.Percentage:
        # Keep two decimal places for percentage labels; integer rounding otherwise.
        chart.y_labels = [round(i * interval * 100) / 100 for i in range(5)]
    else:
        chart.y_labels = [round(i * interval) for i in range(5)]
    chart.y_labels_major = chart.y_labels

    chart.show_x_guides = True
    chart.show_y_guides = True
    chart.spacing = 20
    chart.margin_top = 10
    chart.margin_bottom = 50
    chart.x_label_rotation = 35
    chart.truncate_label = -1
    # NOTE(review): "%-d" (no zero-padding) is a glibc strftime extension and is
    # not portable to Windows — confirm deployment targets.
    chart.x_value_formatter = lambda timestamp: datetime.fromtimestamp(timestamp).strftime("%b %-d %H:%M")
    chart.legend_at_bottom = True
    chart.legend_at_bottom_columns = 5
    chart.legend_box_size = 8

    # Map the requested format to a y-value formatter; unknown/None falls back to str.
    value_formatters = {
        ChartValuesFormat.Plain: lambda val: str(val),
        ChartValuesFormat.Bytes: lambda val: humanize.naturalsize(val, binary=True),
        ChartValuesFormat.Percentage: lambda val: f"{(100 * val):.1f}%",
        ChartValuesFormat.CPUUsage: lambda val: f"{(1000 * val):.1f}m",
    }
    chart.value_formatter = value_formatters.get(values_format, lambda val: str(val))

    chart.title = chart_title

    for plot_data in plot_data_list:
        chart.add(
            plot_data.plot[0],
            plot_data.plot[1],
            stroke_style=plot_data.stroke_style,
            show_dots=plot_data.show_dots,
            dots_size=plot_data.dots_size,
            stroke=plot_data.stroke,
        )

    return chart


def run_prometheus_query(prometheus_params: PrometheusParams, query: str) -> PrometheusQueryResult:
"""
This function runs prometheus query and returns the result (usually a vector),
Expand Down
2 changes: 2 additions & 0 deletions src/robusta/core/reporting/holmes.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
ResourceInfo,
)
from robusta.core.reporting import BaseBlock
from robusta.core.reporting.blocks import FileBlock


class HolmesRequest(BaseModel):
Expand Down Expand Up @@ -68,6 +69,7 @@ class HolmesResultsBlock(BaseBlock):

class HolmesChatResult(BaseModel):
analysis: Optional[str] = None
files: Optional[List[FileBlock]] = None
tool_calls: Optional[List[ToolCallResult]] = None
conversation_history: Optional[List[dict]] = None

Expand Down
13 changes: 13 additions & 0 deletions src/robusta/core/sinks/robusta/dal/model_conversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,18 @@ def append_to_structured_data_tool_calls(tool_calls: List[ToolCallResult], struc
data_obj["metadata"] = {"description": tool_call.description, "tool_name": tool_call.tool_name}
structured_data.append(data_obj)

@staticmethod
def append_to_structured_files(files: List[FileBlock], structured_data) -> None:
    """Zip each file block and append its serialized form to *structured_data*.

    Each appended entry carries a ``metadata`` dict holding the file's
    pre-zip name. No-op when *files* is ``None`` or empty.
    """
    if files:
        for file_block in files:
            # Capture the name before zip() — zipping changes the filename.
            original_name = file_block.filename
            file_block.zip()
            entry = ModelConversion.get_file_object(file_block)
            entry["metadata"] = {"file_name": original_name}
            structured_data.append(entry)

@staticmethod
def add_ai_chat_data(structured_data: List[Dict], block: HolmesChatResultsBlock):
Expand All @@ -110,6 +122,7 @@ def add_ai_chat_data(structured_data: List[Dict], block: HolmesChatResultsBlock)
"data": Transformer.to_github_markdown(block.holmes_result.analysis),
}
)
ModelConversion.append_to_structured_files(block.holmes_result.files, structured_data)
ModelConversion.append_to_structured_data_tool_calls(block.holmes_result.tool_calls, structured_data)

conversation_history_block = FileBlock(
Expand Down
Loading