|
7 | 7 | import re |
8 | 8 | import sqlite3 |
9 | 9 | import threading |
| 10 | +from urllib.parse import urlparse |
10 | 11 | from datetime import datetime, timedelta |
11 | 12 | from flask import Blueprint, jsonify, request |
12 | 13 |
|
|
16 | 17 |
|
17 | 18 | from pegaprox.utils.auth import require_auth, load_users |
18 | 19 | from pegaprox.utils.rbac import get_user_clusters |
19 | | -from pegaprox.api.helpers import check_cluster_access |
| 20 | +from pegaprox.api.helpers import check_cluster_access, load_server_settings |
20 | 21 | from pegaprox.background.metrics import load_metrics_history, start_metrics_collector |
21 | 22 | from pegaprox.background.syslog_server import DB_FILE, SEVERITY_MAP |
22 | 23 | from pegaprox.api.schedules import start_scheduler |
@@ -57,6 +58,53 @@ def _syslog_like_clause(search_text): |
57 | 58 | [like, like, like, like, like, like], |
58 | 59 | ) |
59 | 60 |
|
| 61 | + |
| 62 | +def _syslog_hostname_tokens(value): |
| 63 | + value = str(value or '').strip().lower() |
| 64 | + if not value: |
| 65 | + return set() |
| 66 | + if '://' in value: |
| 67 | + parsed = urlparse(value) |
| 68 | + value = parsed.hostname or value |
| 69 | + value = value.split('/')[0].split('@')[-1] |
| 70 | + if value.startswith('[') and ']' in value: |
| 71 | + value = value[1:value.index(']')] |
| 72 | + elif ':' in value and value.count(':') == 1: |
| 73 | + value = value.rsplit(':', 1)[0] |
| 74 | + value = value.strip('.') |
| 75 | + if not value: |
| 76 | + return set() |
| 77 | + tokens = {value} |
| 78 | + if '.' in value: |
| 79 | + tokens.add(value.split('.', 1)[0]) |
| 80 | + return tokens |
| 81 | + |
| 82 | + |
def _syslog_cluster_hostnames(cluster_id):
    """Collect all lowercase hostname tokens associated with *cluster_id*.

    Combines the manager's connection host and any configured host/name
    aliases with the node names the cluster reports, each normalized via
    _syslog_hostname_tokens().  Returns an empty set when the cluster id is
    unknown to cluster_managers.
    """
    manager = cluster_managers.get(cluster_id)
    if not manager:
        return set()

    hostnames = set()
    # Seed with the manager-level host plus config host/name, when present.
    config = getattr(manager, 'config', None)
    for value in (
        getattr(manager, 'host', ''),
        getattr(config, 'host', '') if config else '',
        getattr(config, 'name', '') if config else '',
    ):
        hostnames.update(_syslog_hostname_tokens(value))

    try:
        # Preferred source: the live node list from the cluster API.
        node_status = manager.get_node_status() or {}
        for node_name in node_status.keys():
            hostnames.update(_syslog_hostname_tokens(node_name))
    except Exception as exc:
        # Best-effort fallback: use the cached HA node map when the live
        # query fails (cluster unreachable, auth expired, ...).
        # NOTE(review): nesting reconstructed from a flattened diff — the
        # fallback loop is assumed to run only on failure; confirm upstream.
        logging.debug(f"[Syslog] Could not load nodes for cluster filter {cluster_id}: {exc}")
        for node_name in getattr(manager, 'ha_node_status', {}).keys():
            hostnames.update(_syslog_hostname_tokens(node_name))

    return hostnames
| 107 | + |
60 | 108 | @bp.route('/api/reports/summary', methods=['GET']) |
61 | 109 | @require_auth() |
62 | 110 | def get_reports_summary(): |
@@ -180,6 +228,7 @@ def get_integrated_syslog_events(): |
180 | 228 | hostname = (request.args.get('hostname') or '').strip() |
181 | 229 | source_ip = (request.args.get('source_ip') or '').strip() |
182 | 230 | facility = (request.args.get('facility') or '').strip() |
| 231 | + cluster_id = (request.args.get('cluster_id') or '').strip() |
183 | 232 |
|
184 | 233 | sort_map = { |
185 | 234 | 'id': 'logs.id', |
@@ -267,6 +316,22 @@ def get_integrated_syslog_events(): |
267 | 316 | except ValueError: |
268 | 317 | pass |
269 | 318 |
|
| 319 | + if cluster_id and load_server_settings().get('syslog_filter_by_selected_cluster', False): |
| 320 | + ok, err = check_cluster_access(cluster_id) |
| 321 | + if not ok: |
| 322 | + return err |
| 323 | + cluster_hostnames = sorted(_syslog_cluster_hostnames(cluster_id)) |
| 324 | + if cluster_hostnames: |
| 325 | + cluster_hostname_where = [] |
| 326 | + for value in cluster_hostnames: |
| 327 | + cluster_hostname_where.append("LOWER(logs.hostname) = ?") |
| 328 | + params.append(value) |
| 329 | + cluster_hostname_where.append("LOWER(logs.hostname) LIKE ?") |
| 330 | + params.append(f"{value}.%") |
| 331 | + where.append(f"({' OR '.join(cluster_hostname_where)})") |
| 332 | + else: |
| 333 | + where.append("1 = 0") |
| 334 | + |
270 | 335 | where_sql = f"WHERE {' AND '.join(where)}" if where else '' |
271 | 336 | joins_sql = f"{' '.join(joins)}" if joins else '' |
272 | 337 | offset = (page - 1) * per_page |
|
0 commit comments