// All responses from this endpoint are JSON.
header('Content-Type: application/json');
|
/**
 * Choose an aggregation bucket (in seconds) to keep chart payloads reasonable
 * for long time ranges.
 *
 * @param string|null $period    Built-in UI period key ('30m' ... '30d'), or null for a custom range.
 * @param string|null $startDate Custom range start (any DateTime-parsable string).
 * @param string|null $endDate   Custom range end (any DateTime-parsable string).
 * @return int Bucket size in seconds; 0 means "no aggregation" (raw samples).
 */
function choose_bucket_seconds(?string $period, ?string $startDate, ?string $endDate): int
{
    // Fixed presets for the built-in UI periods. `match` compares strictly,
    // which is equivalent to the old switch here since $period is ?string.
    $preset = match ($period) {
        '30d' => 7200,              // 2 hours
        '7d' => 1800,               // 30 minutes
        '24h' => 300,               // 5 minutes
        '8h' => 60,                 // 1 minute
        '120m', '60m', '30m' => 0,  // short ranges: no aggregation needed
        default => null,            // unknown/absent period: fall through to custom range
    };
    if ($preset !== null) {
        return $preset;
    }

    // For custom ranges, pick a bucket that targets ~2000 points max.
    if ($startDate && $endDate) {
        try {
            // Immutable date objects: no aliasing surprises if this is refactored.
            $start = new DateTimeImmutable($startDate);
            $end = new DateTimeImmutable($endDate);
        } catch (Exception $e) {
            // Unparsable dates: return raw samples rather than erroring out.
            return 0;
        }

        $seconds = $end->getTimestamp() - $start->getTimestamp();
        if ($seconds <= 0) {
            return 0; // empty or inverted range
        }

        $targetPoints = 2000;
        $rawBucket = (int)ceil($seconds / $targetPoints);

        // Round up to the smallest "nice" bucket size that fits.
        foreach ([60, 300, 900, 1800, 3600, 7200, 14400, 21600, 43200, 86400] as $bucket) {
            if ($rawBucket <= $bucket) {
                return $bucket;
            }
        }
        return 86400; // cap bucket size at 1 day
    }

    return 0;
}
| 55 | + |
// Request filters (all optional, taken straight from the query string).
$host_id = $_GET['host_id'] ?? null; // restrict output to a single host id
$limit = $_GET['limit'] ?? null;     // caller-requested row cap (sanitised below)
$hop = $_GET['hop'] ?? 'last';       // MTR hop selector: 'last' or a hop number
|
// Caller-requested row cap, sanitised to a non-negative integer so that
// negative or non-numeric input can never produce invalid SQL ("LIMIT -5").
$limit_int = max(0, (int)$limit);
$limit_sql = $limit_int > 0 ? "LIMIT " . $limit_int : "";

// Keep non-chart payloads bounded even when the requested time range is large.
// This prevents the API from timing out / exhausting memory once enough MTR data accumulates.
$table_result_limit = $limit_int > 0 ? min($limit_int, 300) : 200; // number of mtr_results rows per host
$events_limit = $limit_int > 0 ? min($limit_int, 5000) : 2000;
$res_limit_sql = "LIMIT " . (int)$table_result_limit;
$events_limit_sql = "LIMIT " . (int)$events_limit;

// $period / $start_date / $end_date are parsed earlier in this file from the
// request — TODO confirm naming against the elided section above.
$bucket_seconds = choose_bucket_seconds($period, $start_date, $end_date);

$response_data = [];
$response_events = [];
$response_mtr = [];
|
try {
    // Fetch all hosts once; reused for the chart series, the MTR table, and
    // the hosts list returned in the payload.
    // NOTE(review): this SELECT includes api_key, which is echoed back to the
    // client in the 'hosts' array below — confirm that exposure is intentional.
    $stmt_all_hosts = $pdo->query("SELECT id, name, api_key, speedtest_server_id FROM hosts ORDER BY name ASC");
    $all_hosts = $stmt_all_hosts->fetchAll();

    // 1. Fetch CHART data
    if (in_array($metric, ['bufferbloat', 'download', 'upload', 'speedtest'], true)) {
        // Speed-test based metrics.
        foreach ($all_hosts as $host) {
            if ($host_id && $host_id != $host['id']) continue;

            if ($bucket_seconds > 0) {
                // Aggregate into fixed-size time buckets so long ranges return a
                // bounded number of points. FROM_UNIXTIME / UNIX_TIMESTAMP make
                // this query MySQL/MariaDB-specific.
                $bucket = (int)$bucket_seconds;
                $bucket_ts = "FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(s.timestamp) / $bucket) * $bucket)";
                $sql = "
                    SELECT
                        $bucket_ts AS timestamp,
                        AVG(s.download_mbps) AS download_mbps,
                        AVG(s.upload_mbps) AS upload_mbps,
                        AVG(s.latency_idle) AS latency_idle,
                        AVG(s.latency_download) AS latency_download,
                        AVG(s.latency_upload) AS latency_upload
                    FROM speed_tests s
                    WHERE s.host_id = ? AND s.$time_clause
                    GROUP BY timestamp
                    ORDER BY timestamp ASC
                ";
            } else {
                // Raw samples: DESC + $limit_sql keeps only the newest rows;
                // they are reversed to chronological order after fetching.
                $sql = "SELECT s.timestamp, s.download_mbps, s.upload_mbps, s.latency_idle, s.latency_download, s.latency_upload FROM speed_tests s WHERE s.host_id = ? AND s.$time_clause ORDER BY s.timestamp DESC $limit_sql";
            }

            $stmt = $pdo->prepare($sql);
            $stmt->execute(array_merge([$host['id']], $params_base));
            $tests = $stmt->fetchAll();
            if ($bucket_seconds === 0) {
                // Bucketed queries are already ORDER BY timestamp ASC.
                $tests = array_reverse($tests);
            }

            if ($metric === 'bufferbloat') {
                // Bufferbloat = latency under load minus idle latency, floored at 0.
                $response_data[$host['name'] . ' (Down Bloat)'] = array_map(
                    static fn($t) => ['timestamp' => $t['timestamp'], 'value' => max(0, (float)$t['latency_download'] - (float)$t['latency_idle']), 'is_under_load' => false],
                    $tests
                );
                $response_data[$host['name'] . ' (Up Bloat)'] = array_map(
                    static fn($t) => ['timestamp' => $t['timestamp'], 'value' => max(0, (float)$t['latency_upload'] - (float)$t['latency_idle']), 'is_under_load' => false],
                    $tests
                );
            } elseif ($metric === 'speedtest') {
                // Combined view: one Download and one Upload series per host.
                $response_data[$host['name'] . ' (Download)'] = array_map(
                    static fn($t) => ['timestamp' => $t['timestamp'], 'value' => $t['download_mbps'], 'is_under_load' => false],
                    $tests
                );
                $response_data[$host['name'] . ' (Upload)'] = array_map(
                    static fn($t) => ['timestamp' => $t['timestamp'], 'value' => $t['upload_mbps'], 'is_under_load' => false],
                    $tests
                );
            } else {
                // Single-column view: 'download' or 'upload'.
                $col = ($metric === 'download') ? 'download_mbps' : 'upload_mbps';
                $response_data[$host['name']] = array_map(
                    static fn($t) => ['timestamp' => $t['timestamp'], 'value' => $t[$col], 'is_under_load' => false],
                    $tests
                );
            }
        }
    } else {
        // MTR-based metrics. $metric_col and $hop_sql are built earlier in the
        // file — presumably whitelisted/parameterised; verify against that code.
        foreach ($all_hosts as $host) {
            if ($host_id && $host_id != $host['id']) continue;

            $current_params = array_merge([$host['id']], $params_base);
            if ($hop !== 'last') $current_params[] = (int)$hop;

            if ($bucket_seconds > 0) {
                $bucket = (int)$bucket_seconds;
                $bucket_ts = "FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(r.timestamp) / $bucket) * $bucket)";
                // MAX(is_under_load): a bucket counts as "under load" if any
                // sample inside it was under load.
                $sql = "
                    SELECT
                        $bucket_ts AS timestamp,
                        AVG(h.`$metric_col`) AS value,
                        MAX(r.is_under_load) AS is_under_load
                    FROM mtr_results r
                    JOIN mtr_hops h ON r.id = h.result_id
                    WHERE r.host_id = ? AND r.$time_clause AND h.hop_number = $hop_sql
                    GROUP BY timestamp
                    ORDER BY timestamp ASC
                ";
            } else {
                $sql = "SELECT r.timestamp, h.`$metric_col` as value, r.is_under_load FROM mtr_results r JOIN mtr_hops h ON r.id = h.result_id WHERE r.host_id = ? AND r.$time_clause AND h.hop_number = $hop_sql ORDER BY r.timestamp DESC $limit_sql";
            }

            $stmt = $pdo->prepare($sql);
            $stmt->execute($current_params);
            $rows = $stmt->fetchAll();
            if ($bucket_seconds === 0) {
                $rows = array_reverse($rows);
            }
            $response_data[$host['name']] = $rows;
        }
    }

    // 2. Fetch MTR data for the table (ALL HOPS)
    // Always bounded by $table_result_limit most recent results per host.
    foreach ($all_hosts as $host) {
        if ($host_id && $host_id != $host['id']) continue;

        $sql_mtr = "
            SELECT r.timestamp, r.is_under_load, r.target, h.hop_number, h.hostname, h.avg, h.loss
            FROM mtr_results r
            JOIN mtr_hops h ON r.id = h.result_id
            WHERE r.host_id = ? AND r.$time_clause
            AND r.id IN (
                SELECT id FROM (
                    SELECT id
                    FROM mtr_results
                    WHERE host_id = ? AND $time_clause
                    ORDER BY timestamp DESC
                    $res_limit_sql
                ) tmp
            )
            ORDER BY r.timestamp DESC, h.hop_number ASC
        ";

        $stmt_mtr = $pdo->prepare($sql_mtr);
        // $params_base appears twice: once for the outer WHERE, once for the subquery.
        $stmt_mtr->execute(array_merge([$host['id']], $params_base, [$host['id']], $params_base));
        $response_mtr[$host['name']] = $stmt_mtr->fetchAll();
    }

    // 3. Fetch Events (bounded by $events_limit_sql)
    $sql_events = "
        SELECT s.timestamp, s.download_mbps, s.upload_mbps, s.latency_idle, s.latency_download, s.latency_upload, s.result_url, h.name as host_name
        FROM speed_tests s
        JOIN hosts h ON s.host_id = h.id
        WHERE s.$time_clause
        ORDER BY s.timestamp ASC
        $events_limit_sql
    ";
    $stmt_events = $pdo->prepare($sql_events);
    $stmt_events->execute($params_base);
    $response_events = $stmt_events->fetchAll();

    // 4. Max Hop (computed over a bounded recent set to avoid scanning huge history)
    $max_hop_scope = 500;
    $max_hop_sql = "
        SELECT MAX(h.hop_number)
        FROM mtr_hops h
        WHERE h.result_id IN (
            SELECT id FROM (
                SELECT id
                FROM mtr_results
                WHERE " . ($host_id ? "host_id = ? AND " : "") . "$time_clause
                ORDER BY timestamp DESC
                LIMIT " . (int)$max_hop_scope . "
            ) t
        )
    ";
    $stmt_max_hop = $pdo->prepare($max_hop_sql);
    // Parameter list must mirror the conditional host_id placeholder above.
    $stmt_max_hop->execute($host_id ? array_merge([(int)$host_id], $params_base) : $params_base);
    $max_hop = $stmt_max_hop->fetchColumn() ?: 0;

    echo json_encode([
        'data' => $response_data,
        'events' => $response_events,
        'mtr' => $response_mtr,
        'hosts' => $all_hosts,
        'max_hop' => (int)$max_hop,
        'bucket_seconds' => (int)$bucket_seconds
    ]);
} catch (Throwable $e) {
    // Log full details server-side only; never echo exception messages to
    // clients — PDO messages can leak SQL fragments, table names, and DSN info.
    error_log('History API error: ' . $e->getMessage());
    http_response_code(500);
    echo json_encode(['error' => 'Server error while fetching data.']);
}
0 commit comments