-
Notifications
You must be signed in to change notification settings - Fork 136
Expand file tree
/
Copy pathreport_build_status.py
More file actions
763 lines (662 loc) · 29.6 KB
/
report_build_status.py
File metadata and controls
763 lines (662 loc) · 29.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Installing prerequisites:
#
# sudo python3 -m pip install python-dateutil progress attrs
"""A utility to report on daily build status.
USAGE:
python3 scripts/gha/report_build_status.py \
--token ${{github.token}}
"""
import datetime
import dateutil
import dateutil.parser
import dateutil.relativedelta
import dateutil.utils
import fcntl
import io
import os
import pickle
import progress
import progress.bar
import re
import requests
import shutil
import sys
import tempfile
import zipfile
import pickle
from absl import app
from absl import flags
from absl import logging
import firebase_github
import summarize_test_results
FLAGS = flags.FLAGS

# --- Command-line flags ------------------------------------------------------
flags.DEFINE_string(
    "token", None,
    "github.token: A token to authenticate on your repository.")
flags.DEFINE_string(
    "start", None,
    "start date of the range to report, default = [--days] days before start")
flags.DEFINE_string(
    "end", None,
    "end date of the range to report, default = today")
flags.DEFINE_string(
    "days", '7',
    "If start date is unspecified, go this many days back")
flags.DEFINE_bool(
    "output_markdown", False,
    "Output a Markdown-formatted table.")
flags.DEFINE_bool(
    "reverse", False,
    "Reverse output, so most recent is first.")
flags.DEFINE_bool(
    "output_header", True,
    "Output a table header row. Forced true if outputting markdown.")
flags.DEFINE_bool(
    "output_username", False,
    "Include a username column in the outputted table, otherwise include a blank column in text or no column in Markdown.")
flags.DEFINE_bool(
    "include_blank_column", True,
    "In text output, include a blank column to match the build log spreadsheet format.")
flags.DEFINE_string(
    "write_cache", None,
    "Write a cache file that can be used with --read_cache on a subsequent run.")
flags.DEFINE_string(
    "read_cache", None,
    "Read a cache file that was written by a previous run via --write_cache.")
flags.DEFINE_enum(
    "report", "daily_log",
    ["daily_log", "test_summary"],
    "Choose whether to report a daily build/test log or a summary of failing and flaky tests.")
flags.DEFINE_integer(
    "summary_count", 10,
    "If --report=test_summary, how many of the top tests to show.")
flags.DEFINE_enum(
    "summary_type", "all", ["all", "errors", "flakes"],
    "Whether to include flakes, errors, or all in the test summary.")
flags.DEFINE_bool(
    "summary_include_crashes", True,
    "Whether to include CRASH/TIMEOUT in the test summary.")
flags.DEFINE_bool(
    "firestore", False,
    "Report on Firestore tests rather than on general tests.")

# --- Workflow/repository constants ------------------------------------------
_WORKFLOW_TESTS = 'integration_tests.yml'
_WORKFLOW_PACKAGING = 'cpp-packaging.yml'
_TRIGGER_USER = 'firebase-workflow-trigger[bot]'
_BRANCH = 'main'
_LIMIT = 400 # Hard limit on how many jobs to fetch.

# Status labels for the report table. These are decorated with emoji and
# Markdown bold in main() when --output_markdown is set.
_PASS_TEXT = "Pass"
_FAILURE_TEXT = "Failure"
_FLAKY_TEXT = "Pass (flaky)"
_MISSING_TEXT = "Missing"

# Substrings matched against a run's start timestamp to tell the scheduled
# general test run apart from the scheduled Firestore run; e.g. ' 09:0'
# matches any start time between 09:00 and 09:09.
general_test_time = ' 09:0'
firestore_test_time = ' 10:0'
def rename_key(old_dict, old_name, new_name):
  """Return a copy of a dict with one key renamed, preserving order.

  Args:
    old_dict: Source dictionary; it is not modified.
    old_name: Key to rename.
    new_name: Replacement key name.

  Returns:
    A new dict with the same items in the same insertion order, with any
    key equal to old_name replaced by new_name.
  """
  # dicts preserve insertion order, so rebuilding item-by-item keeps the
  # original ordering. Iterate .items() rather than zipping keys/values.
  return {(new_name if key == old_name else key): value
          for key, value in old_dict.items()}
def english_list(items, sep=','):
  """Format a list of strings as an English-language enumeration.

  Two items are joined with "and"; three or more are joined with the
  separator, with "and" before the final item (Oxford style).

  Args:
    items: List of strings to join. The input list is not modified.
    sep: Separator between items when there are more than two.

  Returns:
    A single formatted string.
  """
  if len(items) == 2:
    return items[0] + " and " + items[1]
  # Work on a copy: the original code mutated the caller's list in place
  # when prefixing "and " onto the final item.
  items = list(items)
  if len(items) > 2:
    items[-1] = 'and ' + items[-1]
  return (sep + ' ').join(items)
def decorate_url(text, url):
  """Wrap text in a Markdown link to url, with nonbreaking spaces.

  When Markdown output is disabled the text is returned untouched.
  """
  if FLAGS.output_markdown:
    # Spaces become U+00A0 so the label never wraps inside a table cell.
    return "[%s](%s)" % (text.replace(" ", " "), url)
  return text
def analyze_log(text, url):
  """Classify a log summary into (build_status, test_status) labels.

  Scans the summary text for build/test ERROR/FAILURE/FLAKINESS markers
  and returns a pair of decorated Pass/Failure/Flaky strings linking to
  the given url.
  """
  summary = text if text else ""

  def _classify(stage):
    # Hard failures take precedence over flakiness for a given stage.
    if ('[%s] [ERROR]' % stage) in summary or ('[%s] [FAILURE]' % stage) in summary:
      return decorate_url(_FAILURE_TEXT, url)
    if ('[%s] [FLAKINESS]' % stage) in summary:
      return decorate_url(_FLAKY_TEXT, url)
    return decorate_url(_PASS_TEXT, url)

  return (_classify('BUILD'), _classify('TEST'))
def format_errors(all_errors, severity, event):
  """Return an English-language summary for one (severity, event) bucket.

  Args:
    all_errors: Nested dict from aggregate_errors_from_log(), keyed
      [severity][event][product][platform].
    severity: 'ERROR' or 'FLAKINESS'.
    event: 'BUILD' or 'TEST'.

  Returns:
    A formatted string (e.g. "test errors in Auth on Android and iOS"),
    or None if there are no entries for this severity/event.
  """
  product_errors = []
  if severity not in all_errors: return None
  if event not in all_errors[severity]: return None
  errors = all_errors[severity][event]
  total_errors = 0
  individual_errors = 0
  for product, platform_dict in errors.items():
    platforms = list(platform_dict.keys())
    # Human-readable product name. 'ump' is an acronym; 'missing_log' is
    # a pseudo-product for runs that produced no log at all.
    if product == 'missing_log':
      product_name = 'missing logs'
    elif product == 'ump':
      product_name = product.upper()
    else:
      product_name = product.replace('_', ' ').title()
    # If every iOS failure descriptor is simulator-only, report the
    # platform as "iOS simulator" instead of plain "iOS".
    if 'iOS' in platforms:
      all_simulator = True
      for descriptors in platform_dict['iOS']['description']:
        if 'simulator_' not in descriptors:
          all_simulator = False
      if all_simulator:
        platform_dict = rename_key(platform_dict, 'iOS', 'iOS simulator')
        platforms = list(platform_dict.keys())
    # Same treatment for emulator-only Android failures.
    if 'Android' in platforms:
      all_emulator = True
      for descriptors in platform_dict['Android']['description']:
        if 'emulator_' not in descriptors:
          all_emulator = False
      if all_emulator:
        platform_dict = rename_key(platform_dict, 'Android', 'Android emulator')
        platforms = list(platform_dict.keys())
    total_errors += 1
    individual_errors += len(platforms)
    platforms_text = english_list(platforms)
    # Keep "missing logs" first so the special-case wording below can
    # detect and rephrase it.
    if product == 'missing_log':
      product_errors.insert(0, '%s on %s' % (product_name, platforms_text))
    else:
      product_errors.append('%s on %s' % (product_name, platforms_text))
  event_text = event.lower()
  severity_text = 'flake' if severity == 'FLAKINESS' else severity.lower()
  if total_errors == 0:
    return None
  # Use semicolons between items if any single item already contains a comma.
  final_text = english_list(product_errors, ';' if ',' in ''.join(product_errors) else ',')
  if total_errors == 1:
    if 'missing logs' in final_text:
      # e.g. "missing logs" -> "missing test logs"; returned as-is with no
      # "a <event> <severity>" prefix.
      final_text = final_text.replace('missing logs', 'missing %s logs' % event_text)
      return final_text
    else:
      final_text = 'in ' + final_text
  else:
    final_text = ('including ' if 'missing logs' in final_text else 'in ') + final_text
  # Assemble e.g. "a test error in ..." / "test flakes in ...".
  final_text = '%s%s %s%s %s' % ('a ' if individual_errors == 1 else '',
                                 event_text,
                                 severity_text,
                                 's' if individual_errors > 1 else '',
                                 final_text)
  return final_text
def aggregate_errors_from_log(text, debug=False):
  """Parse a test-summary log into a nested error dict.

  The expected format is a product header line ("auth:") followed by
  detail lines until a blank line ends that product's section. Detail
  lines of the form "[BUILD|TEST] [ERROR|FAILURE|FLAKINESS] [Platform]
  [descriptors]" open a new entry; subsequent "failed tests: ['a', 'b']"
  lines attach test names to the most recently opened entry.

  Args:
    text: The log summary text, or a falsy value for no log.
    debug: If True, echo each parsed line to stdout.

  Returns:
    Dict keyed [severity][event][product][platform] with 'description'
    and 'test_list' sets at the leaves. FAILURE is folded into ERROR.
  """
  if not text: return {}
  # Ensure a trailing blank line so the final product section is closed.
  text += '\n'
  errors = {}
  lines = text.split('\n')
  current_product = None
  event = None
  severity = None
  platform = None
  other = None
  product = None
  for line in lines:
    if debug: print(line)
    if not current_product:
      # Looking for a product header like "auth:".
      m = re.search(r'^([a-z_]+):', line)
      if m:
        current_product = m.group(1)
    else:
      # Got a current product
      if len(line) == 0:
        # Blank line terminates the current product's section.
        current_product = None
      else:
        m = re.search(
            r'\[(BUILD|TEST)\] \[(ERROR|FAILURE|FLAKINESS)\] \[([a-zA-Z]+)\] (\[.*\])',
            line)
        if m:
          event = m.group(1)
          severity = m.group(2)
          # Treat FAILURE and ERROR as the same severity bucket.
          if severity == "FAILURE": severity = "ERROR"
          platform = m.group(3)
          other = m.group(4)
          product = current_product
          if severity not in errors:
            errors[severity] = {}
          if event not in errors[severity]:
            errors[severity][event] = {}
          if product not in errors[severity][event]:
            errors[severity][event][product] = {}
          if platform not in errors[severity][event][product]:
            errors[severity][event][product][platform] = {}
            errors[severity][event][product][platform]['description'] = set()
            errors[severity][event][product][platform]['test_list'] = set()
          errors[severity][event][product][platform]['description'].add(other)
        else:
          # NOTE(review): this branch reuses severity/event/product/platform
          # from the previous detail match; a "failed tests:" line appearing
          # before any detail line would raise a KeyError — presumably the
          # log format guarantees the ordering; confirm.
          m2 = re.search(r"failed tests: \[\'(.*)\'\]", line)
          if m2:
            test_list = m2.group(1).split("', '")
            for test_name in test_list:
              errors[severity][event][product][platform]['test_list'].add(test_name)
  return errors
def create_notes(text, debug=False):
  """Combine the aggregated error sets into a single English sentence."""
  if not text: return ''
  errors = aggregate_errors_from_log(text, debug)
  # Report build errors, then test errors, then test flakes, in that order.
  log_items = [
      item for item in (
          format_errors(errors, 'ERROR', 'BUILD'),
          format_errors(errors, 'ERROR', 'TEST'),
          format_errors(errors, 'FLAKINESS', 'TEST'),
      ) if item
  ]
  # Build flakiness is only worth mentioning when nothing else went wrong.
  if len(log_items) == 0:
    build_flakes = format_errors(errors, 'FLAKINESS', 'BUILD')
    if build_flakes: log_items.append(build_flakes)
  if len(log_items) == 0:
    return ''
  # If either item already contains an "and", add a comma between them.
  if len(log_items) == 2 and ' and ' in ''.join(log_items):
    log_items[0] += ','
  sentence = english_list(log_items)
  return sentence[0].capitalize() + sentence[1:] + '.'
def get_message_from_github_log(logs_zip,
                                regex_filename,
                                regex_line, debug=False):
  """Search a GitHub run's log archive for a line matching a pattern.

  Scans each file in the zip whose name matches regex_filename and
  returns the first re.Match of regex_line (MULTILINE | DOTALL) found in
  any such file's decoded text, or None if nothing matches.
  """
  candidates = (name for name in logs_zip.namelist()
                if re.search(regex_filename, name))
  for name in candidates:
    contents = logs_zip.read(name).decode()
    if debug: print(contents)
    match = re.search(regex_line, contents, re.MULTILINE | re.DOTALL)
    if match:
      return match
  return None
def main(argv):
  """Fetch GitHub workflow runs and print a daily build/test status report.

  Gathers three sets of runs (scheduled source-repo tests, scheduled SDK
  packaging runs, and packaged-SDK tests triggered by the workflow bot),
  downloads their log summaries, and prints either a per-day log table
  (--report=daily_log) or a ranked failing/flaky test summary
  (--report=test_summary).
  """
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  if not FLAGS.verbosity:
    logging.set_verbosity(logging.WARN)

  # Reporting window [start_date, end_date]; defaults to the last --days
  # days ending today.
  end_date = (dateutil.parser.parse(FLAGS.end) if FLAGS.end else dateutil.utils.today()).date()
  start_date = (dateutil.parser.parse(FLAGS.start) if FLAGS.start else dateutil.utils.today() - dateutil.relativedelta.relativedelta(days=int(FLAGS.days)-1)).date()
  all_days = set()

  if FLAGS.output_markdown:
    # Forced options if outputting Markdown.
    FLAGS.output_header = True
    FLAGS.output_username = False
    # Dress up the status labels for Markdown output.
    global _FAILURE_TEXT, _PASS_TEXT, _FLAKY_TEXT, _MISSING_TEXT
    _FAILURE_TEXT = "❌ **" + _FAILURE_TEXT + "**"
    _PASS_TEXT = "✅ " + _PASS_TEXT
    _FLAKY_TEXT = _PASS_TEXT + " (flaky)"
    _MISSING_TEXT = "❌ **" + _MISSING_TEXT + "**"

  if FLAGS.read_cache:
    # Load previously fetched results instead of querying GitHub.
    logging.info("Reading cache file: %s", FLAGS.read_cache)
    with open(FLAGS.read_cache, "rb") as handle:
      fcntl.lockf(handle, fcntl.LOCK_SH) # For reading, shared lock is OK.
      _cache = pickle.load(handle)
      fcntl.lockf(handle, fcntl.LOCK_UN)
    all_days = _cache['all_days']
    source_tests = _cache['source_tests']
    packaging_runs = _cache['packaging_runs']
    package_tests = _cache['package_tests']
  else:
    _cache = {}
    with progress.bar.Bar('Reading jobs...', max=3) as bar:
      # 1) Scheduled integration-test runs against the source repo.
      workflow_id = _WORKFLOW_TESTS
      all_runs = firebase_github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT)
      bar.next()
      source_tests = {}
      for run in reversed(all_runs):
        run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True)
        run['day'] = run['date'].date()
        day = str(run['date'].date())
        if day in source_tests: continue
        if run['status'] != 'completed': continue
        if run['day'] < start_date or run['day'] > end_date: continue
        run['duration'] = dateutil.parser.parse(run['updated_at'], ignoretz=True) - run['date']
        # The scheduled general vs. Firestore runs are distinguished only by
        # their start time (general_test_time / firestore_test_time).
        compare_test_time = firestore_test_time if FLAGS.firestore else general_test_time
        if compare_test_time in str(run['date']):
          source_tests[day] = run
          all_days.add(day)

      # 2) Scheduled SDK packaging runs.
      workflow_id = _WORKFLOW_PACKAGING
      all_runs = firebase_github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT)
      bar.next()
      packaging_runs = {}
      packaging_run_ids = set()
      for run in reversed(all_runs):
        run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True)
        day = str(run['date'].date())
        run['day'] = run['date'].date()
        if day in packaging_runs: continue
        if run['status'] != 'completed': continue
        if run['day'] < start_date or run['day'] > end_date: continue
        day = str(run['date'].date())
        all_days.add(day)
        packaging_runs[day] = run
        packaging_run_ids.add(str(run['id']))

      # 3) Test runs dispatched against the packaged SDK by the trigger bot.
      workflow_id = _WORKFLOW_TESTS
      all_runs = firebase_github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'workflow_dispatch', _LIMIT)
      bar.next()
      package_tests_all = []
      for run in reversed(all_runs):
        run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True)
        day = str(run['date'].date())
        run['day'] = run['date'].date()
        if day not in packaging_runs: continue
        if run['status'] != 'completed': continue
        if run['day'] < start_date or run['day'] > end_date: continue
        if run['triggering_actor']['login'] != _TRIGGER_USER: continue
        package_tests_all.append(run)

    # For each workflow_trigger run of the tests, determine which packaging run it goes with.
    package_tests = {}
    logging.info("Source tests: %s %s", list(source_tests.keys()), [source_tests[r]['id'] for r in source_tests.keys()])
    logging.info("Packaging runs: %s %s", list(packaging_runs.keys()), [packaging_runs[r]['id'] for r in packaging_runs.keys()])
    with progress.bar.Bar('Downloading triggered workflow logs...', max=len(package_tests_all)) as bar:
      for run in package_tests_all:
        day = str(run['date'].date())
        if day in package_tests:
          # Packaging triggers two tests. For Firestore, we want the larger run ID (the second run triggered).
          if FLAGS.firestore and int(package_tests[day]['id']) > int(run['id']):
            bar.next()
            continue
          # For general tests we want the smaller run ID (the first run triggered).
          if not FLAGS.firestore and int(package_tests[day]['id']) < int(run['id']):
            bar.next()
            continue
        packaging_run = 0
        # Because of the retry logic, there can be multiple attempts.
        # The default log location however only include the last attempt.
        # Thus, we iterate over the attempts to look for the check_and_prepare file
        # NOTE(review): range(1, run_attempt) excludes the final attempt —
        # presumably covered by the default logs URL; confirm.
        for attempt in range(1, run['run_attempt']):
          logs_url = run['url'] + '/attempts/%d/logs' % attempt
          headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token}
          with requests.get(logs_url, headers=headers, stream=True) as response:
            if response.status_code == 200:
              logs_compressed_data = io.BytesIO(response.content)
              logs_zip = zipfile.ZipFile(logs_compressed_data)
              # The check_and_prepare step logs which packaging run produced
              # the SDK package this test run is exercising.
              m = get_message_from_github_log(
                  logs_zip,
                  r'check_and_prepare\.txt',
                  r'\[warning\]Downloading SDK package from previous run:[^\n]*/([0-9]*)$')
              if m:
                packaging_run = m.group(1)
          if str(packaging_run) in packaging_run_ids:
            package_tests[day] = run
            break
        bar.next()

    logging.info("Package tests: %s %s", list(package_tests.keys()), [package_tests[r]['id'] for r in package_tests.keys()])
    with progress.bar.Bar('Downloading test summaries...', max=len(source_tests)+len(package_tests)) as bar:
      for tests in source_tests, package_tests:
        for day in tests:
          run = tests[day]
          run['log_success'] = True
          run['log_results'] = ''
          artifacts = firebase_github.list_artifacts(FLAGS.token, run['id'])
          found_artifacts = False
          # There are possibly multiple artifacts, so iterate through all of them,
          # and extract the relevant ones into a temp folder, and then summarize them all.
          # Prioritize artifacts by date, older ones might be expired.
          sorted_artifacts = sorted(artifacts, key=lambda art: dateutil.parser.parse(art['created_at']), reverse=True)
          with tempfile.TemporaryDirectory() as tmpdir:
            for a in sorted_artifacts: # Iterate over sorted artifacts
              if 'log-artifact' in a['name']:
                logging.debug("Attempting to download artifact: %s (ID: %s, Created: %s)", a['name'], a['id'], a['created_at'])
                # Pass tmpdir to download_artifact to save directly
                artifact_downloaded_path = os.path.join(tmpdir, f"{a['id']}.zip")
                # Attempt to download the artifact with a timeout
                download_success = False # Initialize download_success
                try:
                  # download_artifact now returns True on success, None on failure.
                  if firebase_github.download_artifact(FLAGS.token, a['id'], output_path=artifact_downloaded_path):
                    download_success = True
                except requests.exceptions.Timeout:
                  logging.warning(f"Timeout while trying to download artifact: {a['name']} (ID: {a['id']})")
                  # download_success remains False
                if download_success and os.path.exists(artifact_downloaded_path):
                  try:
                    with open(artifact_downloaded_path, "rb") as f:
                      artifact_contents = f.read()
                    if artifact_contents: # Ensure content was read
                      found_artifacts = True
                      artifact_data = io.BytesIO(artifact_contents)
                      with zipfile.ZipFile(artifact_data) as artifact_zip: # Use with statement for ZipFile
                        artifact_zip.extractall(path=tmpdir)
                      logging.info("Successfully downloaded and extracted artifact: %s", a['name'])
                    else:
                      logging.warning("Artifact %s (ID: %s) was downloaded but is empty.", a['name'], a['id'])
                  except zipfile.BadZipFile:
                    logging.error("Failed to open zip file for artifact %s (ID: %s). It might be corrupted or not a zip file.", a['name'], a['id'])
                  except Exception as e:
                    logging.error("An error occurred during artifact processing %s (ID: %s): %s", a['name'], a['id'], e)
                  finally:
                    # Clean up the downloaded zip file whether it was processed successfully or not
                    if os.path.exists(artifact_downloaded_path):
                      os.remove(artifact_downloaded_path)
                elif not download_success: # Covers False or None from download_artifact
                  # Logging for non-timeout failures is now primarily handled within download_artifact
                  # We only log a general failure here if it wasn't a timeout (already logged)
                  # and download_artifact indicated failure (returned None).
                  # This avoids double logging for specific HTTP errors like 410.
                  pass # Most specific logging is now in firebase_github.py
            if found_artifacts:
              (success, results) = summarize_test_results.summarize_logs(tmpdir, False, False, True)
              logging.info("Summarized logs results - Success: %s, Results (first 100 chars): %.100s", success, results)
              run['log_success'] = success
              run['log_results'] = results
            else:
              logging.warning("No artifacts could be successfully downloaded and processed for run %s on day %s.", run['id'], day)
          if not found_artifacts:
            # Artifacts expire after some time, or download failed, so if they are gone, we need
            # to read the GitHub logs instead. This is much slower, so we
            # prefer to read artifacts instead whenever possible.
            logging.info("Reading github logs for run %s instead", run['id'])
            logs_url = run['logs_url']
            headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token}
            with requests.get(logs_url, headers=headers, stream=True) as response:
              if response.status_code == 200:
                logs_compressed_data = io.BytesIO(response.content)
                logs_zip = zipfile.ZipFile(logs_compressed_data)
                m = get_message_from_github_log(
                    logs_zip,
                    r'summarize-results\.txt',
                    r'\[error\]INTEGRATION TEST FAILURES\n—+\n(.*)$')
                if m:
                  run['log_success'] = False
                  # Trim the captured failure text at the line starting with
                  # this run's date, if such a line is present.
                  m2 = re.match(r'(.*?)^' + day, m.group(1), re.MULTILINE | re.DOTALL)
                  if m2:
                    run['log_results'] = m2.group(1)
                  else:
                    run['log_results'] = m.group(1)
            logging.debug("Integration test results: %s", run['log_results'])
          tests[day] = run
          bar.next()

    # Stash the fetched data so --write_cache can persist it below.
    _cache['all_days'] = all_days
    _cache['source_tests'] = source_tests
    _cache['packaging_runs'] = packaging_runs
    _cache['package_tests'] = package_tests

  if FLAGS.write_cache:
    logging.info("Writing cache file: %s", FLAGS.write_cache)
    with open(FLAGS.write_cache, "wb") as handle:
      fcntl.lockf(handle, fcntl.LOCK_EX) # For writing, need exclusive lock.
      pickle.dump(_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)
      fcntl.lockf(handle, fcntl.LOCK_UN)

  prev_notes = ''
  last_good_day = None  # NOTE(review): assigned but never read below.
  output = ""
  if FLAGS.output_markdown:
    output += "### Testing History (last %d days)\n\n" % len(all_days)
  # Assemble table columns; the username/blank columns only apply to the
  # plain-text (spreadsheet) format.
  table_fields = (
      ["Date"] +
      (["Username"] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) +
      ([""] if FLAGS.include_blank_column and not FLAGS.output_markdown else []) +
      ["Build vs Source Repo", "Test vs Source Repo",
       "SDK Packaging", "Build vs SDK Package", "Test vs SDK Package",
       "Notes"]
  )
  if FLAGS.output_markdown:
    row_prefix = "| "
    row_separator = "|"
    row_suffix = " |"
  else:
    row_prefix = row_suffix = ""
    row_separator = "\t"
  table_header_string = row_prefix + row_separator.join(table_fields) + row_suffix
  table_row_fmt = row_prefix + row_separator.join(["%s" for f in table_fields]) + row_suffix
  if FLAGS.output_header:
    output += table_header_string + "\n"
    if FLAGS.output_markdown:
      # Markdown header/body separator row.
      output += table_row_fmt.replace("%s", "---").replace(" ", "") + "\n"
  days_sorted = sorted(all_days)
  if FLAGS.reverse: days_sorted = reversed(days_sorted)
  for day in days_sorted:
    day_str = day
    if FLAGS.output_markdown:
      day_str = day_str.replace("-", "‑") # non-breaking hyphen.
    if day in source_tests:
      source_tests_log = analyze_log(source_tests[day]['log_results'], source_tests[day]['html_url'])
      source_results = source_tests[day]['log_results']
    else:
      # Mark as failure if missing
      source_tests_log = (decorate_url(_MISSING_TEXT, ""), decorate_url(_MISSING_TEXT, ""))
      source_results = ""
    if day in packaging_runs:
      if packaging_runs[day]['conclusion'] == "success":
        package_build_log = _PASS_TEXT
      else:
        package_build_log = _FAILURE_TEXT
      package_build_log = decorate_url(package_build_log, packaging_runs[day]['html_url'])
    else:
      package_build_log = decorate_url(_MISSING_TEXT, "")
    if day in package_tests:
      package_tests_log = analyze_log(package_tests[day]['log_results'], package_tests[day]['html_url'])
      package_results = package_tests[day]['log_results']
    else:
      package_tests_log = (decorate_url(_MISSING_TEXT, ""), decorate_url(_MISSING_TEXT, ""))
      package_results = ""
    # Prefer source-test notes; fall back to the packaged-SDK test notes.
    notes = create_notes(source_results if source_results else package_results)
    if FLAGS.output_markdown and notes:
      notes = "<details><summary> </summary>" + notes + "</details>"
    if notes == prev_notes and not FLAGS.output_markdown:
      if len(notes) > 0: notes = "'''" # Creates a "ditto" mark.
    else:
      prev_notes = notes
    table_row_contents = (
        [day_str] +
        ([os.getlogin()] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) +
        ([""] if FLAGS.include_blank_column and not FLAGS.output_markdown else []) +
        [source_tests_log[0],
         source_tests_log[1],
         package_build_log,
         package_tests_log[0],
         package_tests_log[1],
         notes]
    )
    output += (table_row_fmt % tuple(table_row_contents)) + "\n"

  if FLAGS.report == "daily_log":
    print(output)
  elif FLAGS.report == "test_summary":
    # Tally each (severity, product, platform, test) combination seen
    # across the reporting window.
    # NOTE(review): when --reverse is set, days_sorted is a reversed()
    # iterator already exhausted by the table loop above, so this loop
    # would see no days — confirm intended.
    test_list = {}
    for day in days_sorted:
      if day in source_tests and source_tests[day]['log_results']:
        errors = aggregate_errors_from_log(source_tests[day]['log_results'])
        test_link = source_tests[day]['html_url']
      elif day in package_tests and package_tests[day]['log_results']:
        errors = aggregate_errors_from_log(package_tests[day]['log_results'])
        test_link = package_tests[day]['html_url']
      else:
        continue
      sev_list = []
      if FLAGS.summary_type == "all" or FLAGS.summary_type == "flakes":
        sev_list.append('FLAKINESS')
      if FLAGS.summary_type == "all" or FLAGS.summary_type == "errors":
        sev_list.append('ERROR')
      for sev in sev_list:
        if sev in errors and 'TEST' in errors[sev]:
          test_entries = errors[sev]['TEST']
          for product, platform_dict in test_entries.items():
            if product == "missing_log":
              continue
            platforms = list(platform_dict.keys())
            for platform in platforms:
              test_names = list(test_entries[product][platform]['test_list'])
              if not test_names:
                test_names = ['Unspecified test']
              for test_name in test_names:
                if test_name == "CRASH/TIMEOUT":
                  if not FLAGS.summary_include_crashes: continue
                  else: test_name = "Crash or timeout"
                test_id = "%s | %s | %s | %s" % (sev.lower(), product, platform, test_name)
                if test_id not in test_list:
                  test_list[test_id] = {}
                  test_list[test_id]['count'] = 0
                  test_list[test_id]['links'] = []
                test_list[test_id]['count'] += 1
                test_list[test_id]['links'].append(test_link)
                test_list[test_id]['latest'] = day
    # Most-frequent entries first.
    test_list_sorted = reversed(sorted(test_list.keys(), key=lambda x: test_list[x]['count']))
    if FLAGS.output_header:
      if FLAGS.output_markdown:
        print("| # | Latest | Product | Platform | Test Info |")
        print("|---|---|---|---|---|")
      else:
        print("Count\tLatest\tSeverity\tProduct\tPlatform\tTest Name")
    num_shown = 0
    for test_id in test_list_sorted:
      (severity, product, platform, test_name) = test_id.split(" | ")
      days_ago = (dateutil.utils.today() - dateutil.parser.parse(test_list[test_id]['latest'])).days
      if days_ago <= 0:
        latest = "Today"
      else:
        latest = "%s day%s ago" % (days_ago, '' if days_ago == 1 else 's')
      if FLAGS.output_markdown:
        if severity == "error":
          severity = "(failure)"
        elif severity == "flakiness":
          severity = "(flaky)"
        latest = latest.replace(" ", " ")
        product = product.replace("_", " ")
        # NOTE(review): the second line re-applies .title(), so the "gma"
        # uppercasing from the first line is immediately undone ("GMA" ->
        # "Gma") — looks like a bug; confirm intended display names.
        product = product.upper() if product == "gma" else product.title()
        product = product.upper() if product == "ump" else product.title()
        if len(test_list[test_id]['links']) > 0:
          latest = "[%s](%s)" % (latest, test_list[test_id]['links'][-1])
        link_list = []
        seen = set()
        num = 1
        for link in test_list[test_id]['links']:
          if link not in seen:
            seen.add(link)
            link_list.append("[%d](%s)" % (num, link))
            num += 1
        # If test_name looks like FirebaseSomethingTest.Something, link it to code search.
        m = re.match(r"(Firebase[A-Za-z]*Test)\.(.*)", test_name)
        if m:
          search_url = "http://github.com/search?q=repo:firebase/firebase-cpp-sdk%%20\"%s,%%20%s\"" % (m.group(1), m.group(2))
          test_name_str = "[%s](%s)" % (test_name, search_url)
        else:
          test_name_str = test_name
        product_display_name = product.replace("_", " ").title()
        print("| %d | %s | %s | %s | %s %s<br/> Logs: %s |" % (
            test_list[test_id]['count'], latest,
            product_display_name, platform,
            test_name_str, severity, " ".join(link_list)))
      else:
        print("%d\t%s\t%s\t%s\t%s\t%s" % (test_list[test_id]['count'], latest, severity, product, platform, test_name))
      num_shown += 1
      if num_shown >= FLAGS.summary_count:
        break
if __name__ == "__main__":
  # --token is mandatory; every other flag has a usable default.
  flags.mark_flag_as_required("token")
  app.run(main)