## This program reads a set of log files from CAST (slow control and tracking)
## and, if desired, appends the data to an HDF5 file created using the
## `raw_data_manipulation` and `reconstruction` programs.
##
## Alternatively, information about the magnet history can be computed.
##
## CAST log file reader: reads log files (slow control or tracking logs)
## and outputs information about the files.
##
## Note: this tool can be compiled with the `-d:pure` flag to avoid pulling
## in the HDF5 dependency.
##
## If this tool is run with a single slow control and / or tracking log
## file (i.e. not a folder containing several log files), the tool only
## prints information from that file. In the case of a tracking log this is
## the start and end date of a contained tracking (if any). For the slow
## control log currently no useful output is produced.
##
## If a path to tracking log files is given together with an H5 file via the
## `--h5out` option (a file containing InGrid data for some of the times
## covered by the log files being read), the start and end times of each
## tracking will be added as attributes to the TOS runs contained within.
##
## If only a single H5 file is given together with the `--delete` argument,
## all tracking information will be deleted from the attributes in the file.
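##
## Illustrative invocations (the exact command line interface is defined by
## the `cligen` dispatch at the end of this file; apart from `--h5out` and
## `--delete` the option names and paths below are hypothetical):
##
##   # print the trackings found in a folder of tracking log files
##   cast_log_reader --tracking ~/CAST/TrackingLogs
##   # additionally map the trackings to runs of an existing H5 file
##   cast_log_reader --tracking ~/CAST/TrackingLogs --h5out reco_file.h5
##   # delete all tracking attributes from the H5 file again
##   cast_log_reader --h5out reco_file.h5 --delete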
import std / [times, strutils, strformat, os, algorithm,
              sequtils, parseutils, hashes, options, streams, tables]
import ggplotnim, cligen, flatty, untar
import unchained except Time
when not defined(pure):
import nimhdf5
import ingrid/[tos_helpers, projectDefs]
type
TrackingKind* = enum
rkTracking, # if tracking took place in this log
rkNoTracking # if no tracking took place in this log
LogFileKind* = enum
lkSlowControl, lkTracking
# object storing all slow control log data
Temperatures = object
amb: float
iron: float
mrb: float
mfb: float
ext: float
vent: float
SlowControlLog = object
filename: string
# store date of log file as string or Time?
date: Time
# time of day, stored as time duration starting from date
# each log starts at 12am
    timestamps: seq[int] # Unix timestamps (stored as int rather than seq[Duration])
# pressures relevant for InGrid
pmm, p3, p3_ba: seq[float]
# what's called `I_magnet` which is ``not`` the current, but the
# magnetic field in tesla!
B_magnet: seq[float]
# environmental ambient temperature
temps: seq[Temperatures]
humidity: seq[float]
# pointing positions as angles
h_angle, v_angle: seq[float]
# and encoder values
h_encoder, v_encoder: seq[uint16]
# MM gas alarm (if on, gas supply closed!)
mm_gas: seq[bool]
badLineCount: int # number of broken lines in this file
# object storing the tracking log data
TrackingLog* = object
# date the tracking log covers
name*: string
date*: Time
case kind*: TrackingKind
of rkNoTracking: discard
of rkTracking:
tracking_start*, tracking_stop*: Time
# and the line based data
timestamps*: seq[int]
#speedH: seq[float]
#speedV: seq[float]
isMoving*: seq[bool]
isTracking*: seq[bool]
magB*: seq[float] # magnetic field
# apparently magB is not the real value in the tracking logs anymore!
vertical*: seq[float] # precise vertical position (`V. precis.`)
horizontal*: seq[float] # precise horizontal position (`H precis.`)
verticalEnc*: seq[int] # vertical encoder position
      horizontalEnc*: seq[int] # horizontal encoder position
badLineCount: int
## A single version schema
Version = distinct string
VersionSchema = object
version: Version ## version number of the given schema
fTab: Table[string, int] ## field table mapping field name to column index
## A file describing multiple version schemas
VersionSchemaFile = object
versions: Table[Version, VersionSchema] ## maps version names to a schema
proc hash(v: Version): Hash {.borrow.}
proc `==`(v1, v2: Version): bool {.borrow.}
proc len(v: VersionSchema): int = v.fTab.len
proc `[]=`(v: var VersionSchemaFile, key: Version, schema: VersionSchema) =
v.versions[key] = schema
proc `[]`(v: VersionSchemaFile, key: Version): VersionSchema =
v.versions[key]
proc `[]`(v: VersionSchema, key: string): Option[int] =
## Either returns the index of the given `key` if it exists or `None`
if key in v.fTab:
some(v.fTab[key])
else:
none[int]()
proc `$`(v: VersionSchema): string =
result = "{version: " & v.version.string
result.add ", fTab: " & $v.ftab & "}"
proc `$`(v: VersionSchemaFile): string =
result = "{versions: "
for key in keys(v.versions):
result.add key.string & ", "
result.add "}"
proc contains(v: VersionSchemaFile, key: Version): bool = key in v.versions
proc parseField(line: string, tok: var string, idx: var int) =
inc idx, line.parseUntil(tok, until = "##", start = idx)
inc idx, 2
proc parseVersionSchemaFile(path: string): VersionSchemaFile =
for schemaProto in readFile(path).splitLines:
# first field is version. Parse it separate
var tok: string
var idx = 0
var col = 0
parseField(schemaProto, tok, idx)
## XXX: there is a refc bug that causes the `ver` variable to point to the same
## string as `tok`!
when not defined(gcDestructors):
let ver = Version(deepCopy tok)
else:
let ver = Version(tok)
var tab = initTable[string, int]()
# now parse all fields
while idx < schemaProto.len:
parseField(schemaProto, tok, idx)
tab[tok] = col
inc col
result[ver] = VersionSchema(version: ver, fTab: tab)
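# A minimal usage sketch (illustrative only; the schema file path, the version
# string and the field lookup are hypothetical). Each line of a schema file is
# expected to be `##`-separated: the version first, then the column names in
# the order in which they appear in the log files:
when false:
  # e.g. a line "0004##DATE##TIME##..." maps "DATE" -> 0, "TIME" -> 1, ...
  let schemaFile = parseVersionSchemaFile("sc_schemas.txt")
  let schema = schemaFile[Version("0004")]
  echo schema["DATE"] # Some(0); a field missing from the schema yields none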
proc getVersionSchema(schemaFile: VersionSchemaFile, fname: string): Option[VersionSchema] =
let (dir, fn, ext) = splitFile(fname)
if fn.startsWith("SCDV"):
# should be a schema version file
var idx = 4
var verBuf = ""
inc idx, parseUntil(fn, verBuf, until = '_', idx)
if idx != 8:
# failed to parse
echo "[WARNING] Could not parse version number from filename: ", fn, ". Skipping file."
elif Version(verBuf) notin schemaFile:
# bad version
echo "[WARNING] Could not find version ", verBuf, " in table of version numbers. Skipping file."
else:
result = some(schemaFile[Version(verBuf)])
else:
# bad file
echo "[WARNING] Input file ", fn, " does not follow convention. Skipping file."
proc newSlowControlLog(name: string): SlowControlLog =
result.filename = name
result.date = fromUnix(0)
proc hash(x: TrackingLog): Hash =
## proc to define a hash value for a tracking log object
## needed to store it in a hash Table
var h: Hash = 0
# can use hashing of Time
h = h !& hash(x.name)
h = h !& hash(x.date.toUnix)
case x.kind
of rkTracking:
h = h !& hash(x.tracking_start.toUnix)
h = h !& hash(x.tracking_stop.toUnix)
else: discard
result = !$h
proc `==`(x, y: TrackingLog): bool =
## equality of tracking logs, since variant types do not provide an equality
## operator by itself
# don't compare names, as file names can be different
result = x.date == y.date
result = result and (x.kind == y.kind)
if x.kind == y.kind and x.kind == rkTracking:
# if tracking true, compare start and end times
result = result and (x.tracking_start == y.tracking_start)
result = result and (x.tracking_stop == y.tracking_stop)
proc parseTime(time_str: string): Duration =
## proc to parse the time from a string hh:mm:ss returned as a
## time duration from midnight
let t = time_str.split(":")
# create the time interval based on the date given
result = initDuration(hours = parseInt(t[0]), minutes = parseInt(t[1]), seconds = parseInt(t[2]))
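# Sketch of the expected `hh:mm:ss` input (illustrative):
when false:
  doAssert parseTime("10:33:05") == initDuration(hours = 10, minutes = 33, seconds = 5)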
proc is_magnet_moving(h_me, v_me: (int, int)): bool {.inline.} =
## determine whether magnet is moving horizontally and (!) vertically
## by checking previous and current motor encoder values
  result = h_me[0] != h_me[1] and v_me[0] != v_me[1]
proc sortTrackingLogs(tr_logs: seq[TrackingLog], order = SortOrder.Ascending): seq[TrackingLog] =
## proc to sort a sequence of tracking logs by date
var
gt = 0
lt = 0
# check the sorting order. Depending on case, we define the variables greater and
# lesser than to either be 1 or 0
case order
of Ascending:
gt = 1
lt = 0
of Descending:
gt = 0
lt = 1
result = sorted(tr_logs) do (r, t: TrackingLog) -> int:
let c = times.`<`(r.date, t.date)
    result = if c: lt else: gt
proc filterTrackingLogs(logs: seq[TrackingLog], startDate, endDate: Time): seq[TrackingLog] =
echo "Filtering tracking logs to date: ", startDate, " and ", endDate
result = logs.filterIt(it.kind == rkNoTracking or (it.tracking_start >= startDate and it.tracking_stop < endDate))
proc sortAndFilter(logs: seq[TrackingLog], startTime, endTime: string): seq[TrackingLog] =
let startDate = if startTime.len > 0: parseTime(startTime, "YYYY/MM/dd", utc())
else: fromUnix(0)
let endDate = if endTime.len > 0: parseTime(endTime, "YYYY/MM/dd", utc())
else: fromUnix(int32.high)
result = sortTrackingLogs(logs)
  # filter to start / end dates
result = result.filterTrackingLogs(startDate, endDate)
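# Illustrative call (assumes `logs` was read beforehand; the date range is
# hypothetical, an empty string disables the respective bound):
when false:
  let filtered = logs.sortAndFilter("2017/10/30", "2018/04/18")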
proc print_tracking_logs(logs: seq[TrackingLog], print_type: TrackingKind,
startTime, endTime = "",
outfile = "") =
## proc to pretty print a seq of TrackingLogs using org date format
## inputs:
## logs: seq[TrackingLog] = seq of (potentially mixed) tracking logs to be
## printed using org mode date
  ##   print_type: TrackingKind = the kind of logs to be printed. Either tracking
  ##     or no tracking
# 0. construct a DF to write a CSV of the tracking information
var df = newDataFrame()
# 1. first print the *mapped* tracking logs:
echo "========== Found trackings =========="
let s_logs = logs.sortAndFilter(startTime, endTime)
for log in s_logs:
case log.kind
of rkTracking:
echo "<$#> <$#>" % [formatAsOrgDate(log.tracking_start), formatAsOrgDate(log.tracking_stop)]
# 2. add fields to DF
let duration = (log.tracking_stop - log.tracking_start).inSeconds()
df.add (&"<{formatAsOrgDate(log.tracking_start)}>", &"<{formatAsOrgDate(log.tracking_stop)}>", duration)
of rkNoTracking:
echo "<$#>" % formatAsOrgDate(log.date)
case print_type
of rkTracking:
echo &"There are {s_logs.len} solar trackings found in the log file directory"
var trackTime: Duration
for log in s_logs:
if log.kind == rkTracking:
        doAssert log.tracking_stop > log.tracking_start, "Invalid tracking (stop <= start): " & $log
trackTime += (log.tracking_stop - log.tracking_start)
echo &"The total time of all trackings: {trackTime.inHours()} h (exact: {tracktime})"
of rkNoTracking:
echo &"There are {s_logs.len} runs without solar tracking found in the log file directory"
# 3. write out the output file if desired
if df.len > 0:
df = df.rename(f{"Tracking start" <- "Field0"}, f{"Tracking stop" <- "Field1"}, f{"Duration [s]" <- "Field2"})
if outfile.len > 0:
df.writeCsv(outfile)
writeFile(outfile.replace(".csv", ".org"), df.toOrgTable())
# compute total time magnet was on
when true:
## NOTE: this is wrong for modern tracking logs!
var sumB: Second
for log in s_logs:
#echo "log ", log.date, " has Bs: ", log.magB.filterIt(it > 0.0), " in name ", log.name
#if log.date.utc().year > 2015: quit()
for i in 1 ..< log.timestamps.len:
let diff = log.timestamps[i].s - log.timestamps[i-1].s
#echo "log.times ", log.timestamps[i], " to ", log.timestamps[i-1], " is ", diff
if log.magB[i] > 8.0:
sumB = sumB + diff
echo &"Total time the magnet was on (> 1 T): {sumB.to(Hour)} h"
proc print_slow_control_logs(logs: seq[SlowControlLog], magnetField = 8.0) =
## proc to pretty print useful information about the Slow Control data
## Mainly related to magnet activity.
  # compute the total time the magnet was on: take the difference between the
  # previous and the current timestamp and add it whenever the magnet is on
var sumB: Second
var Bvals = newSeq[float]()
var Tdiffs = newSeq[Second]()
let sortedLogs = logs.filterIt(it.timestamps.len > 0).sortedByIt(it.date)
var oldTime = sortedLogs[0].timestamps[0]
for log in sortedLogs:
if log.B_magnet.len == 0: continue # no magnet data, nothing to learn
for i in 1 ..< log.timestamps.len:
let curTime = log.timestamps[i]
let diff = (curTime - oldTime).Second
Tdiffs.add diff
if log.B_magnet[i] > magnetField:
sumB = sumB + diff
if log.B_magnet[i] > 0.0:
Bvals.add log.B_magnet[i]
#elif log.B_magnet[i] > 0.0 and log.B_magnet[i] < magnetField:
oldTime = curTime
let df = toDf({"B" : Bvals})
ggplot(df.filter(f{`B` <= magnetField}), aes("B")) + geom_histogram(bins = 100) + ggsave(&"/tmp/B_field_larger_0_smaller_{magnetField}.pdf")
ggplot(df.filter(f{float -> bool: `B` >= magnetField}), aes("B")) + geom_histogram(bins = 100) + ggsave(&"/tmp/B_field_larger_{magnetField}.pdf")
ggplot(df, aes("B")) + geom_histogram(bins = 100) + ggsave(&"/tmp/B_field_larger_0.pdf")
let dfT = toDf({"Tdiff" : Tdiffs.mapIt(it.float)})
.filter(f{`Tdiff` > 0.1 and `Tdiff` < 500.0})
echo dfT
ggplot(dfT, aes("Tdiff")) + geom_histogram(bins = 100) +
scale_y_continuous() + scale_x_continuous() +
ggsave("/tmp/T_diffs.pdf")
echo &"Total time the magnet was on (> {magnetField} T): {sumB.to(Hour)} h"
when not defined(pure):
proc print_mapped_tracking_logs(logs: seq[TrackingLog], map: Table[TrackingLog, int],
startTime, endTime: string,
outfile: string) =
let logs = sortAndFilter(logs, startTime, endTime)
# 0. construct a DF to write a CSV of the tracking information
var df = newDataFrame()
# 1. first print the *mapped* tracking logs:
echo "========== Logs mapped to trackings in the output file: =========="
for log in logs.filterIt(it in map): ## filter and get the run to output in sorted order!
let run = map[log]
doAssert log.kind == rkTracking
echo "<$#> <$#> for run: $#" % [formatAsOrgDate(log.tracking_start),
formatAsOrgDate(log.tracking_stop),
$run]
df.add (&"<{formatAsOrgDate(log.tracking_start)}>", &"<{formatAsOrgDate(log.tracking_stop)}>", run)
# 2. print those runs *not* mapped to anything:
echo "==================================================================\n"
echo "========== Logs *not* mapped to a run ============================"
for log in logs.filterIt(it notin map):
doAssert log.kind == rkTracking
echo "<$#> <$#>" % [formatAsOrgDate(log.tracking_start),
formatAsOrgDate(log.tracking_stop)]
df.add (&"<{formatAsOrgDate(log.tracking_start)}>", &"<{formatAsOrgDate(log.tracking_stop)}>", -1)
echo "=================================================================="
df = df.rename(f{"Tracking start" <- "Field0"}, f{"Tracking stop" <- "Field1"}, f{"Run" <- "Field2"})
df.writeCsv(outfile)
writeFile(outfile.replace(".csv", ".org"), df.toOrgTable())
proc map_log_to_run(logs: seq[TrackingLog], h5file: string,
startTime, endTime: string): Table[TrackingLog, int] =
## maps a sequence of tracking logs to run numbers given in a H5 file
## inputs:
## logs: seq[TrackingLog] = tracking logs to map
## h5file: string = path to H5 file containing runs
## outputs:
## Table[TrackingLog, int] = table mapping each tracking log to the
## run number in which the tracking is contained
## throws:
## HDF5LibraryError = if a call to the H5 library fails
result = initTable[TrackingLog, int]()
var h5f = H5open(h5file, "r")
var run_times = initTable[int, (int, int)]()
for grp in items(h5f, "/reconstruction", depth = 1):
if "/reconstruction/run" in grp.name:
# given a run, check its start and end time
var tstamp = h5f[(grp.name & "/timestamp").dset_str]
let
t_start = tstamp[0, int]
t_stop = tstamp[tstamp.high, int]
# for now need mutable attributes object to access
var attrs = grp.attrs
let run_num = attrs["runNumber", int]
# add start and end time to table of run times
run_times[run_num] = (t_start, t_stop)
# now iterate over tracking logs and for each log determine in which run
# it fits
for log in logs:
case log.kind
of rkTracking:
# now given start time, look for run for which it fits. Filter elements
# where run covers tracking start - end
proc filterRuns(runs: seq[int], log: TrackingLog): int =
result = -1
for r in runs:
let
t_start = int(log.tracking_start.toUnix)
t_stop = int(log.tracking_stop.toUnix)
if run_times[r][0] < t_start and run_times[r][1] > t_stop:
doAssert result < 0, "There are multiple trackings? Something is wrong. " & $result & " and " & $r
result = r
let run_num = filterRuns(toSeq(run_times.keys), log)
if run_num > 0:
result[log] = run_num
else: discard
discard h5f.close()
# in addition print the information about all logs we found. Both the
# logs that _are_ mapped (and which runs they are mapped to), but also those
# that are _not_ mapped, i.e. runs that we missed!
const outpath = TpxDir / "resources"
let outfile = h5file.extractFilename.replace(".h5", "_tracking_times.csv")
print_mapped_tracking_logs(logs, result, startTime, endTime, outpath / outfile)
proc deleteTrackingAttributes(h5file: string) =
## proc to delete all tracking related attributes in a H5 file
withH5(h5file, "rw"):
let baseName = if "/runs" in h5f: rawDataBase()
elif "/reconstruction" in h5f: recoBase()
else: raise newException(IOError, "Invalid input file " &
$h5file & ". It contains neither `runs` nor `reconstruction` group!")
for (run, grpName) in runs(h5f, baseName):
# get number of tracking related attributes
# for now need mutable attributes object to access
let grp = h5f[grpName.grp_str]
var attrs = grp.attrs
var num_tr = 0
var mgrp = grp
try:
num_tr = attrs["num_trackings", int]
except KeyError:
# no trackings, continue
continue
for i in 0 ..< num_tr:
        let
          attr_start = "tracking_start_$#" % $i
          attr_stop = "tracking_stop_$#" % $i
          attr_num = "num_trackings"
        # attempt all three deletions and require that each one succeeded
        var deleted = mgrp.deleteAttribute(attr_start)
        deleted = mgrp.deleteAttribute(attr_stop) and deleted
        deleted = mgrp.deleteAttribute(attr_num) and deleted
        if not deleted:
          echo "Could not delete one of " &
            "$#, $# or $# in group $#" % [$attr_start, $attr_stop, $attr_num, $mgrp.name]
proc write_tracking_h5(trck_tab: Table[TrackingLog, int], h5file: string) =
## proc to write the mapping of tracking logs to run numbers to the appropriate
## groups of the H5 file
## inputs:
## trck_tab: Table[TrackingLog, int] = table mapping tracking logs to run numbers
## h5file: string = h5 file in which to write the information
## throws:
## HDF5LibraryError = if a call to the H5 library fails
var runTab = initCountTable[int]()
withH5(h5file, "rw"):
for log, run_number in trck_tab:
# increment run table of current run number
inc(runTab, runNumber)
let num = runTab[runNumber]
let baseName = if "/runs" in h5f: rawDataBase()
elif "/reconstruction" in h5f: recoBase()
else: raise newException(IOError, "Invalid input file " &
$h5file & ". It contains neither `runs` nor `reconstruction` group!")
# take run number, build group name and add attributes
let runName = baseName & $run_number
var runGrp = h5f[runName.grp_str]
# check if there is a number of trackings already
if "num_trackings" notin runGrp.attrs:
runGrp.attrs["num_trackings"] = 1
else:
runGrp.attrs["num_trackings"] = num
# store trackings zero indexed
let numIdx = num - 1
# add tracking. Trackings will be zero indexed
runGrp.attrs["tracking_start_$#" % $numIdx] = $log.tracking_start
runGrp.attrs["tracking_stop_$#" % $numIdx] = $log.tracking_stop
proc tryParseTime(s: string, formatStrings: openArray[string]): Time =
## Attempts to parse the time string `s` using any of the `formatStrings`
for fmt in formatStrings:
try:
return toTime(parse(s.strip(chars = {'\0'}), fmt, tz = utc()))
except TimeParseError:
continue
raise newException(TimeParseError, "Parsing of string: " & $s &
" failed with all format strings: " & $formatStrings)
proc copyMemStrBuf(s: var string, buf: var string, len: int) =
s.setLen(len) # set to `len` to fit buffer exactly
copyMem(s[0].addr, buf[0].addr, len * sizeof(char))
proc splitMinTwo(lineData: var seq[string],
idx: var int, # global file index
buf: var string, # buffer for the current column field
validData: var bool, # whether we have arrived at valid data
isHeaderLine: var bool,
data: string, # the full file data
expected: int,
lineCnt: int): int =
## line data is the sequence of fields that will be filled
var bufIdx = 0
var spaceCount = 0
var lastWasSpace = false
var count = 0
var parseWithoutStore = false
while idx < data.len:
case data[idx]
of '\0':
discard # ignore null characters
of '\n', '\r':
# end of line
break
of ' ':
lastWasSpace = true
inc spaceCount
of '\t':
lastWasSpace = true
inc spaceCount, 4 # count tab as multiple spaces so that single tab is enough to parse
of '#':
isHeaderLine = true
else:
if lastWasSpace and spaceCount > 1: # need more than 1 space:
# add last element
if count >= lineData.len:
parseWithoutStore = true
elif bufIdx > 0: # only count if we actually parsed something (otherwise line starting ' ' will count)
copyMemStrBuf(lineData[count], buf, bufIdx)
if not validData and count < 2 and lineData[count].startsWith("DATE"):
isHeaderLine = true
validData = true # should be fine from here on?
parseWithoutStore = true
if bufIdx > 0:
inc count
bufIdx = 0
buf[bufIdx] = data[idx]
inc bufIdx
lastWasSpace = false
inc idx
if count >= lineData.len:
discard
else:
copyMemStrBuf(lineData[count], buf, bufIdx)
inc count
#lineData[count] = buf[0 ..< bufIdx]
if not isHeaderLine and count != expected and lineCnt != 0:
echo "parsed : ", lineData.mapIt(it.strip(chars = {' ', '\0'}))
echo "in line: ", lineCnt
return 0 # invalid file!
elif not isHeaderLine:
if count != expected:
echo "[Warning] Count " & $count & " but expected " & $expected
return 0
else:
validData = true
result = count
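# Illustrative sketch of the splitting rule implemented above (hypothetical
# line): fields are separated by two or more spaces (a tab counts as several
# spaces), so single spaces *within* a field are preserved:
when false:
  #   "03/14/2018  10:33:05  1.3e-05  MM GAS"
  # splits into ["03/14/2018", "10:33:05", "1.3e-05", "MM GAS"]
  discard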
proc parse_sc_logfile(content: string,
schema: VersionSchema,
filename = ""
): SlowControlLog =
## proc to read a slow control log file
## inputs:
## filename: string = the filename of the log file
## schemaFile: The map of all known schemas. Used to parse the correct columns.
## outputs:
## SlowControlLog = an object storing the data from a slow control
## log file.
# define the indices for each data column. See the ./sc_log_understand.org file
# look up the correct fields
let
date_i = schema["DATE"]
time_i = schema["TIME"]
# for these: taking into account offset of 1
pmm_i = schema["P-MM"]
p3_i = schema["P-3"]
p3ba_i = schema["P3_BA"]
Imag_i = schema["I_magnet"] # is actually the magnetic field in ``T``
tamb_i = schema["Tenv_Amb"]
tiron_i = schema["Tenv_Iron"]
tmrb_i = schema["Tenv_MRB"]
tmfb_i = schema["Tenv_MFB"]
text_i = schema["TEnv_Ext"]
tvent_i = schema["TEnv_Vent"]
humid_i = schema["Humidity"]
mm_gas_i = schema["MM GAS"]
hang_i = schema["Horiz_Angle"]
vang_i = schema["Verti_Angle"]
hme_i = schema["Horiz_ME"]
vme_i = schema["Verti_ME"]
result = newSlowControlLog(name = filename)
var parsedDate = false
var lineCnt = 0
var badLineCount = 0
var lineStart = 0
var lineBuf = newString(5000)
var d = newSeqWith(schema.len, newString(100))
var i = 0
var isHeader = false
var validData = false
while i < content.len:
## accumulate until line break
case content[i]
of '\r', '\n':
# process last line
lineStart = i+1
inc i
else:
# accumulate
# parse into seq
let count = splitMinTwo(d, i, lineBuf,
validData, isHeader,
content,
schema.len, lineCnt)
# `d` now contains correct data
# skip the first line (header)
if isHeader:
inc lineCnt
isHeader = false
continue
# else add the data to the SlowControlLog object
if count == 0: # parsing of line unsuccessful
echo "bad line count+1 ", badLineCount, " in line : ", lineCnt
inc lineCnt
inc badLineCount
continue
if not parsedDate and not d[0].strip.startsWith("DATE"):
# set date based on first row
try:
result.date = tryParseTime(d[date_i.get], ["MM/dd/yyyy",
"M/d/yyyy",
"dd-MMM-yy"]) # DATE always exists, safe to get
parsedDate = true
          if result.date.utc().year == 1970:
            echo "Invalid date (year 1970) parsed in file: ", filename
            quit()
except TimeParseError:
## Parsing *can* fail, because some files contain line breaks in the header...
if lineCnt > 5: # don't output for first few lines, as they often have more header etc
echo "Failed to parse the following date line: ", d[date_i.get]
#echo content
echo "Full line: ", d
echo "In file ", filename, " Trying again next line"
inc lineCnt
continue
#quit()
## Different versions of the files have different fields set. Probably need to
## skip files that don't have what we need?
if count != schema.len:
echo "INVALID FILE: ", filename, ". ", count, " columns found, but ", schema.len, " expected!"
echo "Offending line: ", content[lineStart .. i]
echo "split to: ", d.mapIt(it.strip(chars = {' ', '\0'}))
inc lineCnt
inc badLineCount
continue
# now parse both date and time & add as a unix timestamp
let date = tryParseTime(d[date_i.get], ["MM/dd/yyyy",
"M/d/yyyy",
"dd-MMM-yy"]) # DATE always exists, safe to get
if abs(date - result.date) > initDuration(days = 1): # one day is allowed to account for midnight to next day logs
# If there's a line with a different date suddenly, skip it. We cannot safely handle such cases!
echo "Line detected with mismatching date: ", date, " vs ", result.date
echo "Skipping line. File: ", filename
inc badLineCount
inc lineCnt
continue
let time = parseTime(d[time_i.get].strip(chars = {'\0'}))
result.timestamps.add (date + time).toUnix.int
template addIfAvailable(arg, body: untyped): untyped =
if arg.isSome:
let idx {.inject.} = arg.unsafeGet
body
template addIfAvailable(arg, col, toAdd, fn: untyped): untyped =
if arg.isSome:
let idx {.inject.} = arg.unsafeGet
if idx > col.high:
echo "[WARNING] Bad file: ", filename, ". Skipping it."
return
toAdd.add fn(col[idx])
template getIf(arg: untyped): untyped =
if arg.isSome:
let idx = arg.unsafeGet
parseFloat d[idx]
else:
0.0
#addIfAvailable(pmm_i, result.pmm.add parseFloat(d[idx]))
addIfAvailable(pmm_i, d, result.pmm, parseFloat)
addIfAvailable(p3_i, d, result.p3, parseFloat)
addIfAvailable(p3_ba_i, d, result.p3_ba, parseFloat)
addIfAvailable(Imag_i, d, result.B_magnet, parseFloat)
addIfAvailable(humid_i, d, result.humidity, parseFloat)
addIfAvailable(hang_i, d, result.h_angle, parseFloat)
addIfAvailable(vang_i, d, result.v_angle, parseFloat)
addIfAvailable(hme_i, d, result.h_encoder, proc(s: string): uint16 = uint16(parseInt(s)))
addIfAvailable(vme_i, d, result.v_encoder, proc(s: string): uint16 = uint16(parseInt(s)))
addIfAvailable(mm_gas_i):
let mm_gas_b = d[idx][0] == '0'
result.mm_gas.add mm_gas_b
# finally try to add all temperatures
let temps = Temperatures(amb: getIf(tamb_i),
iron: getIf(tiron_i),
mrb: getIf(tmrb_i),
mfb: getIf(tmfb_i),
ext: getIf(text_i),
vent: getIf(tvent_i))
result.temps.add temps
inc lineCnt
result.badLineCount = badLineCount
proc read_sc_logfile(filename: string,
schemaFile: VersionSchemaFile): SlowControlLog =
# get schema
let schema = schemaFile.getVersionSchema(filename)
if schema.isSome:
try:
result = parse_sc_logfile(readFile(filename), schema.get, filename)
except ValueError as e:
echo "[WARNING] Encountered a `ValueError` trying to parse the file: ", filename
echo "\tException message: ", e.msg
echo "\tSkipping this file."
proc splitWhitespaceBuf(data: var seq[string],
buf: var string,
line: string): int =
## splits the given `line` at spaces and inserts the elements into data fields
var idx = 0
var count = 0
var bufIdx = 0
var lastWasSpace = false
while idx < line.len:
case line[idx]
of '\0', '\n', '\r':
# done, break
break
of ' ', '\t':
lastWasSpace = true
else:
if lastWasSpace and bufIdx > 0:
copyMemStrBuf(data[count], buf, bufIdx)
inc count
bufIdx = 0
lastWasSpace = false
buf[bufIdx] = line[idx]
inc bufIdx
inc idx
result = count
var seenHeaders: set[uint16]
proc parse_tracking_logfile*(content: string, filename: string): TrackingLog =
## reads a tracking log file and returns an object, which stores
## the relevant data
const
tracking_i = 0
date_i = 6
time_i = 7
h_me = 9
v_me = 10
h_prec = 15
v_prec = 16
magB_i = 22
var
lineCnt = 0
h_me_p = 0
v_me_p = 0
tracking_p = false
tracking_start = fromUnix(0)
tracking_stop = fromUnix(0)
# helper bool, needed because some tracking logs start
# before midnight
date_set = false
badLineCount = 0
var stream = newStringStream(content)
var line = newString(5000)
var d = newSeqWith(100, newString(50)) # just enough space for everything
var buf = newString(100)
while stream.readLine(line):
if lineCnt < 2:
let spl = line.split(",")
if spl.len.uint16 notin seenHeaders:
echo "header: ", line
for i, s in spl:
echo "index ", i, " = " , s
seenHeaders.incl spl.len.uint16
# skip header
inc lineCnt
continue
if line.len == 0:
break
let count = splitWhitespaceBuf(d, buf, line)
#echo d.len
## Order of columns is ``kept`` in tracking logs. That means we only need to remove those
## files that don't have the information we want, namely the "bare bones" files from the
## commissioning
var date: Time
if lineCnt > 1 and not date_set and count >= 22:
date = toTime(parse(d[date_i], "MM/dd/yy", zone = utc()))
# check whether this date is at the end of day from the log file or already past midnight
if date.utc().hour > 23:
continue
else:
# set the date of the tracking log
result.date = date
date_set = true
elif count < 22:
inc badLineCount
continue
else:
# now parse date to make sure we have it
try:
date = toTime(parse(d[date_i], "MM/dd/yy", zone = utc()))
      except CatchableError:
echo "d was ", d, " with count ", count
raise
      if abs(date - result.date) > initDuration(days = 7): # one week is allowed, because old tracking logs sometimes spanned multiple days
# If there's a line with a different date suddenly, skip it. We cannot safely handle such cases!
echo "Line detected with mismatching date: ", date, " vs ", result.date
echo "Skipping line. File: ", filename
inc badLineCount
continue
let
h_me = int(parseFloat(d[h_me]))
v_me = int(parseFloat(d[v_me]))
daytime = parseTime(d[time_i]) # time on the current day since midnight
tracking = int(parseFloat(d[tracking_i])) == 1
magB = parseFloat(d[magB_i])
h_precise = parseFloat(d[h_prec])
v_precise = parseFloat(d[v_prec])
# determine magnet movement and set old encoder values
let move = is_magnet_moving((h_me, h_me_p), (v_me, v_me_p))
h_me_p = h_me
v_me_p = v_me
if not tracking_p and tracking:
tracking_start = date + daytime
tracking_p = not tracking_p
elif tracking_p and not tracking:
tracking_stop = date + daytime
tracking_p = not tracking_p
# append seq data
result.timestamps.add (date + daytime).toUnix.int
result.isMoving.add move
result.isTracking.add tracking
result.magB.add magB
result.horizontal.add h_precise
result.vertical.add v_precise
result.horizontalEnc.add h_me
result.verticalEnc.add v_me
inc lineCnt
# now set the tracking variant object depending on whether tracking took place
# or not
if tracking_start == tracking_stop:
result = TrackingLog(kind: rkNoTracking,
date: result.date,
timestamps: result.timestamps,
isMoving: result.isMoving,
isTracking: result.isTracking,
magB: result.magB)
elif tracking_start > tracking_stop: # likely file _ends_ while tracking (broken old file)
doAssert tracking_stop == fromUnix(0)
result.tracking_start = tracking_start
result.tracking_stop = result.timestamps[^1].fromUnix
else:
result.tracking_start = tracking_start
result.tracking_stop = tracking_stop
result.name = filename
result.badLineCount = badLineCount
if result.kind == rkTracking and result.tracking_stop < parseTime("1975/01/01", "YYYY/MM/dd", utc()):
echo "[ERROR]: Invalid tracking stop time parsed: ", result
quit()
proc read_tracking_logfile*(filename: string): TrackingLog =
result = parse_tracking_logfile(readFile(filename), filename)
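# Sketch for a single tracking log (illustrative, hypothetical file name):
when false:
  let log = read_tracking_logfile("tracking-2018-03-14.log")
  case log.kind
  of rkTracking: echo "tracking from ", log.tracking_start, " to ", log.tracking_stop
  of rkNoTracking: echo "no tracking on ", log.date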
proc split_tracking_logs(logs: seq[TrackingLog]): (seq[TrackingLog], seq[TrackingLog]) =
## given a sequence of sorted tracking logs, splits them by `kind`, i.e.
## tracking or no tracking
## inputs:
## logs: seq[TrackingLog] = seq of mixed TrackingLog kinds to be split by kind
## outputs:
## (seq[TrackingLog], seq[TrackingLog]) =
## 0: TrackingLog.kind == rkTracking
## 1: TrackingLog.kind == rkNoTracking
## both seqs are sorted by date
# init result
result[0] = @[]
result[1] = @[]
for log in logs:
case log.kind
of rkTracking: result[0].add log
of rkNoTracking: result[1].add log
proc read_tracking_log_folder(log_folder: string): seq[TrackingLog] =
  ## reads all log files from `log_folder` and returns the parsed `TrackingLog`
  ## objects (mixed kinds, i.e. with and without tracking), sorted by date
  ## inputs:
  ##   log_folder: string = folder in which log files are stored
  ## outputs:
  ##   seq[TrackingLog] = all successfully parsed logs, sorted by date; use
  ##     `split_tracking_logs` to separate rkTracking from rkNoTracking logs
var tracking_logs: seq[TrackingLog] = @[]
# for each file in log_folder we perform a check of which run corresponds
# to the date of this tracking log
for log_p in walkDir(log_folder):
try:
case log_p.kind
of pcFile:
let log = log_p.path
echo "Reading ", log
# check whether actual log file (extension fits)
let (dir, fn, ext) = splitFile(log)
if ext == ".log":
tracking_logs.add read_tracking_logfile(log)
else:
# skipping files other than log files
continue
else: discard
    except CatchableError:
      continue # drop bad file
result = sortTrackingLogs(tracking_logs)
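# Sketch combining folder reading and splitting (hypothetical folder path):
when false:
  let logs = read_tracking_log_folder("~/CAST/TrackingLogs")
  let (trackings, noTrackings) = split_tracking_logs(logs)
  print_tracking_logs(trackings, rkTracking)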
proc toDf(sc: SlowControlLog, enforceSameFields = false): DataFrame =
if enforceSameFields:
doAssert sc.timestamps.len == sc.B_magnet.len or sc.B_magnet.len == 0, "Enforcement of times & magnet data length failed! " &
"times.len = " & $sc.timestamps.len & ", magnet.len = " & $sc.B_magnet.len
let B = if sc.B_magnet.len == 0: sc.timestamps.mapIt(NaN) # fill with NaN to have floats
else: sc.B_magnet
  ## XXX: even as of 22/12/2022 `toDf` cannot be used here, due to an overload
  ## resolution issue that makes it complain about the differing data types
var df = seqsToDf({ "Time / s" : sc.timestamps,
"B / T" : B })
df["Date"] = newTensorWith(df.len, sc.date.toUnix) #constantColumn(sc.date.toUnix, df.len)
result = df
proc plotTemperatures(scLogs: seq[SlowControlLog]) =
## Plots the different temperatures recorded in the CAST hall in the given time range
  let df = seqsToDf({ "Time" : scLogs.mapIt(it.timestamps).flatten,
"T_amb" : scLogs.mapIt(it.temps.mapIt(it.amb)).flatten,
"T_iron" : scLogs.mapIt(it.temps.mapIt(it.iron)).flatten,
"T_mrb" : scLogs.mapIt(it.temps.mapIt(it.mrb)).flatten,
"T_mfb" : scLogs.mapIt(it.temps.mapIt(it.mfb)).flatten,
"T_ext" : scLogs.mapIt(it.temps.mapIt(it.ext)).flatten,
"T_vent" : scLogs.mapIt(it.temps.mapIt(it.vent)).flatten,
"Humidity" : scLogs.mapIt(it.humidity).flatten })
.gather(["T_amb", "T_iron", "T_mrb", "T_mfb", "T_ext", "T_vent"], key = "Temperature", value = "TempVal")
.arrange("Time")
df.writeCsv("/tmp/temperatures_cast.csv")
ggplot(df, aes("Time", "TempVal", color = "Temperature")) +
geom_line() +