-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathdb.py
More file actions
6528 lines (6132 loc) · 295 KB
/
db.py
File metadata and controls
6528 lines (6132 loc) · 295 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""SQLite database for service state and settings."""
import atexit
import sqlite3
import os
import glob
import logging
import queue
import threading
from contextlib import contextmanager
import config
_log = logging.getLogger('nomad.db')
# ─── Connection Pool (v7.29.0 — audit M6) ──────────────────────────────
# Process-wide queue of reusable SQLite connections. Reduces per-request
# connect/PRAGMA overhead under LAN multi-user access. Opt-in via env var.
# Default size of 4 keeps memory low for single-user desktop use. SQLite WAL
# tolerates many concurrent readers; contention only appears under write
# load, where the pool has no effect either way.
try:
    # max(0, ...) clamps negative values; 0 disables pooling entirely.
    _POOL_SIZE = max(0, int(os.environ.get('NOMAD_DB_POOL_SIZE', '4')))
except (ValueError, TypeError):
    # Malformed env value — fall back to the default pool size.
    _POOL_SIZE = 4
# None (not an empty queue) signals "pooling disabled" to the acquire/release helpers.
_pool: 'queue.Queue[sqlite3.Connection]' = queue.Queue(maxsize=_POOL_SIZE) if _POOL_SIZE > 0 else None
# Guards pool invalidation when the DB path changes (see _pool_acquire).
_pool_lock = threading.Lock()
_pool_db_path: 'str | None' = None  # pool is keyed by db path; clears on change
# Lambda defers the name lookup: _pool_clear is defined below this statement.
atexit.register(lambda: _pool_clear())
def _pool_clear():
    """Empty the connection pool, closing each connection it held.

    Close errors are swallowed: a connection that cannot be closed is
    simply abandoned, which is acceptable at shutdown/invalidation time.
    """
    if _pool is None:
        return
    while True:
        try:
            stale = _pool.get_nowait()
        except queue.Empty:
            return
        try:
            stale.close()
        except Exception:
            pass
def get_db_path():
    """Resolve the path of the SQLite database file.

    A non-empty string `db_path` config value takes precedence; otherwise
    the file lives at <data_dir>/nomad.db, creating the data directory
    if needed.
    """
    configured = config.get_config_value('db_path')
    if configured and isinstance(configured, str):
        return configured
    base_dir = config.get_data_dir()
    os.makedirs(base_dir, exist_ok=True)
    return os.path.join(base_dir, 'nomad.db')
# True once PRAGMA journal_mode=WAL has been issued this process (WAL mode
# persists on the database file, so it only needs to run once).
_wal_set = False
# Protects the one-time WAL switch in get_db() (double-checked locking).
_wal_lock = threading.Lock()
# Serializes apply_migrations() so two threads never replay the same file.
_migration_lock = threading.Lock()
def get_db():
    """Open a new SQLite connection to the configured database.

    The connection gets ``row_factory = sqlite3.Row`` and
    ``PRAGMA foreign_keys=ON``. Once per process the database is switched
    to WAL journal mode. When called inside a Flask app context, the
    connection is bound to ``g._db_conn`` so teardown can auto-close it.

    Returns:
        sqlite3.Connection: a ready-to-use connection.

    Raises:
        Propagates any exception from connect/PRAGMA setup; a
        partially-opened connection is closed first.
    """
    global _wal_set
    db_path = get_db_path()
    try:
        # uri= is enabled only when the configured path looks like a URI
        # (e.g. 'file:...'), so plain filesystem paths are unaffected.
        conn = sqlite3.connect(db_path, timeout=30, uri=db_path.startswith('file:'))
        conn.row_factory = sqlite3.Row
        # WAL mode is persistent on the database file — only set once per process.
        # Double-checked locking: cheap unlocked read, re-checked under the lock.
        if not _wal_set:
            with _wal_lock:
                if not _wal_set:
                    conn.execute('PRAGMA journal_mode=WAL')
                    _wal_set = True
        conn.execute('PRAGMA foreign_keys=ON')
        # Register on flask.g so teardown_appcontext can auto-close leaked connections
        try:
            from flask import g, has_app_context
            if has_app_context():
                g._db_conn = conn
        except Exception as exc:
            # Best-effort: Flask may be absent or no context active.
            _log.debug('Failed to bind DB connection to Flask context: %s', exc)
        return conn
    except Exception:
        # Close the half-initialized connection (if connect succeeded) so
        # a failed PRAGMA doesn't leak a file handle.
        if 'conn' in locals():
            try:
                conn.close()
            except Exception:
                pass
        raise
def _pool_acquire():
    """Return a ``(connection, from_pool)`` pair.

    Pooled connections have already-set PRAGMAs (foreign_keys=ON) and are
    validated with a cheap SELECT 1 before reuse. Invalid connections are
    discarded and replaced.

    Returns:
        tuple: (sqlite3.Connection, bool) — the bool is True only when the
        connection was reused from the pool.
    """
    global _pool_db_path
    if _pool is None:
        # Pooling disabled (NOMAD_DB_POOL_SIZE=0) — always open fresh.
        return get_db(), False
    # Invalidate pool if the target DB path changed (test isolation).
    current_path = get_db_path()
    with _pool_lock:
        if _pool_db_path != current_path:
            _pool_clear()
            _pool_db_path = current_path
    try:
        conn = _pool.get_nowait()
    except queue.Empty:
        # Pool empty — fall back to a fresh connection.
        return get_db(), False
    try:
        # Liveness probe: a closed/broken connection raises sqlite3.Error here.
        conn.execute('SELECT 1').fetchone()
        # Rebind to current flask.g so teardown can see it
        try:
            from flask import g, has_app_context
            if has_app_context():
                g._db_conn = conn
        except Exception:
            pass
        return conn, True
    except sqlite3.Error:
        # Stale pooled connection: discard it and open a replacement.
        try:
            conn.close()
        except Exception:
            pass
        return get_db(), False
def _pool_release(conn):
    """Return a connection to the pool if space available, else close it.

    Args:
        conn: sqlite3.Connection previously produced by _pool_acquire/get_db.
    """
    if _pool is None:
        # Pooling disabled — every connection is simply closed.
        try:
            conn.close()
        except Exception:
            pass
        return
    # Never pool a connection with an open transaction
    try:
        if conn.in_transaction:
            conn.rollback()
    except Exception:
        # Can't even inspect/rollback — connection is unusable, drop it.
        try:
            conn.close()
        except Exception:
            pass
        return
    try:
        _pool.put_nowait(conn)
    except queue.Full:
        # Pool at capacity — surplus connections are closed, not queued.
        try:
            conn.close()
        except Exception:
            pass
def pool_stats():
    """Report connection-pool occupancy for diagnostics.

    Returns:
        dict with keys 'enabled', 'size' (idle connections queued) and
        'capacity' (configured maximum).
    """
    if _pool is not None:
        return {'enabled': True, 'size': _pool.qsize(), 'capacity': _POOL_SIZE}
    return {'enabled': False, 'size': 0, 'capacity': 0}
def pool_shutdown():
    """Drain and close all pooled connections. Call at process exit.

    Thin public wrapper around _pool_clear() (which is also registered
    with atexit at module import).
    """
    _pool_clear()
@contextmanager
def db_session():
    """Context manager for DB connections with automatic close/release.

    On normal exit the connection is returned to the pool (or closed if
    pooling is off / the pool is full). On exception the transaction is
    rolled back and the connection is closed rather than pooled.

    Usage:
        with db_session() as db:
            db.execute(...)
            db.commit()
    """
    conn, from_pool = _pool_acquire()
    try:
        yield conn
    except Exception:
        # Undo any in-flight transaction before discarding the connection.
        try:
            conn.rollback()
        except Exception:
            pass
        # Clear flask.g binding so teardown_appcontext doesn't try to
        # close an already-closed connection and generate a needless exception.
        try:
            from flask import g, has_app_context
            if has_app_context() and getattr(g, '_db_conn', None) is conn:
                g._db_conn = None
        except Exception:
            pass
        # Don't return a possibly-broken conn to the pool
        try:
            conn.close()
        except Exception:
            pass
        raise
    else:
        # Unbind from flask.g before returning to pool so teardown_appcontext
        # does not close a pooled connection still in use by another caller.
        try:
            from flask import g, has_app_context
            if has_app_context() and getattr(g, '_db_conn', None) is conn:
                g._db_conn = None
        except Exception:
            pass
        # Always try to return to pool — Queue.put_nowait bounds size.
        # If pooling disabled or pool full, _pool_release closes it.
        _pool_release(conn)
def log_activity(event: str, service: str = None, detail: str = None, level: str = 'info'):
    """Record an activity event in the ``activity_log`` table.

    Args:
        event: short event name (required).
        service: optional service identifier the event relates to.
        detail: optional free-text detail.
        level: severity label, defaults to 'info'.

    Failures are swallowed (debug-logged) so activity logging can never
    break the caller's request path.
    """
    try:
        with db_session() as conn:
            conn.execute('INSERT INTO activity_log (event, service, detail, level) VALUES (?, ?, ?, ?)',
                         (event, service, detail, level))
            conn.commit()
    except sqlite3.Error as e:
        # Lazy %-formatting: the message is only built if debug is enabled.
        _log.debug('Failed to log activity: %s', e)
def backup_db():
    """Create a timestamped backup of the database using SQLite backup API.

    Writes to a `.tmp` file first and atomically renames on success so that a
    crash mid-backup can't leave behind a truncated file with the canonical
    name. Backup files are chmod'd to 0o600 on POSIX to prevent sibling
    users from reading them. After a successful backup, only the newest
    five backup files are kept.
    """
    db_path = get_db_path()
    # URI paths (in-memory/test DBs) and missing files have nothing to back up.
    if db_path.startswith('file:') or not os.path.isfile(db_path):
        return
    backup_dir = os.path.join(os.path.dirname(db_path), 'backups')
    os.makedirs(backup_dir, exist_ok=True)
    from datetime import datetime
    backup_path = os.path.join(backup_dir, f'nomad_backup_{datetime.now().strftime("%Y%m%d_%H%M%S")}.db')
    tmp_path = backup_path + '.tmp'
    # Use SQLite backup API for WAL-safe copies.
    # TRUNCATE checkpoint flushes all WAL frames into the main DB and truncates
    # the WAL file, guaranteeing backup() captures every committed transaction.
    # Fall back to PASSIVE if the database is busy (avoids blocking writers
    # indefinitely during normal operation — startup/shutdown backups are the
    # only callers and contention there is rare). A PASSIVE checkpoint may
    # leave a few uncommitted WAL frames outside the backup, which is an
    # acceptable tradeoff for not blocking live writers.
    src = sqlite3.connect(db_path, timeout=30)
    try:
        try:
            src.execute('PRAGMA wal_checkpoint(TRUNCATE)')
        except sqlite3.OperationalError:
            try:
                src.execute('PRAGMA wal_checkpoint(PASSIVE)')
            except sqlite3.OperationalError as e:
                _log.debug('Backup checkpoint skipped: %s', e)
        dst = sqlite3.connect(tmp_path)
        try:
            src.backup(dst)
        finally:
            dst.close()
    except Exception:
        # Roll back the half-written tmp file so we don't leak cruft.
        try:
            if os.path.isfile(tmp_path):
                os.remove(tmp_path)
        except OSError:
            pass
        src.close()
        raise
    else:
        src.close()
    try:
        # Atomic rename: readers only ever see complete backup files.
        os.replace(tmp_path, backup_path)
    except OSError as e:
        _log.warning('Could not finalize backup %s: %s', backup_path, e)
        try:
            os.remove(tmp_path)
        except OSError:
            pass
        return
    # POSIX: restrict backup readability to the current user.
    if os.name == 'posix':
        try:
            os.chmod(backup_path, 0o600)
        except OSError as e:
            _log.debug('Could not chmod backup %s: %s', backup_path, e)
    # Prune old backups (keep newest 5). Log failures — silent OS errors here
    # have masked real issues (locked files, full disks) in past incidents.
    try:
        backups = sorted(
            [os.path.join(backup_dir, f) for f in os.listdir(backup_dir)
             if f.endswith('.db') and not f.endswith('.tmp.db')],
            key=os.path.getmtime,
        )
    except OSError as e:
        _log.warning('Could not list backup directory for pruning: %s', e)
        return
    for old in backups[:-5]:
        try:
            os.remove(old)
        except OSError as e:
            _log.warning('Failed to prune old backup %s: %s', old, e)
def _get_migrations_dir():
"""Return the path to db_migrations/ relative to this file."""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'db_migrations')
def apply_migrations(conn):
    """Apply unapplied SQL migration files from db_migrations/.

    Each migration is executed inside its own transaction. The filename
    is recorded in the ``_migrations`` table so it is never replayed.
    Files are applied in sorted filename order (numeric prefixes keep
    the intended sequence). A failing migration is rolled back and the
    exception re-raised, leaving later migrations unapplied.

    Args:
        conn: open sqlite3.Connection to apply migrations on.
    """
    migrations_dir = _get_migrations_dir()
    if not os.path.isdir(migrations_dir):
        _log.debug('No db_migrations/ directory found — skipping migrations')
        return
    # Serialize: two threads must never replay the same migration file.
    with _migration_lock:
        # Ensure the tracking table exists (bootstrap)
        conn.execute('''
            CREATE TABLE IF NOT EXISTS _migrations (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                filename TEXT NOT NULL UNIQUE,
                applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
        # Which migrations have already been applied?
        applied = {
            row[0]
            for row in conn.execute('SELECT filename FROM _migrations').fetchall()
        }
        # Discover .sql files, sorted by name (numeric prefix keeps order)
        sql_files = sorted(glob.glob(os.path.join(migrations_dir, '*.sql')))
        for path in sql_files:
            filename = os.path.basename(path)
            if filename in applied:
                continue
            _log.info('Applying migration: %s', filename)
            with open(path, 'r', encoding='utf-8') as fh:
                sql = fh.read()
            try:
                # Execute each statement individually inside an explicit
                # transaction so that partial failures roll back cleanly.
                # conn.executescript() auto-commits after every statement,
                # which would leave the schema half-applied on error.
                #
                # Use ``sqlite3.complete_statement`` to handle statements
                # containing embedded semicolons — e.g. CREATE TRIGGER
                # ... BEGIN ...; ...; END; — which a naive ``sql.split(';')``
                # would mangle into syntax errors.
                conn.execute('BEGIN IMMEDIATE')
                buffer = ''
                for line in sql.splitlines(keepends=True):
                    buffer += line
                    if sqlite3.complete_statement(buffer):
                        stmt = buffer.strip()
                        if stmt:
                            conn.execute(stmt)
                        buffer = ''
                # Trailing content without a final ; — treat as one statement
                trailing = buffer.strip()
                if trailing:
                    conn.execute(trailing)
                conn.execute(
                    'INSERT OR IGNORE INTO _migrations (filename) VALUES (?)', (filename,)
                )
                conn.commit()
                applied.add(filename)
                _log.info('Migration applied: %s', filename)
            except Exception:
                # Roll back the partial migration; rollback itself may fail
                # if the connection is broken, which we ignore.
                try:
                    conn.rollback()
                except Exception:
                    pass
                _log.exception('Migration FAILED: %s', filename)
                raise
# V8-01: Schema version gate — skip 935 SQL statements on subsequent starts
# Bump whenever the bundled schema changes; init_db() compares this against
# the '_meta' table's schema_version row to short-circuit startup.
_SCHEMA_VERSION = 57  # v7.62: CE-03/04/13/15 Field Medicine — expanded meds, interactions, pediatric, 50 herbs
def init_db():
    """Initialize (or fast-skip) the database schema.

    If the stored schema_version in ``_meta`` is already >= _SCHEMA_VERSION,
    only the 90-day activity-log prune runs. Otherwise the full schema is
    created via _init_db_inner (defined elsewhere in this module — not
    visible here), migrations are applied, and the version marker written.
    """
    conn = get_db()
    try:
        # Check if schema is already at current version
        try:
            row = conn.execute(
                "SELECT value FROM _meta WHERE key = 'schema_version'"
            ).fetchone()
            if row and int(row[0]) >= _SCHEMA_VERSION:
                _log.debug('Schema version %s is current — skipping init', row[0])
                # Still prune old activity log
                try:
                    conn.execute("DELETE FROM activity_log WHERE created_at < datetime('now', '-90 days')")
                    conn.commit()
                except Exception:
                    # Best-effort prune: failures must not block startup.
                    pass
                return
        except Exception:
            pass  # _meta table doesn't exist yet — first run
        _init_db_inner(conn)
        apply_migrations(conn)
        # Write schema version marker
        conn.execute('''
            CREATE TABLE IF NOT EXISTS _meta (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.execute(
            "INSERT OR REPLACE INTO _meta (key, value) VALUES ('schema_version', ?)",
            (str(_SCHEMA_VERSION),)
        )
        conn.commit()
        # Prune old activity log entries (older than 90 days)
        try:
            conn.execute("DELETE FROM activity_log WHERE created_at < datetime('now', '-90 days')")
            conn.commit()
        except Exception:
            # Best-effort prune: failures must not block startup.
            pass
    finally:
        conn.close()
def _create_core_tables(conn):
    """Create core tables: services, settings, notes, conversations, activity_log,
    documents, benchmarks, checklists, inventory, contacts, lan_messages,
    vault_entries, comms_log, drill_history.

    All statements use CREATE TABLE IF NOT EXISTS, so repeated calls are
    no-ops on an existing database. Commits before returning.

    Args:
        conn: open sqlite3.Connection.
    """
    conn.executescript('''
    CREATE TABLE IF NOT EXISTS services (
        id TEXT PRIMARY KEY,
        name TEXT NOT NULL,
        description TEXT,
        icon TEXT,
        category TEXT DEFAULT 'tools',
        installed INTEGER DEFAULT 0,
        running INTEGER DEFAULT 0,
        version TEXT,
        port INTEGER,
        pid INTEGER,
        install_path TEXT,
        exe_path TEXT,
        url TEXT
    );
    CREATE TABLE IF NOT EXISTS settings (
        key TEXT PRIMARY KEY,
        value TEXT
    );
    CREATE TABLE IF NOT EXISTS notes (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        content TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS conversations (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL DEFAULT 'New Chat',
        model TEXT,
        messages TEXT DEFAULT '[]',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS activity_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        event TEXT NOT NULL,
        service TEXT,
        detail TEXT,
        level TEXT DEFAULT 'info',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS documents (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        filename TEXT NOT NULL,
        content_type TEXT DEFAULT 'text',
        file_size INTEGER DEFAULT 0,
        chunks_count INTEGER DEFAULT 0,
        status TEXT DEFAULT 'pending',
        error TEXT,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS benchmarks (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        cpu_score REAL DEFAULT 0,
        memory_score REAL DEFAULT 0,
        disk_read_score REAL DEFAULT 0,
        disk_write_score REAL DEFAULT 0,
        ai_tps REAL DEFAULT 0,
        ai_ttft REAL DEFAULT 0,
        nomad_score REAL DEFAULT 0,
        hardware TEXT DEFAULT '{}',
        details TEXT DEFAULT '{}',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS checklists (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        template TEXT NOT NULL DEFAULT '',
        items TEXT DEFAULT '[]',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS inventory (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        category TEXT NOT NULL DEFAULT 'other',
        quantity REAL DEFAULT 0,
        unit TEXT DEFAULT 'ea',
        min_quantity REAL DEFAULT 0,
        location TEXT DEFAULT '',
        expiration TEXT DEFAULT '',
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS contacts (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        callsign TEXT DEFAULT '',
        role TEXT DEFAULT '',
        skills TEXT DEFAULT '',
        phone TEXT DEFAULT '',
        freq TEXT DEFAULT '',
        email TEXT DEFAULT '',
        address TEXT DEFAULT '',
        rally_point TEXT DEFAULT '',
        blood_type TEXT DEFAULT '',
        medical_notes TEXT DEFAULT '',
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS lan_messages (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        sender TEXT NOT NULL DEFAULT 'Anonymous',
        content TEXT NOT NULL,
        msg_type TEXT DEFAULT 'text',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS vault_entries (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        encrypted_data TEXT NOT NULL,
        iv TEXT NOT NULL,
        salt TEXT NOT NULL,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS comms_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        freq TEXT NOT NULL DEFAULT '',
        callsign TEXT DEFAULT '',
        direction TEXT DEFAULT 'rx',
        message TEXT DEFAULT '',
        signal_quality TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS drill_history (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        drill_type TEXT NOT NULL,
        title TEXT NOT NULL,
        duration_sec INTEGER DEFAULT 0,
        tasks_total INTEGER DEFAULT 0,
        tasks_completed INTEGER DEFAULT 0,
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    ''')
    conn.commit()
def _create_comms_media_tables(conn):
    """Create comms/media tables: videos, audio, books, weather_log, waypoints,
    sensor_devices, sensor_readings.

    Idempotent (CREATE TABLE IF NOT EXISTS throughout); commits before
    returning.

    Args:
        conn: open sqlite3.Connection.
    """
    conn.executescript('''
    CREATE TABLE IF NOT EXISTS videos (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        filename TEXT NOT NULL,
        category TEXT DEFAULT 'general',
        folder TEXT DEFAULT '',
        duration TEXT DEFAULT '',
        notes TEXT DEFAULT '',
        url TEXT DEFAULT '',
        thumbnail TEXT DEFAULT '',
        filesize INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS audio (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        filename TEXT NOT NULL,
        artist TEXT DEFAULT '',
        album TEXT DEFAULT '',
        category TEXT DEFAULT 'general',
        folder TEXT DEFAULT '',
        duration TEXT DEFAULT '',
        url TEXT DEFAULT '',
        filesize INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS books (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title TEXT NOT NULL,
        author TEXT DEFAULT '',
        filename TEXT NOT NULL,
        format TEXT DEFAULT 'pdf',
        category TEXT DEFAULT 'general',
        folder TEXT DEFAULT '',
        description TEXT DEFAULT '',
        url TEXT DEFAULT '',
        filesize INTEGER DEFAULT 0,
        last_position TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS weather_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        pressure_hpa REAL,
        temp_f REAL,
        wind_dir TEXT DEFAULT '',
        wind_speed TEXT DEFAULT '',
        clouds TEXT DEFAULT '',
        precip TEXT DEFAULT '',
        visibility TEXT DEFAULT '',
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS waypoints (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        lat REAL NOT NULL,
        lng REAL NOT NULL,
        category TEXT DEFAULT 'general',
        color TEXT DEFAULT '#5b9fff',
        icon TEXT DEFAULT 'pin',
        elevation_m REAL,
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS sensor_devices (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        device_type TEXT NOT NULL DEFAULT 'manual',
        name TEXT NOT NULL,
        connection_type TEXT DEFAULT 'manual',
        connection_config TEXT DEFAULT '{}',
        polling_interval_sec INTEGER DEFAULT 300,
        last_reading TEXT DEFAULT '{}',
        status TEXT DEFAULT 'active',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS sensor_readings (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        device_id INTEGER NOT NULL,
        reading_type TEXT NOT NULL,
        value REAL NOT NULL,
        unit TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    ''')
    conn.commit()
def _create_federation_tables(conn):
    """Create federation tables: planting_calendar, preservation_log,
    federation_peers, federation_offers, federation_requests, federation_sitboard,
    mutual_aid_agreements, federation_transactions, sync_peers, vector_clocks,
    dead_drop_messages, group_exercises, training_datasets, training_jobs.

    Idempotent (CREATE TABLE IF NOT EXISTS throughout); commits before
    returning.

    Args:
        conn: open sqlite3.Connection.
    """
    conn.executescript('''
    CREATE TABLE IF NOT EXISTS planting_calendar (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        crop TEXT NOT NULL,
        zone TEXT DEFAULT '7',
        month INTEGER NOT NULL,
        action TEXT NOT NULL,
        notes TEXT DEFAULT '',
        yield_per_sqft REAL DEFAULT 0,
        calories_per_lb REAL DEFAULT 0,
        days_to_harvest INTEGER DEFAULT 0
    );
    CREATE TABLE IF NOT EXISTS preservation_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        crop TEXT NOT NULL,
        method TEXT NOT NULL DEFAULT 'canning',
        quantity REAL DEFAULT 0,
        unit TEXT DEFAULT 'quarts',
        batch_date TEXT DEFAULT '',
        shelf_life_months INTEGER DEFAULT 12,
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS federation_peers (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        node_id TEXT NOT NULL UNIQUE,
        node_name TEXT DEFAULT '',
        trust_level TEXT DEFAULT 'observer',
        last_seen TIMESTAMP,
        last_sync TIMESTAMP,
        ip TEXT DEFAULT '',
        port INTEGER DEFAULT 8080,
        public_key TEXT DEFAULT '',
        shared_tables TEXT DEFAULT '[]',
        auto_sync INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS federation_offers (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        item_type TEXT NOT NULL,
        item_id INTEGER,
        quantity REAL DEFAULT 0,
        node_id TEXT DEFAULT '',
        notes TEXT DEFAULT '',
        status TEXT DEFAULT 'active',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS federation_requests (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        item_type TEXT NOT NULL,
        description TEXT DEFAULT '',
        quantity REAL DEFAULT 0,
        urgency TEXT DEFAULT 'normal',
        node_id TEXT DEFAULT '',
        status TEXT DEFAULT 'active',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS federation_sitboard (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        node_id TEXT NOT NULL,
        node_name TEXT DEFAULT '',
        situation TEXT DEFAULT '{}',
        alerts TEXT DEFAULT '[]',
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS mutual_aid_agreements (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        peer_node_id TEXT NOT NULL,
        peer_name TEXT DEFAULT '',
        title TEXT NOT NULL,
        description TEXT DEFAULT '',
        our_commitments TEXT DEFAULT '[]',
        their_commitments TEXT DEFAULT '[]',
        status TEXT DEFAULT 'draft',
        effective_date TEXT DEFAULT '',
        expiry_date TEXT DEFAULT '',
        signed_by_us INTEGER DEFAULT 0,
        signed_by_peer INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS federation_transactions (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        offer_id INTEGER,
        request_id INTEGER,
        from_node_id TEXT NOT NULL,
        to_node_id TEXT NOT NULL,
        item_type TEXT NOT NULL,
        quantity REAL DEFAULT 0,
        status TEXT DEFAULT 'proposed',
        proposed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        accepted_at TIMESTAMP,
        delivered_at TIMESTAMP,
        confirmed_at TIMESTAMP,
        notes TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS sync_peers (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        peer_id TEXT NOT NULL UNIQUE,
        last_synced_at TIMESTAMP,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS vector_clocks (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        table_name TEXT NOT NULL,
        row_hash TEXT NOT NULL,
        clock TEXT DEFAULT '{}',
        last_node TEXT DEFAULT '',
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        UNIQUE(table_name, row_hash)
    );
    CREATE TABLE IF NOT EXISTS dead_drop_messages (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        from_node_id TEXT DEFAULT '',
        from_name TEXT DEFAULT '',
        recipient TEXT DEFAULT '',
        encrypted_data TEXT DEFAULT '',
        checksum TEXT DEFAULT '',
        message_timestamp TEXT DEFAULT '',
        decrypted INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS group_exercises (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        exercise_id TEXT NOT NULL UNIQUE,
        title TEXT NOT NULL,
        scenario_type TEXT DEFAULT 'custom',
        description TEXT DEFAULT '',
        initiator_node TEXT DEFAULT '',
        initiator_name TEXT DEFAULT '',
        participants TEXT DEFAULT '[]',
        status TEXT DEFAULT 'pending',
        current_phase INTEGER DEFAULT 0,
        shared_state TEXT DEFAULT '{}',
        decisions_log TEXT DEFAULT '[]',
        aar_text TEXT DEFAULT '',
        score INTEGER DEFAULT 0,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS training_datasets (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        description TEXT DEFAULT '',
        format TEXT DEFAULT 'jsonl',
        record_count INTEGER DEFAULT 0,
        file_path TEXT DEFAULT '',
        base_model TEXT DEFAULT '',
        status TEXT DEFAULT 'draft',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS training_jobs (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        dataset_id INTEGER,
        base_model TEXT NOT NULL,
        output_model TEXT DEFAULT '',
        method TEXT DEFAULT 'qlora',
        epochs INTEGER DEFAULT 3,
        learning_rate REAL DEFAULT 0.0002,
        status TEXT DEFAULT 'pending',
        progress INTEGER DEFAULT 0,
        log_text TEXT DEFAULT '',
        started_at TEXT DEFAULT '',
        completed_at TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        FOREIGN KEY (dataset_id) REFERENCES training_datasets(id)
    );
    ''')
    conn.commit()
def _create_medical_security_tables(conn):
"""Create medical/security tables: perimeter_zones, triage_events,
handoff_reports, freq_database, radio_profiles, map_routes, map_annotations,
gps_tracks, timers, incidents, patients, vitals_log, wound_log,
medication_log, triage_history, wound_updates, journal."""
conn.executescript('''
CREATE TABLE IF NOT EXISTS perimeter_zones (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
zone_type TEXT DEFAULT 'perimeter',
boundary_geojson TEXT DEFAULT '',
camera_ids TEXT DEFAULT '[]',
waypoint_ids TEXT DEFAULT '[]',
alert_on_entry INTEGER DEFAULT 1,
alert_on_exit INTEGER DEFAULT 0,
threat_level TEXT DEFAULT 'normal',
color TEXT DEFAULT '#ff0000',
notes TEXT DEFAULT '',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS triage_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
event_name TEXT NOT NULL DEFAULT 'Mass Casualty',
status TEXT DEFAULT 'active',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS handoff_reports (
id INTEGER PRIMARY KEY AUTOINCREMENT,
patient_id INTEGER NOT NULL,
from_provider TEXT DEFAULT '',
to_provider TEXT DEFAULT '',
situation TEXT DEFAULT '',
background TEXT DEFAULT '',
assessment TEXT DEFAULT '',
recommendation TEXT DEFAULT '',
report_html TEXT DEFAULT '',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS freq_database (
id INTEGER PRIMARY KEY AUTOINCREMENT,
frequency REAL NOT NULL,
mode TEXT DEFAULT 'FM',
bandwidth TEXT DEFAULT '',
service TEXT NOT NULL,
description TEXT DEFAULT '',
region TEXT DEFAULT 'US',
license_required INTEGER DEFAULT 0,
priority INTEGER DEFAULT 0,
notes TEXT DEFAULT '',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS radio_profiles (
id INTEGER PRIMARY KEY AUTOINCREMENT,
radio_model TEXT DEFAULT '',
name TEXT NOT NULL,
channels TEXT DEFAULT '[]',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS map_routes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
waypoint_ids TEXT DEFAULT '[]',
distance_km REAL DEFAULT 0,
estimated_time_min INTEGER DEFAULT 0,
terrain_difficulty TEXT DEFAULT 'moderate',
notes TEXT DEFAULT '',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS map_annotations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
type TEXT DEFAULT 'polygon',
geojson TEXT NOT NULL,
label TEXT DEFAULT '',
name TEXT DEFAULT '',
lat REAL,
lng REAL,
color TEXT DEFAULT '#ff0000',
notes TEXT DEFAULT '',
is_geofence INTEGER DEFAULT 0,
properties TEXT DEFAULT '{}',
radius_m REAL DEFAULT 0,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS gps_tracks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL DEFAULT 'Track',
geojson TEXT NOT NULL DEFAULT '{}',
total_distance_m REAL DEFAULT 0,
total_ascent_m REAL DEFAULT 0,
duration_sec INTEGER DEFAULT 0,
started_at TIMESTAMP,
ended_at TIMESTAMP,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS timers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
duration_sec INTEGER NOT NULL,
started_at TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS incidents (
id INTEGER PRIMARY KEY AUTOINCREMENT,
severity TEXT NOT NULL DEFAULT 'info',