Skip to content

Commit 838239d

Browse files
authored
Merge pull request #12 from osherlock1/refactor-localization
Add localization simulation tools
2 parents ce8b7f5 + a1fb719 commit 838239d

11 files changed

Lines changed: 469 additions & 16 deletions

File tree

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,3 +23,4 @@ tomli>=2.4.0
2323
typing_extensions>=4.13.2
2424
tzdata>=2025.3
2525
zipp>=3.20.2
26+
pqdm
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
from pathlib import Path
2+
import argparse
3+
from tqdm import tqdm
4+
5+
from ofdm.processing import pipeline
6+
from ofdm.config import OFDMConfig
7+
8+
9+
def main():
    """Run the OFDM processing pipeline over every device archive in an experiment.

    Walks the experiment directory for ``*archive`` subdirectories and unpacks
    each one with ``pipeline.process_archive``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--experiment_pth", type=str, default="/home/guoyixu/OFDM_Sense/EXPERIMENTS/rj_virtual_multilateration")
    arg_parser.add_argument("--ref_pth", type=str, default="./data_files/rand_ofdm_packet_ref.json")
    cli_args = arg_parser.parse_args()

    conf = OFDMConfig()
    experiment_root = Path(cli_args.experiment_pth)
    for archive_pth in experiment_root.glob("*archive"):
        print(f"\n--- Processing {archive_pth} ---")
        pipeline.process_archive(archive_dir=archive_pth, ref_path=cli_args.ref_pth, ofdm_conf=conf)


if __name__ == "__main__":
    main()
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
import argparse
2+
import json
3+
import numpy as np
4+
import pandas as pd
5+
import matplotlib.pyplot as plt
6+
from pathlib import Path
7+
8+
from ofdm.simulation.solver import solve_tdoa
9+
from ofdm.viz.sim_plotter import plot_experiment_results, plot_tdoa_hyperbolas
10+
11+
def load_rover_position(roaming_positions_path: Path) -> np.ndarray:
    """Return the ``[x, y]`` coordinates of the first entry in a roaming-positions JSON file.

    The file maps position keys to ``{"x": ..., "y": ...}`` dicts; only the
    first entry (JSON/insertion order) is used.
    """
    with open(roaming_positions_path) as fh:
        payload = json.load(fh)
    first_entry = next(iter(payload.values()))
    return np.array([first_entry["x"], first_entry["y"]])
16+
17+
def main():
    """Run TDOA multilateration on a processed experiment.

    Reads per-device ``delays.csv`` tables (produced by the processing
    pipeline), forms anchor-relative TDOA measurements per run, solves for
    the transmitter position with ``solve_tdoa``, and prints/plots accuracy
    statistics against the surveyed TX position in ``fixed_positions.json``.
    """
    parser = argparse.ArgumentParser(description="Run TDOA multilateration on a processed experiment.")
    parser.add_argument("--experiment", type=str, required=True, help="Path to experiment directory")
    parser.add_argument("--devices", nargs="+", required=True, help="Device archive names e.g. RX3ch1 RX5ch1, RX7ch1")
    parser.add_argument("--anchor", type=str, required=True, help="Anchor device key in fixed_positions.json e.g ANCHORch0")
    parser.add_argument("--bias-ns", type=float, default=3.661, help="Hardware TDOA bias to subtract (nanoseconds)")
    args = parser.parse_args()

    experiment_dir = Path(args.experiment)

    with open(experiment_dir / "fixed_positions.json") as f:
        fixed = json.load(f)

    anchor_pos = np.array([fixed[args.anchor]["x"], fixed[args.anchor]["y"]])
    tx_true = np.array([fixed["TX"]["x"], fixed["TX"]["y"]])

    rover_positions = []
    device_dataframes = []

    for device in args.devices:
        archive_dir = experiment_dir / f"{device}_archive"
        rover_positions.append(load_rover_position(archive_dir / "roaming_positions.json"))

        # Keep only the run index and the two per-channel delays, suffixed by
        # device so the tables can be merged side by side.
        df = pd.read_csv(archive_dir / "delays.csv")
        device_dataframes.append(df[["run", "delay0", "delay1"]].rename(columns={"delay0": f"delay0_{device}", "delay1": f"delay1_{device}"}))

    # Receiver geometry: anchor first, then one rover position per device.
    rx_coords = np.array([anchor_pos] + rover_positions)

    print(f"ROVER POSITIONS {rover_positions}")

    # Inner-join all device tables on the run index so every row carries a
    # complete set of measurements.
    merged = device_dataframes[0][["run"]].copy()
    for df in device_dataframes:
        merged = merged.merge(df, on="run")

    estimated_positions = []

    # Reject physically implausible TDOAs (|30 ns| corresponds to ~9 m of
    # path difference, well beyond the experiment geometry).
    MAX_TDOA_NS = 30

    for _, row in merged.iterrows():
        delay_diffs_s = []
        valid = True
        for device in args.devices:
            # Anchor-relative TDOA for this device, corrected for the fixed
            # hardware bias.
            tdoa_ns = row[f"delay0_{device}"] - row[f"delay1_{device}"] - args.bias_ns
            if abs(tdoa_ns) > MAX_TDOA_NS:
                valid = False
                break
            delay_diffs_s.append(tdoa_ns * 1e-9)
        if not valid:
            continue
        result = solve_tdoa(rx_coords, np.array(delay_diffs_s))
        if result is not None:
            estimated_positions.append(result)

    estimated_positions = np.array(estimated_positions)
    n_converged = len(estimated_positions)
    n_total = len(merged)

    if n_converged == 0:
        print("No successful solves.")
        return

    centroid = np.mean(estimated_positions, axis=0)
    errors = np.linalg.norm(estimated_positions - tx_true, axis=1)
    centroid_error = np.linalg.norm(centroid - tx_true)

    print(f"Converged: {n_converged}/{n_total}")
    print(f"Centroid: X={centroid[0]:.4f}m Y={centroid[1]:.4f}m")
    print(f"Mean error: {np.mean(errors)*100:.2f} cm")
    print(f"RMSE: {np.sqrt(np.mean(errors**2))*100:.2f} cm")
    print(f"P95 error: {np.percentile(errors, 95)*100:.2f} cm")
    print(f"Centroid error: {centroid_error*100:.2f} cm")

    results = {
        "estimates": estimated_positions,
        "centroid": centroid,
        "rmse": np.sqrt(np.mean(errors**2)),
        "mean_error": np.mean(errors),
        "p95_error": np.percentile(errors, 95),
        "centroid_error": centroid_error,
        "n_converged": n_converged,
        "n_trials": n_total,
    }

    fig, ax = plt.subplots(figsize=(9, 9))
    plot_experiment_results(results, tx_true, rx_coords, ax=ax)
    plot_tdoa_hyperbolas(tx_true, rx_coords, results, ax)
    # Plain string: the original used an f-string with no placeholders (F541).
    ax.set_title("OFDM Localization")
    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    main()
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import numpy as np
2+
from tqdm import tqdm
23
import matplotlib.pyplot as plt
34
from ofdm.simulation.monte_carlo import run_monte_carlo
45
from ofdm.config import loadLayout
@@ -11,30 +12,31 @@ def main():
1112
rx_x = rx_coords[:, 0].reshape(-1, 1, 1)
1213
rx_y = rx_coords[:, 1].reshape(-1, 1, 1)
1314

14-
x_range = np.linspace(0, 1, num=40)
15-
y_range = np.linspace(0, 1, num=40)
15+
x_range = np.linspace(0, 2, num=50)
16+
y_range = np.linspace(0, 2, num=50)
1617
X, Y = np.meshgrid(x_range, y_range)
17-
18+
bounds = ([0, 2], [0, 2])
1819
error_heatmap = np.zeros(X.shape)
1920

20-
for i in range(len(x_range)):
21+
for i in tqdm(range(len(x_range)), desc="Running Monte Carlo Simulations"):
2122
for j in range(len(y_range)):
2223
tx_coords = np.array([X[i][j], Y[i][j]])
2324
results = run_monte_carlo(
2425
rx_coords=rx_coords,
2526
tx_pos=tx_coords,
26-
sigma_ns=0.1,
27-
n_trials=10,
27+
sigma_ns=0.5,
28+
n_trials=30,
2829
seed=42,
30+
bounds=bounds
2931
)
3032
error_heatmap[i][j] = results['rmse']
3133

3234
plt.figure(figsize=(10, 8))
33-
v_max = 1
35+
v_max = 1.5
3436
v_min = 0
3537
cp = plt.pcolormesh(X, Y, error_heatmap,vmin=v_min, vmax=v_max, shading='auto', cmap='viridis')
3638
plt.colorbar(cp, label='RMSE (meters)')
37-
plt.scatter(rx_x, rx_y, marker='^', color='red', label='Receivers')
39+
plt.scatter(rx_x, rx_y, marker='^', color='red', label='Receivers', s=150)
3840
plt.title('OFDM Localization Error Heatmap')
3941
plt.xlabel('X Position (m)')
4042
plt.ylabel('Y Position (m)')
@@ -43,4 +45,4 @@ def main():
4345

4446

4547
if __name__ == "__main__":
46-
main()
48+
main()

scripts/simulation/monte_carlo.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,12 +18,15 @@ def main():
1818
if args.tx is not None:
1919
tx_true = np.array(args.tx)
2020

21+
bounds = ([0, 2], [0, 2])
22+
2123
results = run_monte_carlo(
2224
tx_pos=tx_true,
2325
rx_coords=rx_coords,
2426
sigma_ns=args.sigma_ns,
2527
n_trials=args.trials,
2628
seed=42,
29+
bounds=bounds
2730
)
2831

2932
print(f"Converged: {results['n_converged']}/{results['n_trials']}")

src/ofdm/modulation/qam.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,5 +66,16 @@ def get_reference_constalation() -> np.ndarray:
6666
points = list(QAM16_MAP.values())
6767
return np.array(points)
6868

69+
def binary_ref_to_iq(binary_string: str) -> np.ndarray:
    """
    Convert the binary reference data in the ref_data json files into IQ data
    for evaluation calculations.

    The input is flattened with ``"".join`` (so a single bit string or an
    iterable of bit strings both work), split into 4-bit words, and mapped
    through ``binary_to_iq``. The result is scaled by sqrt(10) — presumably
    undoing the unit-power normalization of the QAM16 map; confirm against
    the map's definition.
    """
    full_string = "".join(binary_string)
    word_len = 4
    # Plain list of word strings — no need to round-trip through a NumPy
    # object array just to iterate.
    words = [full_string[i:i + word_len] for i in range(0, len(full_string), word_len)]

    return np.array([binary_to_iq(word) for word in words]) * np.sqrt(10)
6980

7081

src/ofdm/processing/pipeline.py

Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
import numpy as np
2+
import json
3+
from pathlib import Path
4+
from collections import defaultdict
5+
from tqdm import tqdm
6+
import pandas as pd
7+
8+
from ofdm.processing.rx import unpack_rx_file, normalize_rx_signal, extract_packet
9+
from ofdm.channel.delay import calculate_sub_sample_delay_parabolic
10+
from ofdm.modulation import qam
11+
from ofdm.utils.eval import calc_EVM, calc_BER, calc_SER
12+
from ofdm.config import OFDMConfig
13+
14+
15+
16+
17+
def process_dat_file(dat_path: Path, ref_path: Path, channel: int, ofdm_conf: OFDMConfig) -> dict:
    """
    Process a single .dat file and return delay and transfer-quality metrics.

    Only the delay is used for localization; the demodulated IQ data is
    evaluated (EVM/SER/BER) and then discarded.
    """
    # Demodulate once — this also yields the refined packet start needed by
    # the delay estimator below.
    demodulated_data, ref_data, refined_packet_start = unpack_rx_file(
        ofdm_conf=ofdm_conf,
        rx_path=dat_path,
        ref_path=ref_path,
    )

    # Re-read the raw samples for the delay calculation: unpack_rx_file
    # applies CFO correction internally, which must not be present for the
    # matched-filter time-delay estimate.
    raw_samples = np.fromfile(dat_path, dtype=np.complex64)
    packet = extract_packet(
        rx_data=raw_samples,
        start_idx=refined_packet_start,
        total_symbols=(1 + 1 + ref_data["n_data_symb"]),
        ofdm_conf=ofdm_conf,
    )
    rx_data = normalize_rx_signal(packet)

    tx_pilot = np.array(ref_data['pilot_ref_real']) + 1j * np.array(ref_data['pilot_ref_imag'])

    delay_ns = 1e9 * calculate_sub_sample_delay_parabolic(  # seconds -> nanoseconds
        rx_signal=rx_data,
        ref_signal=tx_pilot,
        fs=ofdm_conf.FS,
    )

    # Link-quality metrics against the known reference constellation.
    ref_iq_data = qam.binary_ref_to_iq(binary_string=ref_data['binary_data'])

    return {
        f"delay{channel}": delay_ns,
        f"evm{channel}": calc_EVM(iq_rx=demodulated_data, iq_ref=ref_iq_data),
        f"ser{channel}": calc_SER(iq_rx=demodulated_data, iq_ref=ref_iq_data),
        f"ber{channel}": calc_BER(iq_rx=demodulated_data, iq_ref=ref_iq_data),
    }
57+
58+
59+
def process_archive(archive_dir: Path, ref_path: Path, ofdm_conf: OFDMConfig):
    """
    Automate experiment unpacking for one device archive.

    Reads all .dat files under each channel* directory of the archive,
    processes every (run, channel) capture, and saves the unpacked results
    to delays.csv inside the archive directory.
    """
    # Index capture files as runs[run_number][channel] = path.
    runs = defaultdict(dict)
    for channel_dir in archive_dir.glob("channel*/"):
        channel = int(channel_dir.name.replace("channel", ""))
        for dat_file in channel_dir.glob("*.dat"):
            # Filenames embed the run index as "..._run_<N>_...".
            run_number = int(dat_file.name.split("_run_")[1].split("_")[0])
            runs[run_number][channel] = dat_file

    results = []
    for run_number, channel_files in tqdm(sorted(runs.items()), desc="Processing runs"):
        row = {"run": run_number}
        for channel, dat_file in sorted(channel_files.items()):
            try:
                row.update(process_dat_file(
                    dat_path=dat_file,
                    ref_path=ref_path,
                    channel=channel,
                    ofdm_conf=ofdm_conf,
                ))
            except Exception as e:
                # Best-effort: keep the run (with whichever channels
                # succeeded) and log the failure instead of aborting.
                tqdm.write(f" [WARNING] run {run_number} channel {channel} failed: {e}")
        results.append(row)

    output_path = archive_dir / "delays.csv"
    pd.DataFrame(results).to_csv(output_path, index=False)
    print(f"Saved {len(results)} runs to {output_path}")
91+

0 commit comments

Comments
 (0)