Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,4 @@ tomli>=2.4.0
typing_extensions>=4.13.2
tzdata>=2025.3
zipp>=3.20.2
tqdm
22 changes: 22 additions & 0 deletions scripts/experiment_scripts/process_experiment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from pathlib import Path
import argparse
from tqdm import tqdm

from ofdm.processing import pipeline
from ofdm.config import OFDMConfig


def main():
    """Run the OFDM processing pipeline over every archive in an experiment.

    Finds each ``*archive`` directory under the experiment path and hands it
    to ``pipeline.process_archive`` together with the packet reference file.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--experiment_pth",
        type=str,
        default="/home/guoyixu/OFDM_Sense/EXPERIMENTS/rj_virtual_multilateration",
    )
    arg_parser.add_argument(
        "--ref_pth",
        type=str,
        default="./data_files/rand_ofdm_packet_ref.json",
    )
    cli_args = arg_parser.parse_args()

    conf = OFDMConfig()
    for archive_pth in Path(cli_args.experiment_pth).glob("*archive"):
        print(f"\n--- Processing {archive_pth} ---")
        pipeline.process_archive(archive_dir=archive_pth, ref_path=cli_args.ref_pth, ofdm_conf=conf)


if __name__ == "__main__":
    main()
108 changes: 108 additions & 0 deletions scripts/localization/multilateration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
import argparse
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path

from ofdm.simulation.solver import solve_tdoa
from ofdm.viz.sim_plotter import plot_experiment_results, plot_tdoa_hyperbolas

def load_rover_position(roaming_positions_path: Path) -> np.ndarray:
    """Load the (x, y) position of the rover from a roaming-positions JSON file.

    The file maps a device key to a ``{"x": ..., "y": ...}`` record; only the
    first entry is used (the file is expected to contain a single rover).

    Args:
        roaming_positions_path: path to ``roaming_positions.json``.

    Returns:
        ndarray ``[x, y]`` of the rover position.

    Raises:
        ValueError: if the file contains no position entries (previously this
            surfaced as an opaque ``StopIteration``).
    """
    with open(roaming_positions_path) as f:
        data = json.load(f)
    if not data:
        raise ValueError(f"No rover positions found in {roaming_positions_path}")
    pos = next(iter(data.values()))
    return np.array([pos["x"], pos["y"]])

def _solve_runs(merged, devices, rx_coords, bias_ns, max_tdoa_ns=30):
    """Solve TDOA multilateration for every measurement row.

    Args:
        merged: DataFrame with one row per run and ``delay0_<dev>`` /
            ``delay1_<dev>`` columns for each device.
        devices: device names whose delay columns are read.
        rx_coords: (n_rx, 2) receiver coordinates; row 0 is the anchor.
        bias_ns: hardware TDOA bias subtracted from each measurement (ns).
        max_tdoa_ns: rows with any |TDOA| above this threshold are rejected
            as outliers (default 30, matching the original hard-coded limit).

    Returns:
        (N, 2) ndarray of converged position estimates (possibly empty).
    """
    estimates = []
    for _, row in merged.iterrows():
        delay_diffs_s = []
        for device in devices:
            tdoa_ns = row[f"delay0_{device}"] - row[f"delay1_{device}"] - bias_ns
            if abs(tdoa_ns) > max_tdoa_ns:
                break  # outlier measurement -> discard the whole row
            delay_diffs_s.append(tdoa_ns * 1e-9)  # ns -> s
        else:
            result = solve_tdoa(rx_coords, np.array(delay_diffs_s))
            if result is not None:  # None indicates the solver did not converge
                estimates.append(result)
    return np.array(estimates)


def main():
    """Run TDOA multilateration on a processed experiment and plot the results."""
    parser = argparse.ArgumentParser(description="Run TDOA multilateration on a processed experiment.")
    parser.add_argument("--experiment", type=str, required=True, help="Path to experiment directory")
    parser.add_argument("--devices", nargs="+", required=True, help="Device archive names e.g. RX3ch1 RX5ch1, RX7ch1")
    parser.add_argument("--anchor", type=str, required=True, help="Anchor device key in fixed_positions.json e.g ANCHORch0")
    parser.add_argument("--bias-ns", type=float, default=3.661, help="Hardware TDOA bias to subtract (nanoseconds)")
    args = parser.parse_args()

    experiment_dir = Path(args.experiment)

    with open(experiment_dir / "fixed_positions.json") as f:
        fixed = json.load(f)

    anchor_pos = np.array([fixed[args.anchor]["x"], fixed[args.anchor]["y"]])
    tx_true = np.array([fixed["TX"]["x"], fixed["TX"]["y"]])

    # Collect each device's rover position and its per-run delay measurements.
    rover_positions = []
    device_dataframes = []
    for device in args.devices:
        archive_dir = experiment_dir / f"{device}_archive"
        rover_positions.append(load_rover_position(archive_dir / "roaming_positions.json"))

        df = pd.read_csv(archive_dir / "delays.csv")
        device_dataframes.append(
            df[["run", "delay0", "delay1"]].rename(
                columns={"delay0": f"delay0_{device}", "delay1": f"delay1_{device}"}
            )
        )

    # Anchor first, then rovers — this ordering must match the TDOA vector
    # built in _solve_runs.
    rx_coords = np.array([anchor_pos] + rover_positions)

    print(f"ROVER POSITIONS {rover_positions}")

    # Inner-join all device frames on run: only runs seen by every device remain.
    merged = device_dataframes[0][["run"]].copy()
    for df in device_dataframes:
        merged = merged.merge(df, on="run")

    estimated_positions = _solve_runs(merged, args.devices, rx_coords, args.bias_ns)
    n_converged = len(estimated_positions)
    n_total = len(merged)

    if n_converged == 0:
        print("No successful solves.")
        return

    centroid = np.mean(estimated_positions, axis=0)
    errors = np.linalg.norm(estimated_positions - tx_true, axis=1)
    centroid_error = np.linalg.norm(centroid - tx_true)

    print(f"Converged: {n_converged}/{n_total}")
    print(f"Centroid: X={centroid[0]:.4f}m Y={centroid[1]:.4f}m")
    print(f"Mean error: {np.mean(errors)*100:.2f} cm")
    print(f"RMSE: {np.sqrt(np.mean(errors**2))*100:.2f} cm")
    print(f"P95 error: {np.percentile(errors, 95)*100:.2f} cm")
    print(f"Centroid error: {centroid_error*100:.2f} cm")

    results = {
        "estimates": estimated_positions,
        "centroid": centroid,
        "rmse": np.sqrt(np.mean(errors**2)),
        "mean_error": np.mean(errors),
        "p95_error": np.percentile(errors, 95),
        "centroid_error": centroid_error,
        "n_converged": n_converged,
        "n_trials": n_total,
    }

    fig, ax = plt.subplots(figsize=(9, 9))
    plot_experiment_results(results, tx_true, rx_coords, ax=ax)
    plot_tdoa_hyperbolas(tx_true, rx_coords, results, ax)
    ax.set_title("OFDM Localization")  # was an f-string with no placeholders
    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    main()
20 changes: 11 additions & 9 deletions scripts/simulation/heapmap.py → scripts/simulation/heatmap.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from ofdm.simulation.monte_carlo import run_monte_carlo
from ofdm.config import loadLayout
Expand All @@ -11,30 +12,31 @@ def main():
rx_x = rx_coords[:, 0].reshape(-1, 1, 1)
rx_y = rx_coords[:, 1].reshape(-1, 1, 1)

x_range = np.linspace(0, 1, num=40)
y_range = np.linspace(0, 1, num=40)
x_range = np.linspace(0, 2, num=50)
y_range = np.linspace(0, 2, num=50)
X, Y = np.meshgrid(x_range, y_range)

bounds = ([0, 2], [0, 2])
error_heatmap = np.zeros(X.shape)

for i in range(len(x_range)):
for i in tqdm(range(len(x_range)), desc="Running Monte Carlo Simulations"):
for j in range(len(y_range)):
tx_coords = np.array([X[i][j], Y[i][j]])
results = run_monte_carlo(
rx_coords=rx_coords,
tx_pos=tx_coords,
sigma_ns=0.1,
n_trials=10,
sigma_ns=0.5,
n_trials=30,
seed=42,
bounds=bounds
)
error_heatmap[i][j] = results['rmse']

plt.figure(figsize=(10, 8))
v_max = 1
v_max = 1.5
v_min = 0
cp = plt.pcolormesh(X, Y, error_heatmap,vmin=v_min, vmax=v_max, shading='auto', cmap='viridis')
plt.colorbar(cp, label='RMSE (meters)')
plt.scatter(rx_x, rx_y, marker='^', color='red', label='Receivers')
plt.scatter(rx_x, rx_y, marker='^', color='red', label='Receivers', s=150)
plt.title('OFDM Localization Error Heatmap')
plt.xlabel('X Position (m)')
plt.ylabel('Y Position (m)')
Expand All @@ -43,4 +45,4 @@ def main():


if __name__ == "__main__":
main()
main()
3 changes: 3 additions & 0 deletions scripts/simulation/monte_carlo.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,15 @@ def main():
if args.tx is not None:
tx_true = np.array(args.tx)

bounds = ([0, 2], [0, 2])

results = run_monte_carlo(
tx_pos=tx_true,
rx_coords=rx_coords,
sigma_ns=args.sigma_ns,
n_trials=args.trials,
seed=42,
bounds=bounds
)

print(f"Converged: {results['n_converged']}/{results['n_trials']}")
Expand Down
11 changes: 11 additions & 0 deletions src/ofdm/modulation/qam.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,5 +66,16 @@ def get_reference_constalation() -> np.ndarray:
points = list(QAM16_MAP.values())
return np.array(points)

def binary_ref_to_iq(binary_string: str) -> np.ndarray:
    """Convert binary reference data from a ref_data JSON file into IQ symbols.

    Accepts a single bit string or an iterable of bit strings (joined first).
    The bit stream is chopped into 4-bit words and each word is mapped to a
    constellation point via ``binary_to_iq``.

    Args:
        binary_string: bit string (or iterable of bit strings); total length
            is assumed to be a multiple of 4 — a trailing partial word is
            passed through to ``binary_to_iq`` unchanged.

    Returns:
        Complex ndarray of IQ reference symbols, scaled by sqrt(10)
        (presumably undoing a unit-power normalization of the QAM map —
        confirm against QAM16_MAP).
    """
    full_string = "".join(binary_string)
    word_len = 4
    # Slice words straight from the string; the original wrapped this list in
    # np.array only to iterate it again, which is wasteful.
    words = [full_string[i:i + word_len] for i in range(0, len(full_string), word_len)]

    iq_array = [binary_to_iq(word) for word in words]
    return np.array(iq_array) * np.sqrt(10)


91 changes: 91 additions & 0 deletions src/ofdm/processing/pipeline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
import numpy as np
import json
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import pandas as pd

from ofdm.processing.rx import unpack_rx_file, normalize_rx_signal, extract_packet
from ofdm.channel.delay import calculate_sub_sample_delay_parabolic
from ofdm.modulation import qam
from ofdm.utils.eval import calc_EVM, calc_BER, calc_SER
from ofdm.config import OFDMConfig




def process_dat_file(dat_path: Path, ref_path: Path, channel: int, ofdm_conf: OFDMConfig) -> dict:
    """Process a single .dat capture and return delay plus link-quality metrics.

    Only the delay feeds localization downstream; the demodulated IQ data is
    used here solely to compute EVM/SER/BER and is then discarded.
    """
    # Demodulate once to obtain the reference data and the refined packet start.
    demodulated_iq, ref, packet_start = unpack_rx_file(
        ofdm_conf=ofdm_conf,
        rx_path=dat_path,
        ref_path=ref_path,
    )

    # Re-read the raw capture for the delay estimate: unpack_rx_file applies
    # CFO correction internally, which we do not want for the matched-filter
    # time-delay estimate.
    # NOTE(review): total_symbols = 1 + 1 + n_data_symb — presumably two
    # header symbols (preamble + pilot) ahead of the data; confirm.
    raw_packet = extract_packet(
        rx_data=np.fromfile(dat_path, dtype=np.complex64),
        start_idx=packet_start,
        total_symbols=(1 + 1 + ref["n_data_symb"]),
        ofdm_conf=ofdm_conf,
    )
    rx_signal = normalize_rx_signal(raw_packet)

    pilot_ref = np.array(ref['pilot_ref_real']) + 1j * np.array(ref['pilot_ref_imag'])

    # Sub-sample delay in seconds, converted to nanoseconds for the CSV.
    delay_ns = 1e9 * calculate_sub_sample_delay_parabolic(
        rx_signal=rx_signal,
        ref_signal=pilot_ref,
        fs=ofdm_conf.FS,
    )

    # Quality metrics against the known transmitted constellation.
    iq_reference = qam.binary_ref_to_iq(binary_string=ref['binary_data'])
    return {
        f"delay{channel}": delay_ns,
        f"evm{channel}": calc_EVM(iq_rx=demodulated_iq, iq_ref=iq_reference),
        f"ser{channel}": calc_SER(iq_rx=demodulated_iq, iq_ref=iq_reference),
        f"ber{channel}": calc_BER(iq_rx=demodulated_iq, iq_ref=iq_reference),
    }


def process_archive(archive_dir: Path, ref_path: Path, ofdm_conf: OFDMConfig):
    """Unpack every .dat file in a device archive and save results to delays.csv.

    Scans each ``channel*/`` subdirectory of the archive, groups .dat files by
    run number, processes every (run, channel) pair, and writes one CSV row per
    run. A failure on an individual file is logged and skipped so one bad
    capture does not abort the whole archive.
    """
    # Map run number -> {channel index: .dat file path}.
    files_by_run = defaultdict(dict)
    for channel_dir in archive_dir.glob("channel*/"):
        channel_idx = int(channel_dir.name.replace("channel", ""))
        for dat_path in channel_dir.glob("*.dat"):
            # File names embed the run number as "..._run_<N>_...".
            run_idx = int(dat_path.name.split("_run_")[1].split("_")[0])
            files_by_run[run_idx][channel_idx] = dat_path

    rows = []
    for run_idx, channel_files in tqdm(sorted(files_by_run.items()), desc="Processing runs"):
        row = {"run": run_idx}
        for channel_idx, dat_path in sorted(channel_files.items()):
            try:
                row.update(process_dat_file(
                    dat_path=dat_path,
                    ref_path=ref_path,
                    channel=channel_idx,
                    ofdm_conf=ofdm_conf,
                ))
            except Exception as e:
                # Best-effort: log and continue with the remaining channels.
                tqdm.write(f"  [WARNING] run {run_idx} channel {channel_idx} failed: {e}")
        rows.append(row)

    output_path = archive_dir / "delays.csv"
    pd.DataFrame(rows).to_csv(output_path, index=False)
    print(f"Saved {len(rows)} runs to {output_path}")

Loading
Loading