% NOTE(review): web-scrape artifact (GitHub session boilerplate) neutralised; not part of the bibliography.
abstract = {Path planning for autonomous search and tracking of multiple objects is a critical problem in applications such as reconnaissance, surveillance, and data gathering. Due to the inherent competing objectives of searching for new objects while maintaining tracks for found objects, most current approaches rely on multi-objective planning methods, leaving it up to the user to tune parameters to balance between the two objectives, usually based on heuristics or trial and error. In this paper, we introduce UniSaT (Unified Search and Track), a novel unified-objective formulation for the search and track problem based on Random Finite Sets (RFS). Our approach models unknown and known objects using a combined generalized labeled multi-Bernoulli (GLMB) filter. For unseen objects, UniSaT leverages both cardinality and spatial prior distributions, allowing it to operate without prior knowledge of the exact number of objects in the search space. The planner maximizes the mutual information of this unified belief model, creating balanced search and tracking behaviors. We demonstrate our work in a simulated environment, presenting both qualitative results and quantitative improvements over a multi-objective method.}
136
136
}
137
+
@inproceedings{harutyunyan2025mapexrl,
  title     = {{MapExRL}: Human-Inspired Indoor Exploration with Predicted Environment Context and Reinforcement Learning},
  author    = {Harutyunyan, Narek and Moon, Brady and Kim, Seungchan and Ho, Cherie and Hung, Adam and Scherer, Sebastian},
  year      = {2025},
  booktitle = {22nd International Conference on Advanced Robotics ({ICAR})},
  url       = {https://arxiv.org/abs/2503.01548},
  video     = {https://youtu.be/aprE3y0vcpY?si=vxqieR6T68L9oXX4},
  abstract  = {Path planning for robotic exploration is challenging, requiring reasoning over unknown spaces and anticipating future observations. Efficient exploration requires selecting budget-constrained paths that maximize information gain. Despite advances in autonomous exploration, existing algorithms still fall short of human performance, particularly in structured environments where predictive cues exist but are underutilized. Guided by insights from our user study, we introduce MapExRL, which improves robot exploration efficiency in structured indoor environments by enabling longer-horizon planning through reinforcement learning (RL) and global map predictions. Unlike many RL-based exploration methods that use motion primitives as the action space, our approach leverages frontiers for more efficient model learning and longer horizon reasoning. Our framework generates global map predictions from the observed map, which our policy utilizes, along with the prediction uncertainty, estimated sensor coverage, frontier distance, and remaining distance budget, to assess the strategic long-term value of frontiers. By leveraging multiple frontier scoring methods and additional context, our policy makes more informed decisions at each stage of the exploration. We evaluate our framework on a real-world indoor map dataset, achieving up to an 18.8% improvement over the strongest state-of-the-art baseline, with even greater gains compared to conventional frontier-based algorithms.},
}
137
146
@misc{moon2025ia-tigris,
138
147
title = {IA-TIGRIS: An Incremental and Adaptive Sampling-Based Planner for Online Informative Path Planning},
139
148
author = {Brady Moon and Nayana Suvarna and Andrew Jong and Satrajit Chatterjee and Junbin Yuan and Sebastian Scherer},
@@ -145,16 +154,6 @@ @misc{moon2025ia-tigris
145
154
video = {https://youtu.be/etFLanBdgHs},
146
155
abstract = {Planning paths that maximize information gain for robotic platforms has wide-ranging applications and significant potential impact. To effectively adapt to real-time data collection, informative path planning must be computed online and be responsive to new observations. In this work, we present IA-TIGRIS, an incremental and adaptive sampling-based informative path planner that can be run efficiently with onboard computation. Our approach leverages past planning efforts through incremental refinement while continuously adapting to updated world beliefs. We additionally present detailed implementation and optimization insights to facilitate real-world deployment, along with an array of reward functions tailored to specific missions and behaviors. Extensive simulation results demonstrate IA-TIGRIS generates higher-quality paths compared to baseline methods. We validate our planner on two distinct hardware platforms: a hexarotor UAV and a fixed-wing UAV, each having unique motion models and configuration spaces. Our results show up to a 41% improvement in information gain compared to baseline methods, suggesting significant potential for deployment in real-world applications.}
147
156
}
148
-
% Removed: misc/arXiv-preprint entry "harutyunyan2025mapexrl" duplicated the
% citation key of the inproceedings (ICAR 2025) entry for the same work that
% appears earlier in this file. Duplicate keys are an error in BibTeX/Biber,
% and the published-venue entry supersedes the preprint. This text carries no
% leading at-sign, so BibTeX ignores it.
158
157
@misc{rauniyar2025aug3d,
159
158
title = {Aug3D: Augmenting large scale outdoor datasets for Generalizable Novel View Synthesis},
160
159
author = {Rauniyar, Aditya and Alama, Omar and Yong, Silong and Sycara, Katia and Scherer, Sebastian},
0 commit comments