Skip to content

Commit 8a00171

Browse files
kalyazin authored and zulinx86 committed
fix(entropy): cap per-request entropy allocation to 64 KiB
Overlapping descriptors within a single chain can cause buffer.len() to exceed the distinct guest memory backing the request. Without a bound, handle_one() allocates a vec proportional to the inflated length, which can reach ~4 GiB from a 17 MiB guest. Introduce MAX_ENTROPY_BYTES (64 KiB) and clamp the allocation in handle_one() to that limit. Legitimate requests are unaffected since a 256-entry descriptor chain with typical page-sized buffers fits well within the cap. Add tests covering the capped path, the large inflated buffer path, and the pass-through for small requests. Signed-off-by: Nikita Kalyazin <kalyazin@amazon.com>
1 parent 9154cfe commit 8a00171

2 files changed

Lines changed: 147 additions & 3 deletions

File tree

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to
77
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
88

9+
## [1.14.4]
10+
11+
### Fixed
12+
13+
- [#5762](https://github.com/firecracker-microvm/firecracker/pull/5762): Cap
14+
virtio-rng per-request entropy to 64 KiB. Previously, a guest could construct
15+
a descriptor chain that caused Firecracker to allocate more host memory than
16+
the guest actually provided, potentially leading to excessive host memory
17+
consumption.
18+
919
## [1.14.3]
1020

1121
### Fixed

src/vmm/src/devices/virtio/rng/device.rs

Lines changed: 137 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,14 @@ use crate::vstate::memory::GuestMemoryMmap;
2828

2929
pub const ENTROPY_DEV_ID: &str = "rng";
3030

31+
/// Maximum number of bytes `handle_one()` will serve per request.
32+
///
33+
/// Overlapping descriptors within a single chain can cause `buffer.len()` to
34+
/// exceed the amount of distinct guest memory actually backing the request.
35+
/// Capping the per-request allocation to 64 KiB keeps host memory usage
36+
/// bounded regardless of how the descriptor chain is constructed.
37+
const MAX_ENTROPY_BYTES: u32 = 64 * 1024;
38+
3139
#[derive(Debug, thiserror::Error, displaydoc::Display)]
3240
pub enum EntropyError {
3341
/// Error while handling an Event file descriptor: {0}
@@ -119,14 +127,19 @@ impl Entropy {
119127
return Ok(0);
120128
}
121129

122-
let mut rand_bytes = vec![0; self.buffer.len() as usize];
130+
// Cap the number of bytes we actually generate so that the host-side
131+
// allocation stays bounded even when buffer.len() is inflated by
132+
// overlapping descriptors in the chain.
133+
let len = std::cmp::min(self.buffer.len(), MAX_ENTROPY_BYTES);
134+
135+
let mut rand_bytes = vec![0; len as usize];
123136
rand::fill(&mut rand_bytes).inspect_err(|_| {
124137
METRICS.host_rng_fails.inc();
125138
})?;
126139

127-
// It is ok to unwrap here. We are writing `iovec.len()` bytes at offset 0.
140+
// It is ok to unwrap here. We are writing `len` bytes at offset 0.
128141
self.buffer.write_all_volatile_at(&rand_bytes, 0).unwrap();
129-
Ok(self.buffer.len())
142+
Ok(len)
130143
}
131144

132145
fn process_entropy_queue(&mut self) -> Result<(), InvalidAvailIdx> {
@@ -611,4 +624,125 @@ mod tests {
611624
// The rate limiter event should have processed the pending buffer as well
612625
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 128);
613626
}
627+
628+
/// Verify that handle_one() caps the host allocation to MAX_ENTROPY_BYTES
/// when overlapping descriptors inflate buffer.len() beyond the limit.
#[test]
fn test_handle_one_caps_overlapping_descriptors() {
    use crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT;
    use crate::devices::virtio::test_utils::VirtQueue;
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::GuestAddress;

    // 32 descriptors × 4 KiB = 128 KiB claimed, which exceeds MAX_ENTROPY_BYTES (64 KiB).
    const N_DESC: u16 = 32;
    const CHUNK: u32 = 4096;

    let guest_mem = single_region_mem(0x20000);
    let virtq = VirtQueue::new(GuestAddress(0), &guest_mem, 256);
    let mut q = virtq.create_queue();

    // Every descriptor points at the same guest address, so the chain's total
    // length is inflated without any additional backing memory.
    let overlap_addr: u64 = 0x10000;
    for idx in 0..N_DESC {
        let mut flags = VIRTQ_DESC_F_WRITE;
        if idx + 1 < N_DESC {
            flags |= VIRTQ_DESC_F_NEXT;
        }
        virtq.dtable[usize::from(idx)].set(overlap_addr, CHUNK, flags, idx + 1);
    }
    virtq.avail.ring[0].set(0);
    virtq.avail.idx.set(1);

    let chain_head = q.pop().unwrap().unwrap();
    // SAFETY: `guest_mem` is a valid guest memory region and `chain_head` is a
    // descriptor chain obtained from the virtqueue backed by that memory.
    let iovec = unsafe {
        IoVecBufferMut::<256>::from_descriptor_chain(&guest_mem, chain_head).unwrap()
    };
    // The reported buffer length is inflated well past the cap.
    assert_eq!(iovec.len(), u32::from(N_DESC) * CHUNK); // 128 KiB

    let mut entropy_dev = default_entropy();
    entropy_dev.buffer = iovec;
    let bytes = entropy_dev.handle_one().unwrap();

    assert_eq!(
        bytes,
        MAX_ENTROPY_BYTES,
        "handle_one() must cap at MAX_ENTROPY_BYTES ({MAX_ENTROPY_BYTES}), got {bytes} for \
         inflated buffer.len() = {}",
        u32::from(N_DESC) * CHUNK
    );
}
672+
673+
/// Verify that handle_one() caps a large inflated buffer (~4 GiB from
/// 255 overlapping descriptors) to MAX_ENTROPY_BYTES.
#[test]
fn test_handle_one_caps_large_inflated_buffer() {
    use crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT;
    use crate::devices::virtio::test_utils::VirtQueue;
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::GuestAddress;

    const N_DESC: u16 = 255;
    const CHUNK: u32 = 16 * 1024 * 1024; // 16 MiB
    const TOTAL: u64 = (N_DESC as u64) * (CHUNK as u64); // ~4 GiB

    // One chunk's worth of real backing memory plus 1 MiB of headroom; the
    // guest "claims" ~4 GiB by aliasing that single chunk 255 times.
    let guest_mem = single_region_mem((CHUNK as usize) + 0x100000);
    let virtq = VirtQueue::new(GuestAddress(0), &guest_mem, 256);
    let mut q = virtq.create_queue();

    let overlap_addr: u64 = 0x80000;
    for idx in 0..N_DESC {
        let mut flags = VIRTQ_DESC_F_WRITE;
        if idx + 1 < N_DESC {
            flags |= VIRTQ_DESC_F_NEXT;
        }
        virtq.dtable[usize::from(idx)].set(overlap_addr, CHUNK, flags, idx + 1);
    }
    virtq.avail.ring[0].set(0);
    virtq.avail.idx.set(1);

    let chain_head = q.pop().unwrap().unwrap();
    // SAFETY: `guest_mem` is a valid guest memory region and `chain_head` is a
    // descriptor chain obtained from the virtqueue backed by that memory.
    let iovec = unsafe {
        IoVecBufferMut::<256>::from_descriptor_chain(&guest_mem, chain_head).unwrap()
    };
    assert_eq!(iovec.len() as u64, TOTAL);

    let mut entropy_dev = default_entropy();
    entropy_dev.buffer = iovec;
    let bytes = entropy_dev.handle_one().unwrap();

    assert_eq!(
        bytes, MAX_ENTROPY_BYTES,
        "handle_one() must cap at MAX_ENTROPY_BYTES, not allocate {} bytes",
        TOTAL
    );
}
714+
715+
/// Verify that a request within MAX_ENTROPY_BYTES is served in full
/// (the cap does not truncate legitimate small requests).
#[test]
fn test_handle_one_serves_small_request_in_full() {
    use crate::devices::virtio::test_utils::VirtQueue;
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::GuestAddress;

    const SIZE: u32 = 256;

    let guest_mem = single_region_mem(0x20000);
    let virtq = VirtQueue::new(GuestAddress(0), &guest_mem, 256);
    let mut q = virtq.create_queue();

    // A single well-formed write descriptor, far below the cap.
    virtq.dtable[0].set(0x10000, SIZE, VIRTQ_DESC_F_WRITE, 0);
    virtq.avail.ring[0].set(0);
    virtq.avail.idx.set(1);

    let chain_head = q.pop().unwrap().unwrap();
    // SAFETY: `guest_mem` is a valid guest memory region and `chain_head` is a
    // descriptor chain obtained from the virtqueue backed by that memory.
    let iovec = unsafe {
        IoVecBufferMut::<256>::from_descriptor_chain(&guest_mem, chain_head).unwrap()
    };
    assert_eq!(iovec.len(), SIZE);

    let mut entropy_dev = default_entropy();
    entropy_dev.buffer = iovec;
    let bytes = entropy_dev.handle_one().unwrap();

    assert_eq!(
        bytes, SIZE,
        "small request ({SIZE} bytes) should be served in full, got {bytes}"
    );
}
614748
}

0 commit comments

Comments
 (0)