Skip to content
This repository was archived by the owner on Jul 17, 2025. It is now read-only.

Commit 2629ab9

Browse files
committed
Clean up some code, prepare to add rackscale system syscalls
1 parent b2981bd commit 2629ab9

File tree

4 files changed

+191
-37
lines changed

4 files changed

+191
-37
lines changed

kernel/src/arch/x86_64/rackscale/processops/request_core.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ pub(crate) fn handle_request_core(hdr: &mut RPCHeader, payload: &mut [u8]) -> Re
8484
return construct_error_ret(hdr, payload, RPCError::NoFileDescForPid);
8585
}
8686
let local_pid = local_pid.unwrap();
87-
info!("handle_request_core() start");
87+
log::trace!("handle_request_core() start");
8888

8989
// Parse request
9090
let core_req = match unsafe { decode::<RequestCoreReq>(payload) } {
@@ -166,7 +166,7 @@ pub(crate) fn request_core_work(rpc_client: &mut dyn RPCClient) -> () {
166166

167167
log::info!("Client finished processing core work request");
168168
} else {
169-
log::info!("Client received no work.")
169+
log::trace!("Client received no work.")
170170
}
171171
}
172172
}
@@ -183,7 +183,9 @@ pub(crate) fn handle_request_core_work(
183183
.expect("failed to fetch core assignment deque for node");
184184
deque.pop_front()
185185
};
186-
log::info!("handle_request_core_work() Found work={:?}", work);
186+
if work.is_some() {
187+
log::info!("handle_request_core_work() Found work={:?}", work);
188+
}
187189
let result = RequestCoreWorkRes { work };
188190

189191
// Populate output buffer & header

kernel/src/arch/x86_64/rackscale/syscalls.rs

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ use crate::fs::fd::FileDescriptor;
99
use crate::memory::Frame;
1010
use crate::nrproc;
1111
use crate::process::{KernArcBuffer, UserSlice};
12-
use crate::syscalls::{FsDispatch, ProcessDispatch, SystemCallDispatch};
12+
use crate::syscalls::{FsDispatch, ProcessDispatch, SystemCallDispatch, SystemDispatch};
1313

1414
use super::super::syscall::{Arch86SystemCall, Arch86SystemDispatch, Arch86VSpaceDispatch};
1515
use super::client::RPC_CLIENT;
@@ -31,9 +31,22 @@ pub(crate) struct Arch86LwkSystemCall {
3131

3232
impl SystemCallDispatch<u64> for Arch86LwkSystemCall {}
3333
// Use x86 syscall processing for not yet implemented systems:
34-
impl Arch86SystemDispatch for Arch86LwkSystemCall {}
3534
impl Arch86VSpaceDispatch for Arch86LwkSystemCall {}
3635

36+
impl SystemDispatch<u64> for Arch86LwkSystemCall {
37+
fn get_hardware_threads(&self, vaddr_buf: u64, vaddr_buf_len: u64) -> KResult<(u64, u64)> {
38+
self.local.get_hardware_threads(vaddr_buf, vaddr_buf_len)
39+
}
40+
41+
fn get_stats(&self) -> KResult<(u64, u64)> {
42+
self.local.get_stats()
43+
}
44+
45+
fn get_core_id(&self) -> KResult<(u64, u64)> {
46+
self.local.get_core_id()
47+
}
48+
}
49+
3750
impl FsDispatch<u64> for Arch86LwkSystemCall {
3851
fn open(&self, path: UserSlice, flags: FileFlags, modes: FileModes) -> KResult<(u64, u64)> {
3952
let pid = path.pid;

kernel/tests/s06_rackscale_tests.rs

Lines changed: 133 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -488,7 +488,7 @@ fn rackscale_userspace_multicore_test(is_shmem: bool) {
488488

489489
#[cfg(not(feature = "baremetal"))]
490490
#[test]
491-
fn s06_rackscale_shmem_request_core_remote() {
491+
fn s06_rackscale_shmem_request_core_remote_test() {
492492
use std::fs::remove_file;
493493
use std::sync::Arc;
494494
use std::thread::sleep;
@@ -501,16 +501,7 @@ fn s06_rackscale_shmem_request_core_remote() {
501501
let timeout = 30_000;
502502

503503
// Create build for both controller and client
504-
let controller_build = Arc::new(
505-
BuildArgs::default()
506-
.module("init")
507-
.kernel_feature("shmem")
508-
.kernel_feature("ethernet")
509-
.kernel_feature("rackscale")
510-
.release()
511-
.build(),
512-
);
513-
let client_build = Arc::new(
504+
let build = Arc::new(
514505
BuildArgs::default()
515506
.module("init")
516507
.user_feature("test-request-core-remote")
@@ -522,6 +513,7 @@ fn s06_rackscale_shmem_request_core_remote() {
522513
);
523514

524515
// Run DCM and controller in separate thread
516+
let controller_build = build.clone();
525517
let controller = std::thread::spawn(move || {
526518
let cmdline_controller = RunnerArgs::new_with_build("userspace-smp", &controller_build)
527519
.timeout(timeout)
@@ -538,7 +530,6 @@ fn s06_rackscale_shmem_request_core_remote() {
538530
let mut dcm = spawn_dcm(1, timeout)?;
539531
let mut p = spawn_nrk(&cmdline_controller)?;
540532

541-
//output += p.exp_string("Finished sending requests!")?.as_str();
542533
output += p.exp_eof()?.as_str();
543534

544535
dcm.send_control('c')?;
@@ -549,7 +540,7 @@ fn s06_rackscale_shmem_request_core_remote() {
549540
});
550541

551542
// Run client in separate thread. Wait a bit to make sure DCM and controller started
552-
let client1_build = client_build.clone();
543+
let client1_build = build.clone();
553544
let client = std::thread::spawn(move || {
554545
sleep(Duration::from_millis(5_000));
555546
let cmdline_client = RunnerArgs::new_with_build("userspace-smp", &client1_build)
@@ -562,6 +553,7 @@ fn s06_rackscale_shmem_request_core_remote() {
562553
.workers(3)
563554
.cores(2)
564555
.memory(4096)
556+
.nobuild() // Use single build for all for consistency
565557
.use_vmxnet3();
566558

567559
let mut output = String::new();
@@ -580,7 +572,7 @@ fn s06_rackscale_shmem_request_core_remote() {
580572
});
581573

582574
// Run client in separate thread. Wait a bit to make sure DCM and controller started
583-
let client2_build = client_build.clone();
575+
let client2_build = build.clone();
584576
let client2 = std::thread::spawn(move || {
585577
sleep(Duration::from_millis(10_000));
586578
let cmdline_client = RunnerArgs::new_with_build("userspace-smp", &client2_build)
@@ -612,3 +604,130 @@ fn s06_rackscale_shmem_request_core_remote() {
612604

613605
let _ignore = remove_file(SHMEM_PATH);
614606
}
607+
608+
#[cfg(not(feature = "baremetal"))]
609+
#[test]
610+
fn s06_rackscale_spawn_test() {
611+
use std::fs::remove_file;
612+
use std::sync::Arc;
613+
use std::thread::sleep;
614+
use std::time::Duration;
615+
616+
// Setup ivshmem file
617+
setup_shmem(SHMEM_PATH, SHMEM_SIZE);
618+
619+
setup_network(3);
620+
let timeout = 30_000;
621+
622+
const TOTAL_CLIENT_CORES: usize = 3;
623+
624+
// Create build for both controller and client
625+
let build = Arc::new(
626+
BuildArgs::default()
627+
.module("init")
628+
.user_feature("test-scheduler-smp")
629+
.kernel_feature("shmem")
630+
.kernel_feature("ethernet")
631+
.kernel_feature("rackscale")
632+
.release()
633+
.build(),
634+
);
635+
636+
// Run DCM and controller in separate thread
637+
let controller_build = build.clone();
638+
let controller = std::thread::spawn(move || {
639+
let cmdline_controller = RunnerArgs::new_with_build("userspace-smp", &controller_build)
640+
.timeout(timeout)
641+
.cmd("mode=controller transport=shmem")
642+
.shmem_size(SHMEM_SIZE as usize)
643+
.shmem_path(SHMEM_PATH)
644+
.tap("tap0")
645+
.no_network_setup()
646+
.workers(3)
647+
.use_vmxnet3();
648+
649+
let mut output = String::new();
650+
let mut qemu_run = || -> Result<WaitStatus> {
651+
let mut dcm = spawn_dcm(1, timeout)?;
652+
let mut p = spawn_nrk(&cmdline_controller)?;
653+
654+
output += p.exp_eof()?.as_str();
655+
656+
dcm.send_control('c')?;
657+
p.process.exit()
658+
};
659+
660+
let _ignore = qemu_run();
661+
});
662+
663+
sleep(Duration::from_millis(5_000));
664+
665+
// Run client in separate thread. Wait a bit to make sure DCM and controller started
666+
let client1_build = build.clone();
667+
let client = std::thread::spawn(move || {
668+
let cmdline_client = RunnerArgs::new_with_build("userspace-smp", &client1_build)
669+
.timeout(timeout)
670+
.cmd("mode=client transport=shmem")
671+
.shmem_size(SHMEM_SIZE as usize)
672+
.shmem_path(SHMEM_PATH)
673+
.tap("tap2")
674+
.no_network_setup()
675+
.workers(3)
676+
.cores(2)
677+
.memory(4096)
678+
.use_vmxnet3();
679+
680+
let mut output = String::new();
681+
let mut qemu_run = || -> Result<WaitStatus> {
682+
let mut p = spawn_nrk(&cmdline_client)?;
683+
684+
// should get two requests for two cores
685+
output += p
686+
.exp_string("Client finished processing core work request")?
687+
.as_str();
688+
output += p
689+
.exp_string("Client finished processing core work request")?
690+
.as_str();
691+
692+
p.process.exit()
693+
};
694+
sleep(Duration::from_millis(10_000));
695+
696+
let _ignore = qemu_run();
697+
});
698+
699+
// Run client in separate thread. Wait a bit to make sure DCM and controller started
700+
let client2_build = build.clone();
701+
let client2 = std::thread::spawn(move || {
702+
let cmdline_client = RunnerArgs::new_with_build("userspace-smp", &client2_build)
703+
.timeout(timeout)
704+
.cmd("mode=client transport=shmem")
705+
.shmem_size(SHMEM_SIZE as usize)
706+
.shmem_path(SHMEM_PATH)
707+
.tap("tap4")
708+
.no_network_setup()
709+
.workers(3)
710+
.nobuild() // Use build from previous client for consistency
711+
.use_vmxnet3();
712+
713+
let mut output = String::new();
714+
let mut qemu_run = || -> Result<WaitStatus> {
715+
let mut p = spawn_nrk(&cmdline_client)?;
716+
717+
for _i in 0..TOTAL_CLIENT_CORES {
718+
let r = p.exp_regex(r#"init: Hello from core (\d+)"#)?;
719+
output += r.0.as_str();
720+
output += r.1.as_str();
721+
}
722+
p.process.kill(SIGTERM)
723+
};
724+
725+
check_for_successful_exit(&cmdline_client, qemu_run(), output);
726+
});
727+
728+
controller.join().unwrap();
729+
client.join().unwrap();
730+
client2.join().unwrap();
731+
732+
let _ignore = remove_file(SHMEM_PATH);
733+
}

usr/init/src/init.rs

Lines changed: 38 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -150,34 +150,54 @@ fn request_core_remote_test() {
150150
let s = &vibrio::upcalls::PROCESS_SCHEDULER;
151151

152152
let threads = vibrio::syscalls::System::threads().expect("Can't get system topology");
153+
info!("Threads are: {:?}", threads);
153154

154-
// Only ask for more cores on the machine with only 1 core
155-
if threads.len() == 1 {
156-
let HW_THREAD_ID = 1;
157-
match vibrio::syscalls::Process::request_core(
158-
HW_THREAD_ID,
159-
VAddr::from(vibrio::upcalls::upcall_while_enabled as *const fn() as u64),
160-
) {
161-
Ok(_) => {
162-
info!("request_core_remote_test OK");
163-
}
164-
Err(e) => {
165-
error!("Can't spawn on {:?}: {:?}", HW_THREAD_ID, e);
155+
/*
156+
for thread in threads.iter() {
157+
if thread.id != 0 {
158+
let r = vibrio::syscalls::Process::request_core(
159+
thread.id,
160+
VAddr::from(vibrio::upcalls::upcall_while_enabled as *const fn() as u64),
161+
);
162+
match r {
163+
Ok(ctoken) => {
164+
info!("Spawned core on {:?} <-> {}", ctoken, thread.id);
165+
}
166+
Err(_e) => {
167+
panic!("Failed to spawn to core {}", thread.id);
168+
}
166169
}
167170
}
168-
} else {
169-
// Run scheduler on core 0
170-
let scb: SchedulerControlBlock = SchedulerControlBlock::new(0);
171-
loop {
172-
s.run(&scb);
173-
}
174171
}
172+
173+
for thread in threads {
174+
s.spawn(
175+
32 * 4096,
176+
move |_| {
177+
info!(
178+
"Hello from core {}",
179+
lineup::tls2::Environment::scheduler().core_id
180+
);
181+
},
182+
ptr::null_mut(),
183+
thread.id,
184+
None,
185+
);
186+
}
187+
188+
// Run scheduler on core 0
189+
let scb: SchedulerControlBlock = SchedulerControlBlock::new(0);
190+
loop {
191+
s.run(&scb);
192+
}
193+
*/
175194
}
176195

177196
fn scheduler_smp_test() {
178197
let s = &vibrio::upcalls::PROCESS_SCHEDULER;
179198

180199
let threads = vibrio::syscalls::System::threads().expect("Can't get system topology");
200+
info!("Threads are: {:?}", threads);
181201

182202
for thread in threads.iter() {
183203
if thread.id != 0 {

0 commit comments

Comments
 (0)