Skip to content
This repository was archived by the owner on Jul 17, 2025. It is now read-only.

Commit 85748b2

Browse files
committed
Add get_hardware_threads syscall rpc
1 parent 4e07877 commit 85748b2

File tree

8 files changed

+179
-38
lines changed

8 files changed

+179
-38
lines changed

kernel/src/arch/x86_64/rackscale/controller.rs

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -66,12 +66,9 @@ lazy_static! {
6666
}
6767

6868
lazy_static! {
69-
pub(crate) static ref HWTHREADS: Arc<Mutex<Vec<Vec<CpuThread>>>> = {
69+
pub(crate) static ref HWTHREADS: Arc<Mutex<Vec<(ClientId, CpuThread)>>> = {
7070
let mut hwthreads = Vec::try_with_capacity(get_num_clients() as usize)
7171
.expect("Failed to create vector for rack cpu threads");
72-
for i in 0..get_num_clients() {
73-
hwthreads.push(Vec::new());
74-
}
7572
Arc::new(Mutex::new(hwthreads))
7673
};
7774
}
@@ -229,6 +226,13 @@ fn register_rpcs(server: &mut Box<dyn RPCServer>) {
229226
&REQUEST_CORE_WORK_HANDLER,
230227
)
231228
.unwrap();
229+
230+
server
231+
.register(
232+
KernelRpc::GetHardwareThreads as RPCType,
233+
&GET_HARDWARE_THREADS_HANDLER,
234+
)
235+
.unwrap();
232236
}
233237

234238
// Lookup the local pid corresponding to a remote pid

kernel/src/arch/x86_64/rackscale/kernelrpc.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ pub(crate) enum KernelRpc {
4444
FileRename = 10,
4545
/// Create a directory.
4646
MkDir = 11,
47+
4748
/// Log (print) message of a process.
4849
Log = 12,
4950
/// Allocate physical memory for a process.
@@ -52,8 +53,12 @@ pub(crate) enum KernelRpc {
5253
ReleasePhysical = 14,
5354
/// Allocate a core for a process
5455
RequestCore = 15,
56+
5557
/// Request work (e.g., request cores) - used by client to ask controller for tasks
5658
RequestWork = 16,
59+
60+
/// Get the hardware threads for the rack
61+
GetHardwareThreads = 17,
5762
}
5863

5964
impl TryFrom<RPCType> for KernelRpc {
@@ -79,6 +84,7 @@ impl TryFrom<RPCType> for KernelRpc {
7984
14 => Ok(KernelRpc::ReleasePhysical),
8085
15 => Ok(KernelRpc::RequestCore),
8186
16 => Ok(KernelRpc::RequestWork),
87+
17 => Ok(KernelRpc::GetHardwareThreads),
8288
_ => Err(KError::InvalidRpcType),
8389
}
8490
}

kernel/src/arch/x86_64/rackscale/mod.rs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ pub(crate) mod kernelrpc;
1414
pub(crate) mod processops;
1515
pub(crate) mod registration;
1616
pub(crate) mod syscalls;
17+
pub(crate) mod systemops;
1718

1819
pub(crate) use self::kernelrpc::KernelRpc;
1920

@@ -40,6 +41,10 @@ pub(crate) const RELEASE_PHYSICAL_HANDLER: RPCHandler =
4041
processops::release_physical::handle_release_physical;
4142
pub(crate) const LOG_HANDLER: RPCHandler = processops::print::handle_log;
4243

44+
// Re-export handlers: system operations
45+
pub(crate) const GET_HARDWARE_THREADS_HANDLER: RPCHandler =
46+
systemops::get_hardware_threads::handle_get_hardware_threads;
47+
4348
// Client polls for work
4449
pub(crate) const REQUEST_CORE_WORK_HANDLER: RPCHandler =
4550
processops::request_core::handle_request_core_work;

kernel/src/arch/x86_64/rackscale/registration.rs

Lines changed: 1 addition & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -145,23 +145,9 @@ pub(crate) fn register_client(
145145
// Record information about the hardware threads
146146
info!("hwthreads: {:?}", hwthreads);
147147
let mut rack_threads = HWTHREADS.lock();
148-
let mut gtid = 0;
149-
for i in 0..node_id as usize {
150-
gtid += rack_threads[i].len();
151-
}
152-
info!("Starting client gtid at: {:?}", gtid);
153148
for hwthread in hwthreads {
154-
// Create new, globally unique global thread id (gtid)
155-
rack_threads[node_id as usize].push(CpuThread {
156-
id: gtid,
157-
node_id: hwthread.node_id,
158-
package_id: hwthread.package_id,
159-
core_id: hwthread.core_id,
160-
thread_id: hwthread.thread_id,
161-
});
162-
gtid += 1;
149+
rack_threads.push((node_id, *hwthread));
163150
}
164-
info!("rack_threads: {:?}", rack_threads);
165151

166152
Ok(node_id)
167153
} else {

kernel/src/arch/x86_64/rackscale/syscalls.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ use super::processops::allocate_physical::rpc_allocate_physical;
2424
use super::processops::print::rpc_log;
2525
use super::processops::release_physical::rpc_release_physical;
2626
use super::processops::request_core::rpc_request_core;
27+
use super::systemops::get_hardware_threads::rpc_get_hardware_threads;
2728

2829
pub(crate) struct Arch86LwkSystemCall {
2930
pub(crate) local: Arch86SystemCall,
@@ -35,7 +36,9 @@ impl Arch86VSpaceDispatch for Arch86LwkSystemCall {}
3536

3637
impl SystemDispatch<u64> for Arch86LwkSystemCall {
3738
fn get_hardware_threads(&self, vaddr_buf: u64, vaddr_buf_len: u64) -> KResult<(u64, u64)> {
38-
self.local.get_hardware_threads(vaddr_buf, vaddr_buf_len)
39+
let pid = crate::arch::process::current_pid()?;
40+
let mut client = RPC_CLIENT.lock();
41+
rpc_get_hardware_threads(&mut **client, pid, vaddr_buf, vaddr_buf_len).map_err(|e| e.into())
3942
}
4043

4144
fn get_stats(&self) -> KResult<(u64, u64)> {
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
// Copyright © 2021 University of Colorado. All Rights Reserved.
2+
// SPDX-License-Identifier: Apache-2.0 OR MIT
3+
4+
use alloc::vec::Vec;
5+
6+
use abomonation::{decode, encode, unsafe_abomonate, Abomonation};
7+
use core2::io::Result as IOResult;
8+
use core2::io::Write;
9+
use fallible_collections::FallibleVecGlobal;
10+
11+
use kpi::system::CpuThread;
12+
use rpc::rpc::*;
13+
use rpc::RPCClient;
14+
15+
use super::super::kernelrpc::*;
16+
use crate::arch::process::Ring3Process;
17+
use crate::arch::rackscale::controller::{get_local_pid, HWTHREADS};
18+
use crate::nrproc::NrProcess;
19+
use crate::process::{UVAddr, UserSlice};
20+
21+
pub(crate) fn rpc_get_hardware_threads(
22+
rpc_client: &mut dyn RPCClient,
23+
pid: usize,
24+
vaddr_buf: u64,
25+
vaddr_buf_len: u64,
26+
) -> Result<(u64, u64), RPCError> {
27+
// Setup result
28+
// TODO: make dynamic, for now, size copied from kpi implementation
29+
let mut res_data = [0u8; core::mem::size_of::<KernelRpcRes>() + 5 * 4096];
30+
31+
// Call GetHardwareThreads() RPC
32+
rpc_client
33+
.call(
34+
pid,
35+
KernelRpc::GetHardwareThreads as RPCType,
36+
&[&[]],
37+
&mut [&mut res_data],
38+
)
39+
.unwrap();
40+
41+
// Decode and return result
42+
if let Some((res, remaining)) = unsafe { decode::<KernelRpcRes>(&mut res_data) } {
43+
log::info!("GetHardwareThreads() {:?}", res);
44+
45+
if let Ok((data_len, n)) = res.ret {
46+
if data_len as usize <= remaining.len() && data_len <= vaddr_buf_len {
47+
log::info!("There's a match! Writing into userspace now");
48+
let mut user_slice =
49+
UserSlice::new(pid, UVAddr::try_from(vaddr_buf)?, data_len as usize)?;
50+
NrProcess::<Ring3Process>::write_to_userspace(
51+
&mut user_slice,
52+
&remaining[..data_len as usize],
53+
)?;
54+
log::info!("Returning value...");
55+
Ok((data_len, n))
56+
} else {
57+
log::info!(
58+
"Bad payload data: data_len: {:?} remaining.len(): {:?} vaddr_buf_len: {:?}",
59+
data_len,
60+
remaining.len(),
61+
vaddr_buf_len
62+
);
63+
Err(RPCError::MalformedResponse)
64+
}
65+
} else {
66+
res.ret
67+
}
68+
} else {
69+
Err(RPCError::MalformedResponse)
70+
}
71+
}
72+
73+
// RPC Handler function for get_hardware_threads() RPCs in the controller
74+
pub(crate) fn handle_get_hardware_threads(
75+
hdr: &mut RPCHeader,
76+
payload: &mut [u8],
77+
) -> Result<(), RPCError> {
78+
// Lookup local pid
79+
let local_pid = { get_local_pid(hdr.client_id, hdr.pid) };
80+
if local_pid.is_err() {
81+
return construct_error_ret(hdr, payload, RPCError::NoFileDescForPid);
82+
}
83+
let local_pid = local_pid.unwrap();
84+
85+
let rack_threads = HWTHREADS.lock();
86+
87+
// calculate total number of threads
88+
let mut hwthreads =
89+
Vec::try_with_capacity(rack_threads.len()).expect("failed to allocate space for hwthreads");
90+
for i in 0..rack_threads.len() {
91+
hwthreads.push(rack_threads[i].1);
92+
}
93+
log::info!(
94+
"Found {:?} hardware threads: {:?}",
95+
hwthreads.len(),
96+
hwthreads
97+
);
98+
99+
// Encode hwthread information into payload buffer
100+
let start = KernelRpcRes_SIZE as usize;
101+
let end = start
102+
+ hwthreads.len() * core::mem::size_of::<CpuThread>()
103+
+ core::mem::size_of::<Vec<CpuThread>>();
104+
let additional_data = end - start;
105+
unsafe { encode(&hwthreads, &mut &mut payload[start..end]) }
106+
.expect("Failed to encode hardware thread vector");
107+
log::info!("Sending back {:?} bytes of data", additional_data);
108+
109+
// Construct return
110+
let res = KernelRpcRes {
111+
ret: Ok((additional_data as u64, 0)),
112+
};
113+
construct_ret_extra_data(hdr, payload, res, additional_data as u64)
114+
}
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
// Copyright © 2022 University of Colorado. All Rights Reserved.
2+
// SPDX-License-Identifier: Apache-2.0 OR MIT
3+
4+
pub mod get_hardware_threads;

usr/init/src/init.rs

Lines changed: 37 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -150,27 +150,46 @@ fn request_core_remote_test() {
150150
let s = &vibrio::upcalls::PROCESS_SCHEDULER;
151151

152152
let threads = vibrio::syscalls::System::threads().expect("Can't get system topology");
153+
info!("threads: {:?}", threads);
153154

154-
// Only ask for more cores on the machine with only 1 core
155-
if threads.len() == 1 {
156-
let HW_THREAD_ID = 1;
157-
match vibrio::syscalls::Process::request_core(
158-
HW_THREAD_ID,
159-
VAddr::from(vibrio::upcalls::upcall_while_enabled as *const fn() as u64),
160-
) {
161-
Ok(_) => {
162-
info!("request_core_remote_test OK");
163-
}
164-
Err(e) => {
165-
error!("Can't spawn on {:?}: {:?}", HW_THREAD_ID, e);
155+
for thread in threads.iter() {
156+
if thread.id != 0 {
157+
let r = vibrio::syscalls::Process::request_core(
158+
thread.id,
159+
VAddr::from(vibrio::upcalls::upcall_while_enabled as *const fn() as u64),
160+
);
161+
match r {
162+
Ok(ctoken) => {
163+
info!("Spawned core on {:?} <-> {}", ctoken, thread.id);
164+
}
165+
Err(_e) => {
166+
panic!("Failed to spawn to core {}", thread.id);
167+
}
166168
}
167169
}
168-
} else {
169-
// Run scheduler on core 0
170-
let scb: SchedulerControlBlock = SchedulerControlBlock::new(0);
171-
loop {
172-
s.run(&scb);
173-
}
170+
}
171+
172+
/*
173+
for thread in threads {
174+
s.spawn(
175+
32 * 4096,
176+
move |_| {
177+
info!(
178+
"Hello from core {}",
179+
lineup::tls2::Environment::scheduler().core_id
180+
);
181+
},
182+
ptr::null_mut(),
183+
thread.id,
184+
None,
185+
);
186+
}
187+
*/
188+
189+
// Run scheduler on core 0
190+
let scb: SchedulerControlBlock = SchedulerControlBlock::new(0);
191+
loop {
192+
s.run(&scb);
174193
}
175194
}
176195

0 commit comments

Comments
 (0)