Skip to content

Commit 9b482f4

Browse files
author
Andrea Righi
committed
scx_rustland: determine the amount of cores via /proc/stat
libbpf_rs::num_possible_cpus() may take into account multi-thread/multi-core topology information that is not used efficiently by the scheduler at the moment. For simplicity, rely on /proc/stat to determine the amount of CPUs that can be used by the scheduler, and provide a proper abstraction to access this information from the bpf Rust module. Signed-off-by: Andrea Righi <[email protected]>
1 parent 0d107d6 commit 9b482f4

File tree

3 files changed

+37
-12
lines changed

3 files changed

+37
-12
lines changed

scheds/rust/scx_rustland/src/bpf.rs

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ use crate::bpf_intf;
77
use crate::bpf_skel::*;
88

99
use std::ffi::CStr;
10+
use std::fs::File;
11+
use std::io::{self, BufRead};
1012

1113
use anyhow::Context;
1214
use anyhow::Result;
@@ -200,6 +202,13 @@ impl<'a> BpfScheduler<'a> {
200202
let skel_builder = BpfSkelBuilder::default();
201203
let mut skel = skel_builder.open().context("Failed to open BPF program")?;
202204

205+
// Initialize online CPUs counter.
206+
//
207+
// We should probably refresh this counter during the normal execution to support cpu
208+
// hotplugging, but for now let's keep it simple and set this only at initialization.
209+
let nr_cpus_online = Self::count_cpus()?;
210+
skel.rodata_mut().num_possible_cpus = nr_cpus_online;
211+
203212
// Set scheduler options (defined in the BPF part).
204213
skel.bss_mut().usersched_pid = std::process::id();
205214
skel.rodata_mut().slice_ns = slice_us * 1000;
@@ -226,6 +235,30 @@ impl<'a> BpfScheduler<'a> {
226235
}
227236
}
228237

238+
// Return the amount of available CPUs in the system (according to /proc/stat).
//
// /proc/stat begins with an aggregate "cpu" summary line, followed by one
// "cpuN" line per online CPU. Count only the per-CPU "cpuN" entries, so the
// result no longer depends on the aggregate line being present and first
// (the previous sentinel-based count started from -1 to compensate for it,
// and could return -1 on an unexpected file layout).
fn count_cpus() -> io::Result<i32> {
    let file = File::open("/proc/stat")?;
    let reader = io::BufReader::new(file);

    let mut cpu_count = 0;
    for line in reader.lines() {
        let line = line?;
        if line.starts_with("cpu") {
            // A per-CPU entry is "cpu" immediately followed by an ASCII
            // digit (e.g. "cpu0"); the aggregate "cpu " line is skipped.
            if line.as_bytes().get(3).map_or(false, |b| b.is_ascii_digit()) {
                cpu_count += 1;
            }
        } else {
            // The cpu lines are grouped at the top of the file; stop at
            // the first non-cpu line.
            break;
        }
    }

    Ok(cpu_count)
}
255+
256+
// Override the default scheduler time slice (in us).
257+
#[allow(dead_code)]
258+
pub fn get_nr_cpus(&self) -> i32 {
259+
self.skel.rodata().num_possible_cpus
260+
}
261+
229262
// Override the default scheduler time slice (in us).
230263
#[allow(dead_code)]
231264
pub fn set_effective_slice_us(&mut self, slice_us: u64) {

scheds/rust/scx_rustland/src/bpf/main.bpf.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ char _license[] SEC("license") = "GPL";
4444
#define MAX_CPUS 1024
4545

4646
/* !0 for veristat, set during init */
47-
const volatile u32 num_possible_cpus = 8;
47+
const volatile s32 num_possible_cpus = 8;
4848

4949
/*
5050
* Exit info (passed to the user-space counterpart).

scheds/rust/scx_rustland/src/main.rs

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,6 @@ struct Scheduler<'a> {
164164
task_pool: TaskTree, // tasks ordered by vruntime
165165
task_map: TaskInfoMap, // map pids to the corresponding task information
166166
min_vruntime: u64, // Keep track of the minimum vruntime across all tasks
167-
nr_cpus_online: i32, // Amount of the available CPUs in the system
168167
slice_ns: u64, // Default time slice (in ns)
169168
}
170169

@@ -186,19 +185,12 @@ impl<'a> Scheduler<'a> {
186185
// Initialize global minimum vruntime.
187186
let min_vruntime: u64 = 0;
188187

189-
// Initialize online CPUs counter.
190-
//
191-
// We should probably refresh this counter during the normal execution to support cpu
192-
// hotplugging, but for now let's keep it simple and set this only at initialization).
193-
let nr_cpus_online = libbpf_rs::num_possible_cpus().unwrap() as i32;
194-
195188
// Return scheduler object.
196189
Ok(Self {
197190
bpf,
198191
task_pool,
199192
task_map,
200193
min_vruntime,
201-
nr_cpus_online,
202194
slice_ns,
203195
})
204196
}
@@ -207,7 +199,7 @@ impl<'a> Scheduler<'a> {
207199
fn get_idle_cpus(&self) -> Vec<i32> {
208200
let mut idle_cpus = Vec::new();
209201

210-
for cpu in 0..self.nr_cpus_online {
202+
for cpu in 0..self.bpf.get_nr_cpus() {
211203
let pid = self.bpf.get_cpu_pid(cpu);
212204
if pid == 0 {
213205
idle_cpus.push(cpu);
@@ -327,7 +319,7 @@ impl<'a> Scheduler<'a> {
327319
let nr_queued = *self.bpf.nr_queued_mut();
328320
let nr_scheduled = *self.bpf.nr_scheduled_mut();
329321
let nr_waiting = nr_queued + nr_scheduled;
330-
let nr_cpus = self.nr_cpus_online as u64;
322+
let nr_cpus = self.bpf.get_nr_cpus() as u64;
331323

332324
// Scale time slice, but never scale below 1 ms.
333325
let scaling = nr_waiting / nr_cpus + 1;
@@ -475,7 +467,7 @@ impl<'a> Scheduler<'a> {
475467
Err(_) => -1,
476468
};
477469
info!("Running tasks:");
478-
for cpu in 0..self.nr_cpus_online {
470+
for cpu in 0..self.bpf.get_nr_cpus() {
479471
let pid = if cpu == sched_cpu {
480472
"[self]".to_string()
481473
} else {

0 commit comments

Comments
 (0)