@@ -164,7 +164,6 @@ struct Scheduler<'a> {
     task_pool: TaskTree,     // tasks ordered by vruntime
     task_map: TaskInfoMap,   // map pids to the corresponding task information
     min_vruntime: u64,       // Keep track of the minimum vruntime across all tasks
-    nr_cpus_online: i32,     // Amount of the available CPUs in the system
     slice_ns: u64,           // Default time slice (in ns)
 }
 
@@ -186,19 +185,12 @@ impl<'a> Scheduler<'a> {
         // Initialize global minimum vruntime.
         let min_vruntime: u64 = 0;
 
-        // Initialize online CPUs counter.
-        //
-        // We should probably refresh this counter during the normal execution to support cpu
-        // hotplugging, but for now let's keep it simple and set this only at initialization).
-        let nr_cpus_online = libbpf_rs::num_possible_cpus().unwrap() as i32;
-
         // Return scheduler object.
         Ok(Self {
             bpf,
             task_pool,
             task_map,
             min_vruntime,
-            nr_cpus_online,
             slice_ns,
         })
     }
@@ -207,7 +199,7 @@ impl<'a> Scheduler<'a> {
     fn get_idle_cpus(&self) -> Vec<i32> {
         let mut idle_cpus = Vec::new();
 
-        for cpu in 0..self.nr_cpus_online {
+        for cpu in 0..self.bpf.get_nr_cpus() {
             let pid = self.bpf.get_cpu_pid(cpu);
             if pid == 0 {
                 idle_cpus.push(cpu);
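With the cached field gone, every caller now asks the BPF connector for the CPU count via self.bpf.get_nr_cpus(). The body of that helper is not shown in this excerpt; a plausible sketch, reusing the libbpf_rs::num_possible_cpus() call that the deleted initialization relied on (the type name BpfScheduler and the method body are assumptions, not the actual patch):

// Sketch only: what get_nr_cpus() on the BPF connector could look like if it
// simply wraps libbpf_rs; the real implementation is not in this excerpt.
impl<'a> BpfScheduler<'a> {
    pub fn get_nr_cpus(&self) -> i32 {
        // Querying the CPU count on demand removes the need for the cached
        // nr_cpus_online field in the Scheduler struct.
        libbpf_rs::num_possible_cpus().unwrap() as i32
    }
}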
@@ -228,10 +220,6 @@ impl<'a> Scheduler<'a> {
         min_vruntime: u64,
         slice_ns: u64,
     ) {
-        // Allow to scale the maximum time slice by a factor of 10 to increase the range of allowed
-        // time delta and give a better chance to prioritize tasks with higher weight.
-        let max_slice_ns = slice_ns * 10;
-
         // Evaluate last time slot used by the task, scaled by its priority (weight).
         //
         // NOTE: make sure to handle the case where the current sum_exec_runtime is less then the
@@ -251,15 +239,15 @@ impl<'a> Scheduler<'a> {
 
         // Make sure that the updated vruntime is in the range:
         //
-        //    (min_vruntime, min_vruntime + max_slice_ns]
+        //    (min_vruntime, min_vruntime + slice_ns]
         //
         // In this way we ensure that global vruntime is always progressing during each scheduler
         // run, preventing excessive starvation of the other tasks sitting in the self.task_pool
         // tree.
         //
-        // Moreover, limiting the accounted time slice to max_slice_ns, allows to prevent starving
-        // the current task for too long in the scheduler task pool.
-        task_info.vruntime = min_vruntime + slice.clamp(1, max_slice_ns);
+        // Moreover, limiting the accounted time slice to slice_ns, allows to prevent starving the
+        // current task for too long in the scheduler task pool.
+        task_info.vruntime = min_vruntime + slice.clamp(1, slice_ns);
 
         // Update total task cputime.
         task_info.sum_exec_runtime = sum_exec_runtime;
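The clamp above is the invariant this hunk tightens: every task is charged at least 1 ns, so the global vruntime always moves forward, and at most slice_ns, so even a task that consumed far more than one slice is not pushed so far back in the task pool that it starves. A standalone sketch with hypothetical values, not taken from the patch, showing both bounds:

// Sketch only: reproduces the clamp from the diff with made-up numbers.
fn charge_vruntime(min_vruntime: u64, slice: u64, slice_ns: u64) -> u64 {
    // Charge at least 1 ns and at most one full time slice.
    min_vruntime + slice.clamp(1, slice_ns)
}

fn main() {
    let slice_ns = 5_000_000; // assume a 5 ms default time slice
    // A task that consumed almost nothing still advances past min_vruntime.
    assert_eq!(charge_vruntime(1_000, 0, slice_ns), 1_001);
    // A task that consumed far more than one slice is capped at slice_ns.
    assert_eq!(charge_vruntime(1_000, 20_000_000, slice_ns), 5_001_000);
}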
@@ -327,7 +315,7 @@ impl<'a> Scheduler<'a> {
         let nr_queued = *self.bpf.nr_queued_mut();
         let nr_scheduled = *self.bpf.nr_scheduled_mut();
         let nr_waiting = nr_queued + nr_scheduled;
-        let nr_cpus = self.nr_cpus_online as u64;
+        let nr_cpus = self.bpf.get_nr_cpus() as u64;
 
         // Scale time slice, but never scale below 1 ms.
         let scaling = nr_waiting / nr_cpus + 1;
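Only the computation of the scaling factor is visible in this hunk; the lines that apply it fall outside the context. As a rough sketch of the intent stated by the comment (divide the default slice by the scaling factor, never going below 1 ms), where the division and the floor are assumptions rather than the patch's actual code:

// Sketch only: a plausible reading of "scale time slice, never below 1 ms".
// The division by `scaling` and the 1 ms floor are assumptions; only the
// computation of `scaling` itself appears in the hunk above.
fn scaled_slice_ns(slice_ns: u64, nr_waiting: u64, nr_cpus: u64) -> u64 {
    const NSEC_PER_MSEC: u64 = 1_000_000;
    // More waiting tasks per CPU means a larger scaling factor, hence shorter slices.
    let scaling = nr_waiting / nr_cpus + 1;
    (slice_ns / scaling).max(NSEC_PER_MSEC)
}

fn main() {
    // Example: 16 waiting tasks on 8 CPUs shrink a 20 ms slice to about 6.6 ms.
    println!("{} ns", scaled_slice_ns(20_000_000, 16, 8));
}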
@@ -475,7 +463,7 @@ impl<'a> Scheduler<'a> {
             Err(_) => -1,
         };
         info!("Running tasks:");
-        for cpu in 0..self.nr_cpus_online {
+        for cpu in 0..self.bpf.get_nr_cpus() {
             let pid = if cpu == sched_cpu {
                 "[self]".to_string()
             } else {