```diff
--- core.c	(1087ad4e3f88c474b8134a482720782922bf3fdf)
+++ core.c	(0fb3978b0aac3a5c08637aed03cc2d65f793508f)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/sched/core.c
  *
  * Core kernel scheduler code and related syscalls
  *
  * Copyright (C) 1991-2002 Linus Torvalds
  */

--- 9038 unchanged lines hidden ---

@@ -9047,16 +9047,17 @@ int sched_cpu_activate(unsigned int cpu)
 	 * When going up, increment the number of cores with SMT present.
 	 */
 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
 		static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
+		sched_update_numa(cpu, true);
 		sched_domains_numa_masks_set(cpu);
 		cpuset_cpu_active();
 	}
 
 	/*
 	 * Put the rq online, if not already. This happens:
 	 *
 	 * 1) In the early boot process, because we build the real domains

--- 62 unchanged lines hidden ---

@@ -9125,20 +9126,22 @@ int sched_cpu_deactivate(unsigned int cpu)
 		static_branch_dec_cpuslocked(&sched_smt_present);
 
 	sched_core_cpu_deactivate(cpu);
 #endif
 
 	if (!sched_smp_initialized)
 		return 0;
 
+	sched_update_numa(cpu, false);
 	ret = cpuset_cpu_inactive(cpu);
 	if (ret) {
 		balance_push_set(cpu, false);
 		set_cpu_active(cpu, true);
+		sched_update_numa(cpu, true);
 		return ret;
 	}
 	sched_domains_numa_masks_clear(cpu);
 	return 0;
 }
 
 static void sched_rq_cpu_starting(unsigned int cpu)
 {

--- 86 unchanged lines hidden ---

@@ -9231,17 +9234,17 @@ int sched_cpu_dying(unsigned int cpu)
 	hrtick_clear(rq);
 	sched_core_cpu_dying(cpu);
 	return 0;
 }
 #endif
 
 void __init sched_init_smp(void)
 {
-	sched_init_numa();
+	sched_init_numa(NUMA_NO_NODE);
 
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
 	 * happen.
 	 */
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);

--- 1679 unchanged lines hidden ---
```
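The easy-to-miss detail in the deactivate hunk is the rollback pairing: the CPU is dropped from the NUMA bookkeeping with `sched_update_numa(cpu, false)` *before* `cpuset_cpu_inactive()`, and if that step fails it is re-added with `sched_update_numa(cpu, true)` before returning the error. The standalone C model below is only a sketch of that ordering: the stub bodies, the `model_cpu_deactivate` name, and the simulated failure for CPU 3 are illustrative inventions; only the call sequence is taken from the hunks above.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers touched by the diff; they only log. */
static void sched_update_numa(int cpu, bool online)
{
	printf("sched_update_numa(cpu=%d, online=%d)\n", cpu, online);
}

static int cpuset_cpu_inactive(int cpu)
{
	/* Pretend the cpuset update can fail, to exercise the rollback path. */
	return (cpu == 3) ? -1 : 0;
}

static void balance_push_set(int cpu, bool on)      { printf("balance_push_set(%d, %d)\n", cpu, on); }
static void set_cpu_active(int cpu, bool active)    { printf("set_cpu_active(%d, %d)\n", cpu, active); }
static void sched_domains_numa_masks_clear(int cpu) { printf("sched_domains_numa_masks_clear(%d)\n", cpu); }

/*
 * Mirrors the new ordering in the deactivate path after the diff:
 * remove the CPU from the NUMA state first, and if the cpuset step
 * fails, undo that with sched_update_numa(cpu, true) before bailing out.
 */
static int model_cpu_deactivate(int cpu)
{
	int ret;

	sched_update_numa(cpu, false);
	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		sched_update_numa(cpu, true);	/* roll back the NUMA update */
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	return 0;
}

int main(void)
{
	model_cpu_deactivate(2);	/* success path */
	model_cpu_deactivate(3);	/* simulated cpuset failure -> rollback */
	return 0;
}
```

Running the model just prints the two call sequences; the point it illustrates is that a failed deactivation leaves the NUMA state consistent with a CPU that is still active.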