--- core.c (1c482452d5db0f52e4e8eed95bd7314eec537d78)
+++ core.c (46a87b3851f0d6eb05e6d83d5c5a30df0eca8f76)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/sched/core.c
  *
  * Core kernel scheduler code and related syscalls
  *
  * Copyright (C) 1991-2002  Linus Torvalds
  */
--- 747 unchanged lines hidden ---
 	struct load_weight *load = &p->se.load;

 	/*
 	 * SCHED_IDLE tasks get minimal weight:
 	 */
 	if (task_has_idle_policy(p)) {
 		load->weight = scale_load(WEIGHT_IDLEPRIO);
 		load->inv_weight = WMULT_IDLEPRIO;
-		p->se.runnable_weight = load->weight;
 		return;
 	}

 	/*
 	 * SCHED_OTHER tasks have to update their load when changing their
 	 * weight
 	 */
 	if (update_load && p->sched_class == &fair_sched_class) {
 		reweight_task(p, prio);
 	} else {
 		load->weight = scale_load(sched_prio_to_weight[prio]);
 		load->inv_weight = sched_prio_to_wmult[prio];
-		p->se.runnable_weight = load->weight;
 	}
 }
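Note: both deletions are the same cleanup. The newer tree no longer maintains a separate runnable load signal for the fair class, so set_load_weight() stops mirroring load->weight into p->se.runnable_weight and keeps only the weight/inv_weight pair. That pairing matters because inv_weight caches 2^32/weight, which lets the scheduler replace a division by a task's weight with a multiply and a shift. A minimal userspace sketch of the fixed-point trick follows; the constants come from the kernel's sched_prio_to_weight/sched_prio_to_wmult tables for nice 0, and the helper name is illustrative:

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32

/* delta / weight ~= (delta * inv_weight) >> WMULT_SHIFT */
static uint64_t scale_by_inverse(uint64_t delta, uint32_t inv_weight)
{
	return (uint64_t)(((__uint128_t)delta * inv_weight) >> WMULT_SHIFT);
}

int main(void)
{
	uint64_t delta = 3000000;	/* ns of runtime in one slice */
	uint32_t weight = 1024;		/* nice 0: sched_prio_to_weight[20] */
	uint32_t inv_weight = 4194304;	/* 2^32 / 1024: sched_prio_to_wmult[20] */

	printf("division: %llu, multiply+shift: %llu\n",
	       (unsigned long long)(delta / weight),
	       (unsigned long long)scale_by_inverse(delta, inv_weight));
	return 0;
}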

 #ifdef CONFIG_UCLAMP_TASK
 /*
  * Serializes updates of utilization clamp values
  *
  * The (slow-path) user-space triggers utilization clamp value updates which
--- 861 unchanged lines hidden ---
 	if (check && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}

 	if (cpumask_equal(p->cpus_ptr, new_mask))
 		goto out;

-	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+	/*
+	 * Picking a ~random cpu helps in cases where we are changing affinity
+	 * for groups of tasks (ie. cpuset), so that load balancing is not
+	 * immediately required to distribute the tasks within their new mask.
+	 */
+	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
 	if (dest_cpu >= nr_cpu_ids) {
 		ret = -EINVAL;
 		goto out;
 	}
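Note: cpumask_any_and_distribute() behaves like cpumask_any_and() except that it remembers where the previous search stopped and begins the next wrap-around search just past it, so a batch of tasks re-affined to the same mask does not all pile onto the first allowed CPU. A rough userspace model of that rotation; the kernel keeps the equivalent state in lib/cpumask.c, and this standalone version is illustrative only:

#include <stdio.h>

#define NR_CPUS 8

static int prev_cpu = -1;	/* models the static rotation state in lib/cpumask.c */

static int any_and_distribute(unsigned int valid, unsigned int newmask)
{
	unsigned int mask = valid & newmask;
	int i, cpu;

	for (i = 1; i <= NR_CPUS; i++) {	/* wrap-around scan after prev */
		cpu = (prev_cpu + i) % NR_CPUS;
		if (mask & (1u << cpu)) {
			prev_cpu = cpu;
			return cpu;
		}
	}
	return NR_CPUS;	/* like dest_cpu >= nr_cpu_ids: nothing allowed */
}

int main(void)
{
	/* Four tasks re-affined to CPUs {2,3,5}: successive calls rotate. */
	for (int t = 0; t < 4; t++)
		printf("task %d -> cpu %d\n", t,
		       any_and_distribute(0xffu, 0x2cu));
	return 0;
}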

 	do_set_cpus_allowed(p, new_mask);

 	if (p->flags & PF_KTHREAD) {
--- 1909 unchanged lines hidden ---
 		p->sched_class->update_curr(rq);
 	}
 	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &rf);

 	return ns;
 }

+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+void arch_set_thermal_pressure(struct cpumask *cpus,
+			       unsigned long th_pressure)
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpus)
+		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+
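Note: this added block is the generic write side of the thermal-pressure signal. A platform's thermal or cpufreq-cooling driver calls arch_set_thermal_pressure() for the CPUs it caps, and the scheduler reads the per-CPU value back on the tick via arch_scale_thermal_pressure(). The convention is that the value represents capacity currently lost to the frequency cap. A hedged sketch of how a driver might derive it; the helper name and the linear frequency-to-capacity assumption are illustrative, not taken from this diff:

#include <stdio.h>

/* Assumes capacity scales linearly with the frequency cap (a simplification). */
static unsigned long thermal_pressure_of(unsigned long max_capacity,
					 unsigned long capped_khz,
					 unsigned long max_khz)
{
	unsigned long capped_capacity = max_capacity * capped_khz / max_khz;

	return max_capacity - capped_capacity;	/* capacity lost to the cap */
}

int main(void)
{
	/* A 1024-capacity CPU capped from 2.0 GHz to 1.5 GHz: */
	printf("thermal pressure = %lu\n",
	       thermal_pressure_of(1024, 1500000, 2000000));
	return 0;
}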
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
  */
 void scheduler_tick(void)
 {
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
 	struct rq_flags rf;
+	unsigned long thermal_pressure;

+	arch_scale_freq_tick();
 	sched_clock_tick();

 	rq_lock(rq, &rf);

 	update_rq_clock(rq);
+	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
 	curr->sched_class->task_tick(rq, curr, 0);
 	calc_global_load_tick(rq);
 	psi_task_tick(rq);

 	rq_unlock(rq, &rf);

 	perf_event_task_tick();
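Note: two tick-time additions here. arch_scale_freq_tick() gives architectures a hook to refresh frequency-invariance scaling each tick, and the current thermal-pressure sample is folded into a PELT-style running average via update_thermal_load_avg(). The averaging uses rq_clock_thermal(), which right-shifts the task clock by a boot-time decay knob (sched_thermal_decay_shift); shifting the time base right by k stretches PELT's roughly 32ms half-life by 2^k, so the thermal signal decays more slowly than the other load signals. A small sketch of that effect, with an illustrative constant and helper:

#include <stdio.h>
#include <stdint.h>

#define PELT_HALFLIFE_MS 32	/* PELT's nominal half-life */

/* models kernel/sched/sched.h: rq_clock_task() >> sched_thermal_decay_shift */
static uint64_t rq_clock_thermal(uint64_t rq_clock_task_ns, int decay_shift)
{
	return rq_clock_task_ns >> decay_shift;
}

int main(void)
{
	for (int shift = 0; shift <= 4; shift++)
		printf("shift=%d -> thermal half-life ~%d ms\n",
		       shift, PELT_HALFLIFE_MS << shift);

	printf("1s of task clock at shift 2 -> %llu ns seen by PELT\n",
	       (unsigned long long)rq_clock_thermal(1000000000ULL, 2));
	return 0;
}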
--- 61 unchanged lines hidden ---
 	if (!tick_nohz_tick_stopped_cpu(cpu))
 		goto out_requeue;

 	rq_lock_irq(rq, &rf);
 	curr = rq->curr;
 	if (cpu_is_offline(cpu))
 		goto out_unlock;

-	curr = rq->curr;
 	update_rq_clock(rq);

 	if (!is_idle_task(curr)) {
 		/*
 		 * Make sure the next tick runs within a reasonable
 		 * amount of time.
 		 */
 		delta = rq_clock_task(rq) - curr->se.exec_start;
--- 4288 unchanged lines hidden ---