Lines Matching refs:dl_b
169 void __dl_update(struct dl_bw *dl_b, s64 bw) in __dl_update() argument
171 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); in __dl_update()
204 void __dl_update(struct dl_bw *dl_b, s64 bw) in __dl_update() argument
206 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); in __dl_update()
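
The two __dl_update() definitions above are the SMP and !SMP variants: with SMP the dl_bw sits inside the root_domain and the bandwidth delta is spread over every runqueue in the domain's span, while on UP it is embedded directly in the single dl_rq. Either way the delta feeds the per-runqueue extra_bw used by bandwidth reclaiming.
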
213 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_sub() argument
215 dl_b->total_bw -= tsk_bw; in __dl_sub()
216 __dl_update(dl_b, (s32)tsk_bw / cpus); in __dl_sub()
220 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_add() argument
222 dl_b->total_bw += tsk_bw; in __dl_add()
223 __dl_update(dl_b, -((s32)tsk_bw / cpus)); in __dl_add()
227 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw) in __dl_overflow() argument
229 return dl_b->bw != -1 && in __dl_overflow()
230 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; in __dl_overflow()
430 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending() local
434 raw_spin_lock(&dl_b->lock); in task_non_contending()
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
436 raw_spin_unlock(&dl_b->lock); in task_non_contending()
494 void init_dl_bw(struct dl_bw *dl_b) in init_dl_bw() argument
496 raw_spin_lock_init(&dl_b->lock); in init_dl_bw()
498 dl_b->bw = -1; in init_dl_bw()
500 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); in init_dl_bw()
501 dl_b->total_bw = 0; in init_dl_bw()
666 struct dl_bw *dl_b; in dl_task_offline_migration() local
717 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
718 raw_spin_lock(&dl_b->lock); in dl_task_offline_migration()
719 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
720 raw_spin_unlock(&dl_b->lock); in dl_task_offline_migration()
722 dl_b = &later_rq->rd->dl_bw; in dl_task_offline_migration()
723 raw_spin_lock(&dl_b->lock); in dl_task_offline_migration()
724 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
725 raw_spin_unlock(&dl_b->lock); in dl_task_offline_migration()
1414 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer() local
1422 raw_spin_lock(&dl_b->lock); in inactive_task_timer()
1423 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1424 raw_spin_unlock(&dl_b->lock); in inactive_task_timer()
2548 struct dl_bw *dl_b; in dl_add_task_root_domain() local
2558 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2559 raw_spin_lock(&dl_b->lock); in dl_add_task_root_domain()
2561 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2563 raw_spin_unlock(&dl_b->lock); in dl_add_task_root_domain()
2766 struct dl_bw *dl_b; in sched_dl_global_validate() local
2781 dl_b = dl_bw_of(cpu); in sched_dl_global_validate()
2784 raw_spin_lock_irqsave(&dl_b->lock, flags); in sched_dl_global_validate()
2785 if (new_bw * cpus < dl_b->total_bw) in sched_dl_global_validate()
2787 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in sched_dl_global_validate()
2816 struct dl_bw *dl_b; in sched_dl_do_global() local
2831 dl_b = dl_bw_of(cpu); in sched_dl_do_global()
2833 raw_spin_lock_irqsave(&dl_b->lock, flags); in sched_dl_do_global()
2834 dl_b->bw = new_bw; in sched_dl_do_global()
2835 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in sched_dl_do_global()
2857 struct dl_bw *dl_b = dl_bw_of(cpu); in sched_dl_overflow() local
2872 raw_spin_lock(&dl_b->lock); in sched_dl_overflow()
2877 !__dl_overflow(dl_b, cap, 0, new_bw)) { in sched_dl_overflow()
2879 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2880 __dl_add(dl_b, new_bw, cpus); in sched_dl_overflow()
2883 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2891 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2892 __dl_add(dl_b, new_bw, cpus); in sched_dl_overflow()
2903 raw_spin_unlock(&dl_b->lock); in sched_dl_overflow()
3061 struct dl_bw *dl_b; in dl_bw_manage() local
3065 dl_b = dl_bw_of(cpu); in dl_bw_manage()
3066 raw_spin_lock_irqsave(&dl_b->lock, flags); in dl_bw_manage()
3069 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); in dl_bw_manage()
3073 overflow = __dl_overflow(dl_b, cap, 0, dl_bw); in dl_bw_manage()
3082 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); in dl_bw_manage()
3086 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in dl_bw_manage()