/* kernel/sched/pelt.h (revision f220d3eb) */
#ifdef CONFIG_SMP

int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1
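
/*
 * Illustrative sketch only, not part of this header: the hypothetical
 * helper below shows how a dequeue path could tag the utilization
 * snapshot with UTIL_AVG_UNCHANGED in its LSB before saving it; the
 * real tagging is done by the util_est code in fair.c.
 */
static inline unsigned int util_est_tag_unchanged(unsigned int util)
{
	/* The LSB reports "util_avg unchanged since dequeue" */
	return util | UTIL_AVG_UNCHANGED;
}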

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Clear the flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
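
/*
 * Minimal caller sketch (hypothetical, for illustration only): a PELT
 * update path that has just refreshed se->avg.util_avg would call
 * cfs_se_util_change() so that util_est sees the change; in this tree
 * the real caller is __update_load_avg_se() in pelt.c.
 */
static inline void example_note_util_avg_updated(struct sched_entity *se)
{
	/* Clear UTIL_AVG_UNCHANGED now that util_avg has moved */
	cfs_se_util_change(&se->avg);
}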

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif