#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.enqueued).
 */
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

/*
 * clock_pelt scales time to reflect the effective amount of computation
 * done during the running delta, then syncs back to clock_task when the
 * rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time than at max capacity to do the same amount of
	 * work. In order to be invariant, we scale the delta to
	 * reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation.
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
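/*
 * Illustrative sketch only, not part of the mainline header: it spells
 * out the double scaling applied by update_rq_clock_pelt() above,
 * assuming cap_scale() is the usual "(x * y) >> SCHED_CAPACITY_SHIFT"
 * helper and SCHED_CAPACITY_SCALE is 1024. Example: on a CPU with half
 * the max compute capacity (512) running at 75% of its max frequency
 * (768), a 4000us wall-clock delta becomes 4000 * 512 / 1024 = 2000us,
 * then 2000 * 768 / 1024 = 1500us of PELT time. The helper name below
 * is hypothetical.
 */
static inline s64 pelt_scale_delta_example(s64 delta, unsigned long cpu_cap,
					   unsigned long freq_cap)
{
	/* CPU invariance: a smaller CPU does less work per unit of time */
	delta = (delta * cpu_cap) >> SCHED_CAPACITY_SHIFT;

	/* Frequency invariance: running below max frequency does too */
	delta = (delta * freq_cap) >> SCHED_CAPACITY_SHIFT;

	return delta;
}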
/*
 * When the rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account the
 * position in the current window (period_contrib) and we use the upper
 * bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;

	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered an always running rq without idle time to
	 * steal. This potential idle time is considered lost in
	 * this case. We keep track of this lost idle time compared
	 * to rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif
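#ifdef CONFIG_SMP
/*
 * Illustrative sketch only, not part of the mainline header: it factors
 * out the "fully busy" test used by update_idle_rq_clock_pelt() above.
 * Assuming LOAD_AVG_MAX is 47742 (as in the generated sched-pelt.h) and
 * SCHED_CAPACITY_SHIFT is 10, the divider evaluates to
 * (47742 - 1024) * 1024 - 47742 = 47791490. A rq whose summed util_sum
 * (cfs + rt + dl) has reached that bound had no idle time left to steal,
 * so the clock_pelt deficit is accounted in lost_idle_time instead. The
 * helper name is hypothetical.
 */
static inline bool rq_pelt_fully_busy_example(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;

	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	return util_sum >= divider;
}
#endif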