/* SPDX-License-Identifier: GPL-2.0
 *
 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
 * Do not use in new code.
 */
#ifndef _BLK_CGROUP_RWSTAT_H
#define _BLK_CGROUP_RWSTAT_H

#include "blk-cgroup.h"

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

struct blkg_rwstat_sample {
	u64				cnt[BLKG_RWSTAT_NR];
};

static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
		unsigned int idx)
{
	return atomic64_read(&rwstat->aux_cnt[idx]) +
		percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}

int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
		int off, struct blkg_rwstat_sample *sum);


/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where to store the current per-direction sums
 *
 * Read the current snapshot of @rwstat and return it in @result.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
		struct blkg_rwstat_sample *result)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		result->cnt[i] =
			percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}
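
/*
 * Illustrative sketch (not part of this header's API): a policy would
 * typically embed a struct blkg_rwstat in its per-blkg data, account each
 * bio with blkg_rwstat_add() and pull a snapshot with blkg_rwstat_read().
 * The names below (struct my_blkg_pd, my_account_bio, my_read_bytes) are
 * hypothetical; only the blkg_rwstat calls and the bio fields are real.
 *
 *	struct my_blkg_pd {
 *		struct blkg_policy_data	pd;
 *		struct blkg_rwstat	bytes;
 *	};
 *
 *	static void my_account_bio(struct my_blkg_pd *mypd, struct bio *bio)
 *	{
 *		blkg_rwstat_add(&mypd->bytes, bio->bi_opf,
 *				bio->bi_iter.bi_size);
 *	}
 *
 *	static u64 my_read_bytes(struct my_blkg_pd *mypd)
 *	{
 *		struct blkg_rwstat_sample tmp = { };
 *
 *		blkg_rwstat_read(&mypd->bytes, &tmp);
 *		return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
 *	}
 */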

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat_sample tmp = { };

	blkg_rwstat_read(rwstat, &tmp);
	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
#endif	/* _BLK_CGROUP_RWSTAT_H */
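
/*
 * Illustrative sketch (hypothetical, not part of this header): the aux
 * counters exist so a dying child's numbers survive in recursive sums.
 * A policy's pd_offline_fn() could therefore fold the child's counters
 * into its parent's aux counters with blkg_rwstat_add_aux(), roughly as
 * below, reusing the hypothetical struct my_blkg_pd and my_policy names
 * from the sketch above.
 *
 *	static void my_pd_offline_fn(struct blkg_policy_data *pd)
 *	{
 *		struct blkcg_gq *parent = pd_to_blkg(pd)->parent;
 *		struct my_blkg_pd *mypd, *ppd;
 *
 *		if (!parent)
 *			return;
 *		mypd = container_of(pd, struct my_blkg_pd, pd);
 *		ppd = container_of(blkg_to_pd(parent, &my_policy),
 *				   struct my_blkg_pd, pd);
 *		blkg_rwstat_add_aux(&ppd->bytes, &mypd->bytes);
 *	}
 */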