/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/memcontrol.h>

/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 *
 * The scan hint is the largest known contiguous area before the contig hint.
 * It is not necessarily the size of the actual largest contiguous area,
 * though.  There is an invariant that scan_hint_start > contig_hint_start
 * iff scan_hint == contig_hint.  This is necessary because when scanning
 * forward, we don't know if a new contig hint would be better than the
 * current one.
 */
struct pcpu_block_md {
	int	scan_hint;		/* scan hint for block */
	int	scan_hint_start;	/* block relative starting
					   position of the scan hint */
	int	contig_hint;		/* contig hint for block */
	int	contig_hint_start;	/* block relative starting
					   position of the contig hint */
	int	left_free;		/* size of free space along
					   the left side of the block */
	int	right_free;		/* size of free space along
					   the right side of the block */
	int	first_free;		/* block position of first free */
	int	nr_bits;		/* total bits this block is
					   responsible for */
};

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	struct pcpu_block_md	chunk_md;	/* chunk-wide metadata block */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	bool			isolated;	/* isolated from active chunk
						   slots */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup	**obj_cgroups;	/* vector of object cgroups */
#endif

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}

/**
 * pcpu_nr_pages_to_map_bits - converts pages to the size of the bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
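/*
 * Worked example for the two conversions above (illustrative only;
 * assumes a 4K PAGE_SIZE with PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE and
 * PCPU_MIN_ALLOC_SIZE == 4 bytes).  A chunk serving 8 pages is tracked
 * by:
 *
 *   pcpu_chunk_nr_blocks():      8 * 4096 / 4096 = 8 md_blocks
 *   pcpu_nr_pages_to_map_bits(): 8 * 4096 / 4    = 8192 map bits
 *
 * i.e. 1024 allocation map bits per metadata block, one bit per
 * PCPU_MIN_ALLOC_SIZE quantum of the chunk.
 */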
/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}

/**
 * pcpu_obj_full_size - helper to calculate size of each accounted object
 * @size: size of area to allocate in bytes
 *
 * For each accounted object there is extra space used to store its
 * obj_cgroup membership if kmemcg is not disabled.  Charge it too.
 */
static inline size_t pcpu_obj_full_size(size_t size)
{
	size_t extra_size = 0;

#ifdef CONFIG_MEMCG_KMEM
	if (!mem_cgroup_kmem_disabled())
		extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

	return size * num_possible_cpus() + extra_size;
}
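/*
 * Worked example for pcpu_obj_full_size() (illustrative only; assumes
 * 8-byte pointers, PCPU_MIN_ALLOC_SIZE == 4, kmemcg enabled, and 8
 * possible CPUs).  A 16-byte accounted allocation is charged:
 *
 *   16 * 8     = 128 bytes for the per-CPU copies of the area
 *   16 / 4 * 8 =  32 bytes for the obj_cgroup pointers, one per
 *                 PCPU_MIN_ALLOC_SIZE unit spanned by the area
 *
 * for a total of 160 bytes.
 */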
#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes.  We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_alloc++;
	pcpu_stats.nr_cur_alloc++;
	pcpu_stats.nr_max_alloc =
		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
	pcpu_stats.min_alloc_size =
		min(pcpu_stats.min_alloc_size, size);
	pcpu_stats.max_alloc_size =
		max(pcpu_stats.max_alloc_size, size);

	chunk->nr_alloc++;
	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}

/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_dealloc++;
	pcpu_stats.nr_cur_alloc--;

	chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */
static inline void pcpu_stats_chunk_alloc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks++;
	pcpu_stats.nr_max_chunks =
		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks--;

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else /* !CONFIG_PERCPU_STATS */

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif /* _MM_PERCPU_INTERNAL_H */