#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 */
struct pcpu_block_md {
	int	contig_hint;		/* contig hint for block */
	int	contig_hint_start;	/* block relative starting
					   position of the contig hint */
	int	left_free;		/* size of free space along
					   the left side of the block */
	int	right_free;		/* size of free space along
					   the right side of the block */
	int	first_free;		/* block position of first free */
};

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size; /* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	int			contig_bits;	/* max contiguous size hint */
	int			contig_bits_start; /* contig_bits starting
						      offset */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	int			first_bit;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_slot;
extern int pcpu_nr_slots;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}

/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}

/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}

#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes.  We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	/* intentionally copies only the fixed-size head of *ai */
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_alloc++;
	pcpu_stats.nr_cur_alloc++;
	pcpu_stats.nr_max_alloc =
		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
	pcpu_stats.min_alloc_size =
		min(pcpu_stats.min_alloc_size, size);
	pcpu_stats.max_alloc_size =
		max(pcpu_stats.max_alloc_size, size);

	chunk->nr_alloc++;
	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}

/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_dealloc++;
	pcpu_stats.nr_cur_alloc--;

	chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 *
 * Takes pcpu_lock itself, unlike the area helpers above.
 */
static inline void pcpu_stats_chunk_alloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks++;
	pcpu_stats.nr_max_chunks =
		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 *
 * Takes pcpu_lock itself, unlike the area helpers above.
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks--;

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else

/* !CONFIG_PERCPU_STATS: all stats hooks compile away to no-ops. */

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif /* _MM_PERCPU_INTERNAL_H */