Lines Matching full:chunk
28 * There is special consideration for the first chunk which must handle
30 * are not online yet. In short, the first chunk is structured like so:
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
50 * of the bitmap. The reverse mapping from page to chunk is stored in
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
66 * set up the first chunk containing the kernel static percpu area
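The header comment excerpted above describes the bitmap granularity: each bit of a chunk's allocation map stands for PCPU_MIN_ALLOC_SIZE bytes, and a request is converted from bytes to bits before the bitmap is searched. A minimal, self-contained sketch of that conversion follows; it assumes the usual 4-byte minimum fragment and borrows the macro names for readability, but it is illustrative only and not lifted from the allocator itself.

#include <stdio.h>

#define PCPU_MIN_ALLOC_SHIFT	2			/* assumed: 4-byte fragments */
#define PCPU_MIN_ALLOC_SIZE	(1 << PCPU_MIN_ALLOC_SHIFT)

int main(void)
{
	size_t size = 24, align = 8;			/* hypothetical request */

	/* every alloc_map bit covers PCPU_MIN_ALLOC_SIZE bytes */
	size_t bits = size >> PCPU_MIN_ALLOC_SHIFT;	/* 6 bits needed */
	size_t bit_align = align >> PCPU_MIN_ALLOC_SHIFT; /* 2-bit alignment */

	printf("%zu-byte request aligned to %zu -> %zu bits, %zu-bit alignment\n",
	       size, align, bits, bit_align);
	return 0;
}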
146 /* the address of the first chunk which starts with the kernel static area */
158 * The first chunk which always exists. Note that unlike other
165 * Optional reserved chunk. This chunk reserves part of the first
166 * chunk and serves it for reserved allocations. When the reserved
172 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
174 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
178 * The reserved chunk doesn't contribute to the count.
184 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
185 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
194 * empty chunk.
208 * pcpu_addr_in_chunk - check if the address is served from this chunk
209 * @chunk: chunk of interest
213 * True if the address is served from this chunk.
215 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) in pcpu_addr_in_chunk() argument
219 if (!chunk) in pcpu_addr_in_chunk()
222 start_addr = chunk->base_addr + chunk->start_offset; in pcpu_addr_in_chunk()
223 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - in pcpu_addr_in_chunk()
224 chunk->end_offset; in pcpu_addr_in_chunk()
242 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) in pcpu_chunk_slot() argument
244 const struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_chunk_slot()
246 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || in pcpu_chunk_slot()
253 /* set the pointer to a chunk in a page struct */
259 /* obtain pointer to a chunk from a page struct */
275 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, in pcpu_chunk_addr() argument
278 return (unsigned long)chunk->base_addr + in pcpu_chunk_addr()
286 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) in pcpu_index_alloc_map() argument
288 return chunk->alloc_map + in pcpu_index_alloc_map()
314 * Note that a chunk uses the same hints as a block, so this can also check against
315 * the chunk's contig hint.
356 * @chunk: chunk of interest
357 * @bit_off: chunk offset
365 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, in pcpu_next_md_free_region() argument
373 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); in pcpu_next_md_free_region()
408 * @chunk: chunk of interest
411 * @bit_off: chunk offset
420 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, in pcpu_next_fit_region() argument
428 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); in pcpu_next_fit_region()
468 *bit_off = pcpu_chunk_map_bits(chunk); in pcpu_next_fit_region()
477 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ argument
478 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
479 (bit_off) < pcpu_chunk_map_bits((chunk)); \
481 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
483 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ argument
484 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
486 (bit_off) < pcpu_chunk_map_bits((chunk)); \
488 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
526 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, in __pcpu_chunk_move() argument
529 if (chunk != pcpu_reserved_chunk) { in __pcpu_chunk_move()
531 list_move(&chunk->list, &pcpu_chunk_lists[slot]); in __pcpu_chunk_move()
533 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]); in __pcpu_chunk_move()
537 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) in pcpu_chunk_move() argument
539 __pcpu_chunk_move(chunk, slot, true); in pcpu_chunk_move()
543 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
544 * @chunk: chunk of interest
547 * This function is called after an allocation or free changed @chunk.
548 * New slot according to the changed state is determined and @chunk is
549 * moved to the slot. Note that the reserved chunk is never put on
550 * chunk slots.
555 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) in pcpu_chunk_relocate() argument
557 int nslot = pcpu_chunk_slot(chunk); in pcpu_chunk_relocate()
560 if (chunk->isolated) in pcpu_chunk_relocate()
564 __pcpu_chunk_move(chunk, nslot, oslot < nslot); in pcpu_chunk_relocate()
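pcpu_chunk_slot() and pcpu_chunk_relocate() above keep every chunk on a list slot derived from how many free bytes it has, so an allocation can begin its search with the fullest chunks that can still satisfy it. The sketch below mimics that log2-style binning; the real helper (pcpu_size_to_slot()) differs in details such as the base shift and the treatment of completely free chunks, so treat the constants here as assumptions.

#include <stdio.h>

#define SLOT_BASE_SHIFT 5	/* assumed base shift; tiny free sizes collapse into the lowest slots */

/* find last (most significant) set bit, 1-based, in the spirit of the kernel's fls() */
static int fls_(unsigned int v)
{
	int bit = 0;

	while (v) {
		bit++;
		v >>= 1;
	}
	return bit;
}

/* map free byte count to a list slot; more free space means a higher slot */
static int size_to_slot(int free_bytes)
{
	int slot;

	if (free_bytes <= 0)
		return 0;				/* nothing usable */
	slot = fls_(free_bytes) - SLOT_BASE_SHIFT + 2;
	return slot > 1 ? slot : 1;
}

int main(void)
{
	int sizes[] = { 0, 16, 256, 4096, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6d free bytes -> slot %d\n", sizes[i], size_to_slot(sizes[i]));
	return 0;
}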
567 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk) in pcpu_isolate_chunk() argument
571 if (!chunk->isolated) { in pcpu_isolate_chunk()
572 chunk->isolated = true; in pcpu_isolate_chunk()
573 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages; in pcpu_isolate_chunk()
575 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]); in pcpu_isolate_chunk()
578 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk) in pcpu_reintegrate_chunk() argument
582 if (chunk->isolated) { in pcpu_reintegrate_chunk()
583 chunk->isolated = false; in pcpu_reintegrate_chunk()
584 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages; in pcpu_reintegrate_chunk()
585 pcpu_chunk_relocate(chunk, -1); in pcpu_reintegrate_chunk()
591 * @chunk: chunk of interest
598 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) in pcpu_update_empty_pages() argument
600 chunk->nr_empty_pop_pages += nr; in pcpu_update_empty_pages()
601 if (chunk != pcpu_reserved_chunk && !chunk->isolated) in pcpu_update_empty_pages()
698 * @chunk: chunk of interest
699 * @bit_off: chunk offset
712 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, in pcpu_block_update_scan() argument
724 block = chunk->md_blocks + s_index; in pcpu_block_update_scan()
727 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); in pcpu_block_update_scan()
734 * pcpu_chunk_refresh_hint - updates metadata about a chunk
735 * @chunk: chunk of interest
745 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) in pcpu_chunk_refresh_hint() argument
747 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_chunk_refresh_hint()
762 pcpu_for_each_md_free_region(chunk, bit_off, bits) in pcpu_chunk_refresh_hint()
768 * @chunk: chunk of interest
774 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) in pcpu_block_refresh_hint() argument
776 struct pcpu_block_md *block = chunk->md_blocks + index; in pcpu_block_refresh_hint()
777 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); in pcpu_block_refresh_hint()
800 * @chunk: chunk of interest
801 * @bit_off: chunk offset
805 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
808 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, in pcpu_block_update_hint_alloc() argument
811 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_block_update_hint_alloc()
828 s_block = chunk->md_blocks + s_index; in pcpu_block_update_hint_alloc()
829 e_block = chunk->md_blocks + e_index; in pcpu_block_update_hint_alloc()
844 pcpu_index_alloc_map(chunk, s_index), in pcpu_block_update_hint_alloc()
862 pcpu_block_refresh_hint(chunk, s_index); in pcpu_block_update_hint_alloc()
885 pcpu_index_alloc_map(chunk, e_index), in pcpu_block_update_hint_alloc()
898 pcpu_block_refresh_hint(chunk, e_index); in pcpu_block_update_hint_alloc()
923 pcpu_update_empty_pages(chunk, -nr_empty_pages); in pcpu_block_update_hint_alloc()
933 * The only time a full chunk scan is required is if the chunk in pcpu_block_update_hint_alloc()
935 * was used and therefore the chunk contig hint is still correct. in pcpu_block_update_hint_alloc()
942 pcpu_chunk_refresh_hint(chunk, false); in pcpu_block_update_hint_alloc()
947 * @chunk: chunk of interest
948 * @bit_off: chunk offset
956 * A chunk update is triggered if a page becomes free, a block becomes free,
963 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, in pcpu_block_update_hint_free() argument
983 s_block = chunk->md_blocks + s_index; in pcpu_block_update_hint_free()
984 e_block = chunk->md_blocks + e_index; in pcpu_block_update_hint_free()
1004 * remainder of the chunk is free. in pcpu_block_update_hint_free()
1006 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), in pcpu_block_update_hint_free()
1015 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), in pcpu_block_update_hint_free()
1044 pcpu_update_empty_pages(chunk, nr_empty_pages); in pcpu_block_update_hint_free()
1047 * Refresh chunk metadata when the free makes a block free or spans in pcpu_block_update_hint_free()
1053 pcpu_chunk_refresh_hint(chunk, true); in pcpu_block_update_hint_free()
1055 pcpu_block_update(&chunk->chunk_md, in pcpu_block_update_hint_free()
1062 * @chunk: chunk of interest
1063 * @bit_off: chunk offset
1073 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, in pcpu_is_populated() argument
1081 start = find_next_zero_bit(chunk->populated, end, start); in pcpu_is_populated()
1085 end = find_next_bit(chunk->populated, end, start + 1); in pcpu_is_populated()
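pcpu_is_populated() above only has to prove that no page in the candidate range is missing from the chunk's populated bitmap; if it finds a hole, the caller skips ahead. A toy version of that test, using a plain byte array instead of find_next_zero_bit() on chunk->populated (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* true iff every page in [start, end) is backed by memory */
static bool range_populated(const unsigned char *populated, int start, int end)
{
	int page;

	for (page = start; page < end; page++)
		if (!populated[page])		/* a hole disqualifies the range */
			return false;
	return true;
}

int main(void)
{
	unsigned char populated[8] = { 1, 1, 1, 0, 1, 1, 1, 1 };

	printf("[0,3) populated: %d\n", range_populated(populated, 0, 3));
	printf("[2,5) populated: %d\n", range_populated(populated, 2, 5));
	return 0;
}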
1093 * @chunk: chunk of interest
1098 * Given a chunk and an allocation spec, find the offset to begin searching
1102 * of a block or chunk, it is skipped. This errs on the side of caution
1110 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, in pcpu_find_block_fit() argument
1113 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_find_block_fit()
1119 * and creating a new chunk would happen soon. in pcpu_find_block_fit()
1126 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { in pcpu_find_block_fit()
1127 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, in pcpu_find_block_fit()
1135 if (bit_off == pcpu_chunk_map_bits(chunk)) in pcpu_find_block_fit()
1199 * @chunk: chunk of interest
1213 * Allocated addr offset in @chunk on success.
1216 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, in pcpu_alloc_area() argument
1219 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_alloc_area()
1226 oslot = pcpu_chunk_slot(chunk); in pcpu_alloc_area()
1232 pcpu_chunk_map_bits(chunk)); in pcpu_alloc_area()
1233 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, in pcpu_alloc_area()
1239 pcpu_block_update_scan(chunk, area_off, area_bits); in pcpu_alloc_area()
1242 bitmap_set(chunk->alloc_map, bit_off, alloc_bits); in pcpu_alloc_area()
1245 set_bit(bit_off, chunk->bound_map); in pcpu_alloc_area()
1246 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); in pcpu_alloc_area()
1247 set_bit(bit_off + alloc_bits, chunk->bound_map); in pcpu_alloc_area()
1249 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; in pcpu_alloc_area()
1254 chunk->alloc_map, in pcpu_alloc_area()
1255 pcpu_chunk_map_bits(chunk), in pcpu_alloc_area()
1258 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); in pcpu_alloc_area()
1260 pcpu_chunk_relocate(chunk, oslot); in pcpu_alloc_area()
1267 * @chunk: chunk of interest
1268 * @off: addr offset into chunk
1276 static int pcpu_free_area(struct pcpu_chunk *chunk, int off) in pcpu_free_area() argument
1278 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_free_area()
1282 pcpu_stats_area_dealloc(chunk); in pcpu_free_area()
1284 oslot = pcpu_chunk_slot(chunk); in pcpu_free_area()
1289 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), in pcpu_free_area()
1292 bitmap_clear(chunk->alloc_map, bit_off, bits); in pcpu_free_area()
1297 chunk->free_bytes += freed; in pcpu_free_area()
1302 pcpu_block_update_hint_free(chunk, bit_off, bits); in pcpu_free_area()
1304 pcpu_chunk_relocate(chunk, oslot); in pcpu_free_area()
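The fragments of pcpu_alloc_area() and pcpu_free_area() above show the two-bitmap convention: alloc_map marks which fragments are in use, while bound_map gets a set bit at the start of every allocation and one past its end, so a later free can recover the allocation's length from nothing but its offset. A compile-able toy model of that convention, with byte arrays standing in for the kernel bitmaps and made-up sizes:

#include <stdio.h>

#define MAP_BITS 16

static unsigned char alloc_map[MAP_BITS];
static unsigned char bound_map[MAP_BITS + 1] = { [MAP_BITS] = 1 };	/* sentinel */

static void alloc_area(int bit_off, int bits)
{
	int i;

	for (i = 0; i < bits; i++)
		alloc_map[bit_off + i] = 1;	/* fragments now in use */
	bound_map[bit_off] = 1;			/* boundary at the start */
	for (i = 1; i < bits; i++)
		bound_map[bit_off + i] = 0;	/* no boundaries inside */
	bound_map[bit_off + bits] = 1;		/* boundary one past the end */
}

static int free_area(int bit_off)
{
	int end = bit_off + 1;
	int i;

	while (!bound_map[end])			/* walk to the next boundary */
		end++;
	for (i = bit_off; i < end; i++)
		alloc_map[i] = 0;
	return end - bit_off;			/* number of fragments released */
}

int main(void)
{
	alloc_area(2, 4);			/* hypothetical 4-fragment allocation */
	alloc_area(6, 3);
	printf("freed %d fragments at offset 2\n", free_area(2));
	printf("freed %d fragments at offset 6\n", free_area(6));
	return 0;
}

This is why pcpu_free_area() only needs an offset: the extent of the allocation is implied by the boundaries recorded in bound_map.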
1319 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) in pcpu_init_md_blocks() argument
1323 /* init the chunk's block */ in pcpu_init_md_blocks()
1324 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); in pcpu_init_md_blocks()
1326 for (md_block = chunk->md_blocks; in pcpu_init_md_blocks()
1327 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); in pcpu_init_md_blocks()
1333 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1337 * This is responsible for creating the chunks that serve the first chunk. The
1343 * Chunk serving the region at @tmp_addr of @map_size.
1348 struct pcpu_chunk *chunk; in pcpu_alloc_first_chunk() local
1359 /* allocate chunk */ in pcpu_alloc_first_chunk()
1360 alloc_size = struct_size(chunk, populated, in pcpu_alloc_first_chunk()
1362 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); in pcpu_alloc_first_chunk()
1363 if (!chunk) in pcpu_alloc_first_chunk()
1367 INIT_LIST_HEAD(&chunk->list); in pcpu_alloc_first_chunk()
1369 chunk->base_addr = (void *)aligned_addr; in pcpu_alloc_first_chunk()
1370 chunk->start_offset = start_offset; in pcpu_alloc_first_chunk()
1371 chunk->end_offset = region_size - chunk->start_offset - map_size; in pcpu_alloc_first_chunk()
1373 chunk->nr_pages = region_size >> PAGE_SHIFT; in pcpu_alloc_first_chunk()
1374 region_bits = pcpu_chunk_map_bits(chunk); in pcpu_alloc_first_chunk()
1376 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); in pcpu_alloc_first_chunk()
1377 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); in pcpu_alloc_first_chunk()
1378 if (!chunk->alloc_map) in pcpu_alloc_first_chunk()
1383 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); in pcpu_alloc_first_chunk()
1384 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); in pcpu_alloc_first_chunk()
1385 if (!chunk->bound_map) in pcpu_alloc_first_chunk()
1389 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); in pcpu_alloc_first_chunk()
1390 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); in pcpu_alloc_first_chunk()
1391 if (!chunk->md_blocks) in pcpu_alloc_first_chunk()
1396 /* first chunk is free to use */ in pcpu_alloc_first_chunk()
1397 chunk->obj_cgroups = NULL; in pcpu_alloc_first_chunk()
1399 pcpu_init_md_blocks(chunk); in pcpu_alloc_first_chunk()
1402 chunk->immutable = true; in pcpu_alloc_first_chunk()
1403 bitmap_fill(chunk->populated, chunk->nr_pages); in pcpu_alloc_first_chunk()
1404 chunk->nr_populated = chunk->nr_pages; in pcpu_alloc_first_chunk()
1405 chunk->nr_empty_pop_pages = chunk->nr_pages; in pcpu_alloc_first_chunk()
1407 chunk->free_bytes = map_size; in pcpu_alloc_first_chunk()
1409 if (chunk->start_offset) { in pcpu_alloc_first_chunk()
1411 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; in pcpu_alloc_first_chunk()
1412 bitmap_set(chunk->alloc_map, 0, offset_bits); in pcpu_alloc_first_chunk()
1413 set_bit(0, chunk->bound_map); in pcpu_alloc_first_chunk()
1414 set_bit(offset_bits, chunk->bound_map); in pcpu_alloc_first_chunk()
1416 chunk->chunk_md.first_free = offset_bits; in pcpu_alloc_first_chunk()
1418 pcpu_block_update_hint_alloc(chunk, 0, offset_bits); in pcpu_alloc_first_chunk()
1421 if (chunk->end_offset) { in pcpu_alloc_first_chunk()
1423 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; in pcpu_alloc_first_chunk()
1424 bitmap_set(chunk->alloc_map, in pcpu_alloc_first_chunk()
1425 pcpu_chunk_map_bits(chunk) - offset_bits, in pcpu_alloc_first_chunk()
1428 chunk->bound_map); in pcpu_alloc_first_chunk()
1429 set_bit(region_bits, chunk->bound_map); in pcpu_alloc_first_chunk()
1431 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) in pcpu_alloc_first_chunk()
1435 return chunk; in pcpu_alloc_first_chunk()
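pcpu_alloc_first_chunk() above trims the served region to page boundaries and then pre-marks the partial head (start_offset) and tail (end_offset) as allocated so they are never handed out. A small worked example of that arithmetic, with made-up addresses and the usual 4 KiB page size assumed:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN_DOWN_TO(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP_TO(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long tmp_addr = 0x100340UL;	/* hypothetical, not page aligned */
	unsigned long map_size = 28 * 1024;	/* bytes this chunk will serve */

	unsigned long aligned_addr = ALIGN_DOWN_TO(tmp_addr, PAGE_SIZE);
	unsigned long start_offset = tmp_addr - aligned_addr;
	unsigned long region_size  = ALIGN_UP_TO(start_offset + map_size, PAGE_SIZE);
	unsigned long end_offset   = region_size - start_offset - map_size;

	printf("start_offset=%lu end_offset=%lu nr_pages=%lu\n",
	       start_offset, end_offset, region_size / PAGE_SIZE);
	return 0;
}

With these numbers the chunk spans 8 pages with an 832-byte head and a 3264-byte tail, and both offsets end up pre-set in alloc_map and bound_map exactly as the fragments above show.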
1440 struct pcpu_chunk *chunk; in pcpu_alloc_chunk() local
1443 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); in pcpu_alloc_chunk()
1444 if (!chunk) in pcpu_alloc_chunk()
1447 INIT_LIST_HEAD(&chunk->list); in pcpu_alloc_chunk()
1448 chunk->nr_pages = pcpu_unit_pages; in pcpu_alloc_chunk()
1449 region_bits = pcpu_chunk_map_bits(chunk); in pcpu_alloc_chunk()
1451 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * in pcpu_alloc_chunk()
1452 sizeof(chunk->alloc_map[0]), gfp); in pcpu_alloc_chunk()
1453 if (!chunk->alloc_map) in pcpu_alloc_chunk()
1456 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * in pcpu_alloc_chunk()
1457 sizeof(chunk->bound_map[0]), gfp); in pcpu_alloc_chunk()
1458 if (!chunk->bound_map) in pcpu_alloc_chunk()
1461 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * in pcpu_alloc_chunk()
1462 sizeof(chunk->md_blocks[0]), gfp); in pcpu_alloc_chunk()
1463 if (!chunk->md_blocks) in pcpu_alloc_chunk()
1468 chunk->obj_cgroups = in pcpu_alloc_chunk()
1469 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * in pcpu_alloc_chunk()
1471 if (!chunk->obj_cgroups) in pcpu_alloc_chunk()
1476 pcpu_init_md_blocks(chunk); in pcpu_alloc_chunk()
1479 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; in pcpu_alloc_chunk()
1481 return chunk; in pcpu_alloc_chunk()
1485 pcpu_mem_free(chunk->md_blocks); in pcpu_alloc_chunk()
1488 pcpu_mem_free(chunk->bound_map); in pcpu_alloc_chunk()
1490 pcpu_mem_free(chunk->alloc_map); in pcpu_alloc_chunk()
1492 pcpu_mem_free(chunk); in pcpu_alloc_chunk()
1497 static void pcpu_free_chunk(struct pcpu_chunk *chunk) in pcpu_free_chunk() argument
1499 if (!chunk) in pcpu_free_chunk()
1502 pcpu_mem_free(chunk->obj_cgroups); in pcpu_free_chunk()
1504 pcpu_mem_free(chunk->md_blocks); in pcpu_free_chunk()
1505 pcpu_mem_free(chunk->bound_map); in pcpu_free_chunk()
1506 pcpu_mem_free(chunk->alloc_map); in pcpu_free_chunk()
1507 pcpu_mem_free(chunk); in pcpu_free_chunk()
1512 * @chunk: pcpu_chunk which got populated
1516 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1520 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, in pcpu_chunk_populated() argument
1527 bitmap_set(chunk->populated, page_start, nr); in pcpu_chunk_populated()
1528 chunk->nr_populated += nr; in pcpu_chunk_populated()
1531 pcpu_update_empty_pages(chunk, nr); in pcpu_chunk_populated()
1536 * @chunk: pcpu_chunk which got depopulated
1540 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1544 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, in pcpu_chunk_depopulated() argument
1551 bitmap_clear(chunk->populated, page_start, nr); in pcpu_chunk_depopulated()
1552 chunk->nr_populated -= nr; in pcpu_chunk_depopulated()
1555 pcpu_update_empty_pages(chunk, -nr); in pcpu_chunk_depopulated()
1559 * Chunk management implementation.
1561 * To allow different implementations, chunk alloc/free and
1566 * pcpu_populate_chunk - populate the specified range of a chunk
1567 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1568 * pcpu_post_unmap_tlb_flush - flush the TLB for the specified range of a chunk
1569 * pcpu_create_chunk - create a new chunk
1570 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1574 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1576 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1578 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1581 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
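The declarations listed above are the hooks a chunk backend must provide. As far as I recall, current kernels pick one of two backends at compile time by textually including the matching implementation file, roughly as sketched below (shown from memory, not quoted from this listing):

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"		/* physically contiguous chunks, no vmalloc */
#else
#include "percpu-vm.c"		/* vmalloc-backed chunks, the common case */
#endif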
1592 * pcpu_chunk_addr_search - determine chunk containing specified address
1593 * @addr: address for which the chunk needs to be determined.
1599 * The address of the found chunk.
1603 /* is it in the dynamic region (first chunk)? */ in pcpu_chunk_addr_search()
1645 struct pcpu_chunk *chunk, int off, in pcpu_memcg_post_alloc_hook() argument
1651 if (likely(chunk && chunk->obj_cgroups)) { in pcpu_memcg_post_alloc_hook()
1652 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; in pcpu_memcg_post_alloc_hook()
1664 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) in pcpu_memcg_free_hook() argument
1668 if (unlikely(!chunk->obj_cgroups)) in pcpu_memcg_free_hook()
1671 objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; in pcpu_memcg_free_hook()
1674 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; in pcpu_memcg_free_hook()
1694 struct pcpu_chunk *chunk, int off, in pcpu_memcg_post_alloc_hook() argument
1699 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) in pcpu_memcg_free_hook() argument
1708 * @reserved: allocate from the reserved chunk if available
1727 struct pcpu_chunk *chunk, *next; in pcpu_alloc() local
1779 /* serve reserved allocations from the reserved chunk if available */ in pcpu_alloc()
1781 chunk = pcpu_reserved_chunk; in pcpu_alloc()
1783 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); in pcpu_alloc()
1785 err = "alloc from reserved chunk failed"; in pcpu_alloc()
1789 off = pcpu_alloc_area(chunk, bits, bit_align, off); in pcpu_alloc()
1793 err = "alloc from reserved chunk failed"; in pcpu_alloc()
1800 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], in pcpu_alloc()
1802 off = pcpu_find_block_fit(chunk, bits, bit_align, in pcpu_alloc()
1806 pcpu_chunk_move(chunk, 0); in pcpu_alloc()
1810 off = pcpu_alloc_area(chunk, bits, bit_align, off); in pcpu_alloc()
1812 pcpu_reintegrate_chunk(chunk); in pcpu_alloc()
1825 /* No space left. Create a new chunk. */ in pcpu_alloc()
1827 chunk = pcpu_create_chunk(pcpu_gfp); in pcpu_alloc()
1828 if (!chunk) { in pcpu_alloc()
1829 err = "failed to allocate new chunk"; in pcpu_alloc()
1834 pcpu_chunk_relocate(chunk, -1); in pcpu_alloc()
1842 pcpu_stats_area_alloc(chunk, size); in pcpu_alloc()
1852 for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) { in pcpu_alloc()
1853 WARN_ON(chunk->immutable); in pcpu_alloc()
1855 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); in pcpu_alloc()
1859 pcpu_free_area(chunk, off); in pcpu_alloc()
1863 pcpu_chunk_populated(chunk, rs, re); in pcpu_alloc()
1875 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); in pcpu_alloc()
1877 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); in pcpu_alloc()
1881 chunk->base_addr, off, ptr, in pcpu_alloc()
1884 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); in pcpu_alloc()
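The pcpu_alloc() path traced above is what sits behind the public percpu API. For orientation, here is a caller-side sketch (kernel code, but illustrative and not taken from this file; the my_stats names are hypothetical) of allocating, addressing, and freeing dynamic percpu memory:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct my_stats {			/* hypothetical per-CPU counters */
	unsigned long hits;
};

static struct my_stats __percpu *stats;

static int my_stats_init(void)
{
	int cpu;

	stats = alloc_percpu(struct my_stats);	/* backed by pcpu_alloc(), zeroed */
	if (!stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->hits = 0;	/* redundant, shows the accessor */

	return 0;
}

static void my_stats_exit(void)
{
	free_percpu(stats);			/* ends up in free_percpu() below */
}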
1985 struct pcpu_chunk *chunk, *next; in pcpu_balance_free() local
1993 list_for_each_entry_safe(chunk, next, free_head, list) { in pcpu_balance_free()
1994 WARN_ON(chunk->immutable); in pcpu_balance_free()
1997 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) in pcpu_balance_free()
2000 if (!empty_only || chunk->nr_empty_pop_pages == 0) in pcpu_balance_free()
2001 list_move(&chunk->list, &to_free); in pcpu_balance_free()
2008 list_for_each_entry_safe(chunk, next, &to_free, list) { in pcpu_balance_free()
2011 for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) { in pcpu_balance_free()
2012 pcpu_depopulate_chunk(chunk, rs, re); in pcpu_balance_free()
2014 pcpu_chunk_depopulated(chunk, rs, re); in pcpu_balance_free()
2017 pcpu_destroy_chunk(chunk); in pcpu_balance_free()
2039 struct pcpu_chunk *chunk; in pcpu_balance_populated() local
2071 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { in pcpu_balance_populated()
2072 nr_unpop = chunk->nr_pages - chunk->nr_populated; in pcpu_balance_populated()
2080 /* @chunk can't go away while pcpu_alloc_mutex is held */ in pcpu_balance_populated()
2081 for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) { in pcpu_balance_populated()
2085 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); in pcpu_balance_populated()
2090 pcpu_chunk_populated(chunk, rs, rs + nr); in pcpu_balance_populated()
2103 chunk = pcpu_create_chunk(gfp); in pcpu_balance_populated()
2106 if (chunk) { in pcpu_balance_populated()
2107 pcpu_chunk_relocate(chunk, -1); in pcpu_balance_populated()
2120 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2121 * Each chunk is scanned in reverse order to keep populated pages close to
2122 * the beginning of the chunk.
2130 struct pcpu_chunk *chunk; in pcpu_reclaim_populated() local
2139 * Once a chunk is isolated to the to_depopulate list, the chunk is no in pcpu_reclaim_populated()
2144 while ((chunk = list_first_entry_or_null( in pcpu_reclaim_populated()
2147 WARN_ON(chunk->immutable); in pcpu_reclaim_populated()
2150 * Scan the chunk's pages in reverse order to keep populated in pcpu_reclaim_populated()
2151 * pages close to the beginning of the chunk. in pcpu_reclaim_populated()
2153 freed_page_start = chunk->nr_pages; in pcpu_reclaim_populated()
2156 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { in pcpu_reclaim_populated()
2158 if (chunk->nr_empty_pop_pages == 0) in pcpu_reclaim_populated()
2161 /* reintegrate chunk to prevent atomic alloc failures */ in pcpu_reclaim_populated()
2171 * (first) page in the chunk. in pcpu_reclaim_populated()
2173 block = chunk->md_blocks + i; in pcpu_reclaim_populated()
2175 test_bit(i, chunk->populated)) { in pcpu_reclaim_populated()
2188 pcpu_depopulate_chunk(chunk, i + 1, end + 1); in pcpu_reclaim_populated()
2192 pcpu_chunk_depopulated(chunk, i + 1, end + 1); in pcpu_reclaim_populated()
2200 /* batch tlb flush per chunk to amortize cost */ in pcpu_reclaim_populated()
2203 pcpu_post_unmap_tlb_flush(chunk, in pcpu_reclaim_populated()
2210 if (reintegrate || chunk->free_bytes == pcpu_unit_size) in pcpu_reclaim_populated()
2211 pcpu_reintegrate_chunk(chunk); in pcpu_reclaim_populated()
2213 list_move_tail(&chunk->list, in pcpu_reclaim_populated()
2222 * For each chunk type, manage the number of fully free chunks and the number of
2259 struct pcpu_chunk *chunk; in free_percpu() local
2273 chunk = pcpu_chunk_addr_search(addr); in free_percpu()
2274 off = addr - chunk->base_addr; in free_percpu()
2276 size = pcpu_free_area(chunk, off); in free_percpu()
2278 pcpu_memcg_free_hook(chunk, off, size); in free_percpu()
2282 * If the chunk is isolated, it may be in the process of being in free_percpu()
2283 * reclaimed. Let reclaim manage cleaning up of that chunk. in free_percpu()
2285 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { in free_percpu()
2289 if (pos != chunk) { in free_percpu()
2293 } else if (pcpu_should_reclaim_chunk(chunk)) { in free_percpu()
2294 pcpu_isolate_chunk(chunk); in free_percpu()
2298 trace_percpu_free_percpu(chunk->base_addr, off, ptr); in free_percpu()
2357 * percpu allocator has special setup for the first chunk, which currently
2363 * first chunk. But the current code better reflects how the percpu allocator
2381 * aren't in the first chunk. in per_cpu_ptr_to_phys()
2383 * The address check is against full chunk sizes. pcpu_base_addr in per_cpu_ptr_to_phys()
2384 * points to the beginning of the first chunk including the in per_cpu_ptr_to_phys()
2385 * static region. Assumes good intent as the first chunk may in per_cpu_ptr_to_phys()
2528 * pcpu_setup_first_chunk - initialize the first percpu chunk
2532 * Initialize the first percpu chunk which contains the kernel static
2537 * chunk and prime the dynamic percpu allocator.
2542 * reserve after the static area in the first chunk. This reserves
2543 * the first chunk such that it's available only through reserved
2550 * allocation in the first chunk. The area between @ai->static_size +
2570 * The caller should have mapped the first chunk at @base_addr and
2573 * The first chunk will always contain a static and a dynamic region.
2574 * However, the static region is not managed by any chunk. If the first
2575 * chunk also contains a reserved region, it is served by two chunks -
2578 * The chunk serving the dynamic region is circulated in the chunk slots
2579 * and available for dynamic allocation like any other chunk.
2706 * Allocate chunk slots. The slots after the active slots are: in pcpu_setup_first_chunk()
2728 * dynamic region. The first chunk ends page aligned by in pcpu_setup_first_chunk()
2737 * Initialize first chunk: in pcpu_setup_first_chunk()
2738 * This chunk is broken up into 3 parts: in pcpu_setup_first_chunk()
2740 * - static - there is no backing chunk because these allocations can in pcpu_setup_first_chunk()
2745 * chunk. in pcpu_setup_first_chunk()
2757 /* include all regions of the first chunk */ in pcpu_setup_first_chunk()
2814 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
3018 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
3025 * This is a helper to ease setting up the embedded first percpu chunk and
3028 * If this function is used to setup the first chunk, it is allocated
3033 * This enables the first chunk to piggy back on the linear physical
3224 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3229 * chunk and can be called where pcpu_setup_first_chunk() is expected.