/openbmc/linux/net/core/

page_pool.c
  176  memcpy(&pool->p, params, sizeof(pool->p));  in page_pool_init()
  298  pool->alloc.cache[pool->alloc.count++] = page;  in page_pool_refill_alloc_cache()
  314  page = pool->alloc.cache[--pool->alloc.count];  in page_pool_refill_alloc_cache()
  329  page = pool->alloc.cache[--pool->alloc.count];  in __page_pool_get_cached()
  380  pool->p.init_callback(page, pool->p.init_arg);  in page_pool_set_pp_info()
  431  return pool->alloc.cache[--pool->alloc.count];  in __page_pool_alloc_pages_slow()
  453  pool->alloc.cache[pool->alloc.count++] = page;  in __page_pool_alloc_pages_slow()
  462  page = pool->alloc.cache[--pool->alloc.count];  in __page_pool_alloc_pages_slow()
  579  pool->alloc.cache[pool->alloc.count++] = page;  in page_pool_recycle_in_cache()
  808  pool->disconnect(pool);  in page_pool_free()
  [all …]
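
These matches trace page_pool's internal per-pool allocation cache. For orientation, a minimal sketch of the driver-facing side of this API; the device pointer, sizing, and header path are assumptions (kernels before 6.6 use <net/page_pool.h> rather than <net/page_pool/helpers.h>):

```c
#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>

static struct page_pool *rx_page_pool_create(struct device *dev)
{
	struct page_pool_params pp = {
		.order     = 0,               /* one 4 KiB page per buffer */
		.pool_size = 256,             /* illustrative cache depth */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
		.flags     = PP_FLAG_DMA_MAP, /* pool maps pages for DMA */
	};

	return page_pool_create(&pp);     /* ERR_PTR() on failure */
}

static void rx_refill_one(struct page_pool *pool)
{
	/* The fast path pops pool->alloc.cache[] as seen above; an empty
	 * cache falls through to __page_pool_alloc_pages_slow(). */
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return;

	/* ... post to the RX ring; recycle on completion with
	 * page_pool_put_full_page(pool, page, false); */
}
```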

/openbmc/linux/net/xdp/

xsk_buff_pool.c
  37   if (!pool)  in xp_destroy()
  42   kvfree(pool);  in xp_destroy()
  65   if (!pool)  in xp_create_and_assign_umem()
  100  xskb->pool = pool;  in xp_create_and_assign_umem()
  107  xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);  in xp_create_and_assign_umem()
  201  bpf.xsk.pool = pool;  in xp_assign_dev()
  235  if (!pool->fq || !pool->cq)  in xp_assign_dev_shared()
  251  xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);  in xp_clear_dev()
  286  if (!pool)  in xp_put_pool()
  477  *addr + pool->chunk_size > pool->addrs_cnt ||  in xp_check_unaligned()
  [all …]

/openbmc/qemu/util/

thread-pool.c
  38   ThreadPool *pool;  member
  85   while (pool->cur_threads <= pool->max_threads) {  in worker_thread()
  95   pool->cur_threads > pool->min_threads) {  in worker_thread()
  221  ThreadPool *pool = elem->pool;  in thread_pool_cancel() local
  255  req->pool = pool;  in thread_pool_submit_aio()
  262  if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {  in thread_pool_submit_aio()
  331  memset(pool, 0, sizeof(*pool));  in thread_pool_init_one()
  349  return pool;  in thread_pool_new()
  354  if (!pool) {  in thread_pool_free()
  364  pool->cur_threads -= pool->new_threads;  in thread_pool_free()
  [all …]
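
The matches cover worker spawning and reaping. Below is a rough usage sketch; note that the submit signatures have shifted across QEMU releases, so the pool-explicit variant shown here is an assumption matching the code above:

```c
#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/thread-pool.h"

/* Runs in a worker thread; blocking, synchronous code belongs here. */
static int blocking_work(void *opaque)
{
    /* e.g. a synchronous pread()/pwrite() */
    return 0;
}

/* Runs back in the AioContext once the worker finishes. */
static void work_done(void *opaque, int ret)
{
}

static void submit_example(AioContext *ctx, void *arg)
{
    ThreadPool *pool = aio_get_thread_pool(ctx);

    /* worker_thread() above spawns threads on demand up to max_threads
     * and reaps idle ones back down to min_threads. */
    thread_pool_submit_aio(pool, blocking_work, arg, work_done, arg);
}
```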

/openbmc/linux/mm/

mempool.c
  129  BUG_ON(pool->curr_nr >= pool->min_nr);  in add_element()
  132  pool->elements[pool->curr_nr++] = element;  in add_element()
  137  void *element = pool->elements[--pool->curr_nr];  in remove_element()
  160  pool->free(element, pool->pool_data);  in mempool_exit()
  204  while (pool->curr_nr < pool->min_nr) {  in mempool_init_node()
  207  element = pool->alloc(gfp_mask, pool->pool_data);  in mempool_init_node()
  318  pool->free(element, pool->pool_data);  in mempool_resize()
  345  while (pool->curr_nr < pool->min_nr) {  in mempool_resize()
  351  if (pool->curr_nr < pool->min_nr) {  in mempool_resize()
  499  if (likely(pool->curr_nr < pool->min_nr)) {  in mempool_free()
  [all …]
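
These are the guts of the reserve: add_element() and remove_element() push and pop pool->elements[]. A minimal client-side sketch follows; the cache name, object size, and reserve depth are illustrative assumptions:

```c
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *io_cache;  /* hypothetical backing slab */
static mempool_t *io_pool;

static int io_pool_setup(void)
{
	io_cache = kmem_cache_create("example_io", 256, 0, 0, NULL);
	if (!io_cache)
		return -ENOMEM;

	/* Keep 16 elements in reserve so allocation can always make
	 * forward progress under memory pressure (the
	 * pool->elements[--pool->curr_nr] pop seen in remove_element()). */
	io_pool = mempool_create_slab_pool(16, io_cache);
	if (!io_pool) {
		kmem_cache_destroy(io_cache);
		return -ENOMEM;
	}
	return 0;
}

static void io_example(void)
{
	void *obj = mempool_alloc(io_pool, GFP_NOIO);

	/* mempool_alloc() only fails for nonblocking gfp masks. */
	if (obj)
		mempool_free(obj, io_pool);
}
```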

dmapool.c
  83   pool->name, pool->nr_active,  in pools_show()
  84   pool->nr_blocks, pool->size,  in pools_show()
  187  pool->nr_active++;  in pool_block_pop()
  306  while (offset + pool->size <= pool->allocation) {  in pool_initialise_page()
  331  pool->nr_pages++;  in pool_initialise_page()
  342  page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,  in pool_alloc_page()
  378  dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);  in dma_pool_destroy()
  384  dma_free_coherent(pool->dev, pool->allocation,  in dma_pool_destroy()
  390  kfree(pool);  in dma_pool_destroy()
  503  if (pool)  in dmam_pool_create()
  [all …]
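
dmapool layers fixed-size block carving on top of dma_alloc_coherent(), as pool_alloc_page() shows. A short sketch of typical use; the sizes, alignment, and boundary are illustrative assumptions:

```c
#include <linux/dmapool.h>

static int desc_ring_example(struct device *dev)
{
	struct dma_pool *desc_pool;
	dma_addr_t desc_dma;
	void *desc;

	/* 64-byte descriptors, 64-byte aligned, never crossing 4 KiB */
	desc_pool = dma_pool_create("example_desc", dev, 64, 64, 4096);
	if (!desc_pool)
		return -ENOMEM;

	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &desc_dma);
	if (!desc) {
		dma_pool_destroy(desc_pool);
		return -ENOMEM;
	}

	/* ... hand desc_dma to the device, touch desc from the CPU ... */

	dma_pool_free(desc_pool, desc, desc_dma);
	dma_pool_destroy(desc_pool); /* the "%s %s busy" warning above fires
				      * if blocks are still outstanding */
	return 0;
}
```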

zbud.c
  202  struct zbud_pool *pool;  in zbud_create_pool() local
  206  if (!pool)  in zbud_create_pool()
  212  pool->pages_nr = 0;  in zbud_create_pool()
  213  return pool;  in zbud_create_pool()
  224  kfree(pool);  in zbud_destroy_pool()
  259  spin_lock(&pool->lock);  in zbud_alloc()
  280  spin_lock(&pool->lock);  in zbud_alloc()
  281  pool->pages_nr++;  in zbud_alloc()
  316  spin_lock(&pool->lock);  in zbud_free()
  331  pool->pages_nr--;  in zbud_free()
  [all …]

/openbmc/linux/drivers/net/ethernet/ti/

k3-cppi-desc-pool.c
  29   if (!pool)  in k3_cppi_desc_pool_destroy()
  37   dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,  in k3_cppi_desc_pool_destroy()
  53   pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);  in k3_cppi_desc_pool_create_name()
  54   if (!pool)  in k3_cppi_desc_pool_create_name()
  60   pool->mem_size = pool->num_desc * pool->desc_size;  in k3_cppi_desc_pool_create_name()
  77   pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,  in k3_cppi_desc_pool_create_name()
  84   (phys_addr_t)pool->dma_addr, pool->mem_size,  in k3_cppi_desc_pool_create_name()
  91   return pool;  in k3_cppi_desc_pool_create_name()
  94   dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,  in k3_cppi_desc_pool_create_name()
  99   devm_kfree(pool->dev, pool);  in k3_cppi_desc_pool_create_name()
  [all …]

/openbmc/linux/drivers/md/

dm-thin.c
  626  struct pool *pool = tc->pool;  in requeue_deferred_cells() local
  679  struct pool *pool = tc->pool;  in get_bio_block() local
  696  struct pool *pool = tc->pool;  in get_bio_block_range() local
  720  struct pool *pool = tc->pool;  in remap() local
  757  struct pool *pool = tc->pool;  in issue() local
  885  struct pool *pool = tc->pool;  in cell_defer_no_holder() local
  965  struct pool *pool = tc->pool;  in complete_overwrite_bio() local
  998  struct pool *pool = tc->pool;  in process_prepared_mapping() local
  1090 struct pool *pool = tc->pool;  in passdown_double_checking_shared_status() local
  1154 struct pool *pool = tc->pool;  in process_prepared_discard_passdown_pt1() local
  [all …]

/openbmc/qemu/block/

aio_task.c
  40   AioTaskPool *pool = task->pool;  in aio_task_co() local
  42   assert(pool->busy_tasks < pool->max_busy_tasks);  in aio_task_co()
  43   pool->busy_tasks++;  in aio_task_co()
  47   pool->busy_tasks--;  in aio_task_co()
  55   if (pool->waiting) {  in aio_task_co()
  70   assert(pool->busy_tasks < pool->max_busy_tasks);  in aio_task_pool_wait_one()
  75   if (pool->busy_tasks < pool->max_busy_tasks) {  in aio_task_pool_wait_slot()
  93   task->pool = pool;  in aio_task_pool_start_task()
  106  return pool;  in aio_task_pool_new()
  111  g_free(pool);  in aio_task_pool_free()
  [all …]
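
aio_task_co() is the coroutine trampoline that enforces max_busy_tasks. A hedged sketch of driving it from block/aio_task.h follows; the embedded-AioTask layout is the usual convention, while the payload fields and chunk sizing are assumptions:

```c
#include "qemu/osdep.h"
#include "block/aio_task.h"

typedef struct CopyTask {
    AioTask task;      /* must come first: the pool only sees this part */
    int64_t offset;    /* illustrative payload */
} CopyTask;

static int coroutine_fn copy_task_func(AioTask *task)
{
    CopyTask *t = container_of(task, CopyTask, task);

    /* ... perform one chunk of I/O at t->offset ... */
    return 0;
}

static int coroutine_fn copy_all(int64_t chunks)
{
    AioTaskPool *pool = aio_task_pool_new(16); /* max_busy_tasks */
    int ret;

    for (int64_t i = 0; i < chunks; i++) {
        CopyTask *t = g_new0(CopyTask, 1);

        t->task.func = copy_task_func;
        t->offset = i * 1024 * 1024;
        /* Yields while busy_tasks == max_busy_tasks (see
         * aio_task_pool_wait_slot() above); the pool frees the task
         * after completion. */
        aio_task_pool_start_task(pool, &t->task);
    }
    aio_task_pool_wait_all(pool);

    ret = aio_task_pool_status(pool); /* first error, or 0 */
    aio_task_pool_free(pool);
    return ret;
}
```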

/openbmc/linux/net/ceph/

msgpool.c
  17   msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,  in msgpool_alloc()
  23   msg->pool = pool;  in msgpool_alloc()
  34   msg->pool = NULL;  in msgpool_free()
  43   pool->type = type;  in ceph_msgpool_init()
  46   pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);  in ceph_msgpool_init()
  47   if (!pool->pool)  in ceph_msgpool_init()
  49   pool->name = name;  in ceph_msgpool_init()
  56   mempool_destroy(pool->pool);  in ceph_msgpool_destroy()
  68   pool->front_len, pool->max_data_items);  in ceph_msgpool_get()
  76   msg = mempool_alloc(pool->pool, GFP_NOFS);  in ceph_msgpool_get()
  [all …]

/openbmc/linux/sound/core/seq/

seq_memory.c
  24   return pool->total_elements - atomic_read(&pool->counter);  in snd_seq_pool_available()
  29   return snd_seq_pool_available(pool) >= pool->room;  in snd_seq_output_ok()
  240  pool = cell->pool;  in snd_seq_cell_free()
  291  while (pool->free == NULL && ! nonblock && ! pool->closing) {  in snd_seq_cell_alloc()
  463  cellptr->pool = pool;  in snd_seq_pool_init()
  467  pool->room = (pool->size + 1) / 2;  in snd_seq_pool_init()
  471  pool->total_elements = pool->size;  in snd_seq_pool_init()
  527  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in snd_seq_pool_new()
  528  if (!pool)  in snd_seq_pool_new()
  542  return pool;  in snd_seq_pool_new()
  [all …]

/openbmc/linux/include/net/

xdp_sock_drv.h
  36   return pool->chunk_size;  in xsk_pool_get_chunk_size()
  41   return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);  in xsk_pool_get_rx_frame_size()
  47   xp_set_rxq_info(pool, rxq);  in xsk_pool_set_rxq_info()
  53   return pool->heads[0].xdp.rxq->napi_id;  in xsk_pool_get_napi_id()
  62   xp_dma_unmap(pool, attrs);  in xsk_pool_dma_unmap()
  68   struct xdp_umem *umem = pool->umem;  in xsk_pool_dma_map()
  89   return xp_alloc(pool);  in xsk_buff_alloc()
  105  return xp_can_alloc(pool, count);  in xsk_buff_can_alloc()
  178  return xp_raw_get_dma(pool, addr);  in xsk_buff_raw_get_dma()
  183  return xp_raw_get_data(pool, addr);  in xsk_buff_raw_get_data()
  [all …]
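
These inline helpers are the driver-facing side of AF_XDP zero-copy. A minimal RX-refill sketch; the ring-posting step is an illustrative assumption:

```c
#include <net/xdp_sock_drv.h>

static int zc_rx_refill_one(struct xsk_buff_pool *pool)
{
	/* Backed by xp_alloc(pool), line 89 above. */
	struct xdp_buff *xdp = xsk_buff_alloc(pool);
	dma_addr_t dma;

	if (!xdp)
		return -ENOMEM; /* fill ring is empty */

	dma = xsk_buff_xdp_get_dma(xdp);

	/* ... write dma plus xsk_pool_get_rx_frame_size(pool) into the HW
	 * RX descriptor; on queue teardown, return buffers with
	 * xsk_buff_free(xdp). */
	return 0;
}
```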

/openbmc/linux/drivers/staging/media/atomisp/pci/runtime/rmgr/src/

rmgr_vbuf.c
  134  assert(pool);  in ia_css_rmgr_init_vbuf()
  135  if (!pool)  in ia_css_rmgr_init_vbuf()
  138  if (pool->recycle && pool->size) {  in ia_css_rmgr_init_vbuf()
  142  pool->size;  in ia_css_rmgr_init_vbuf()
  144  if (pool->handles)  in ia_css_rmgr_init_vbuf()
  150  pool->size = 0;  in ia_css_rmgr_init_vbuf()
  151  pool->handles = NULL;  in ia_css_rmgr_init_vbuf()
  166  if (!pool) {  in ia_css_rmgr_uninit_vbuf()
  170  if (pool->handles) {  in ia_css_rmgr_uninit_vbuf()
  203  assert(pool);  in rmgr_push_handle()
  [all …]

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/

irq_affinity.c
  10   pool->irqs_per_cpu[cpu]--;  in cpu_put()
  15   pool->irqs_per_cpu[cpu]++;  in cpu_get()
  27   if (!pool->irqs_per_cpu[cpu]) {  in cpu_get_least_loaded()
  33   if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])  in cpu_get_least_loaded()
  42   pool->irqs_per_cpu[best_cpu]++;  in cpu_get_least_loaded()
  55   err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);  in irq_pool_request_irq()
  58   if (pool->irqs_per_cpu) {  in irq_pool_request_irq()
  129  mutex_lock(&pool->lock);  in mlx5_irq_affinity_request()
  159  mutex_unlock(&pool->lock);  in mlx5_irq_affinity_request()
  172  if (pool->irqs_per_cpu)  in mlx5_irq_affinity_irq_release()
  [all …]

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/

dr_icm_pool.c
  280  buddy->pool = pool;  in dr_icm_buddy_create()
  291  pool->dmn->num_buddies[pool->icm_type]++;  in dr_icm_buddy_create()
  343  return pool->hot_memory_size > pool->th;  in dr_icm_pool_is_sync_required()
  470  struct mlx5dr_icm_pool *pool = buddy->pool;  in mlx5dr_icm_free_chunk() local
  481  hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];  in mlx5dr_icm_free_chunk()
  512  pool = kvzalloc(sizeof(*pool), GFP_KERNEL);  in mlx5dr_icm_pool_create()
  513  if (!pool)  in mlx5dr_icm_pool_create()
  516  pool->dmn = dmn;  in mlx5dr_icm_pool_create()
  557  return pool;  in mlx5dr_icm_pool_create()
  560  kvfree(pool);  in mlx5dr_icm_pool_create()
  [all …]

dr_arg.c
  62   pool->dmn->pdn,  in dr_arg_pool_alloc_objs()
  102  mutex_lock(&pool->mutex);  in dr_arg_pool_get_arg_obj()
  125  mutex_lock(&pool->mutex);  in dr_arg_pool_put_arg_obj()
  133  struct dr_arg_pool *pool;  in dr_arg_pool_create() local
  135  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in dr_arg_pool_create()
  136  if (!pool)  in dr_arg_pool_create()
  139  pool->dmn = dmn;  in dr_arg_pool_create()
  142  mutex_init(&pool->mutex);  in dr_arg_pool_create()
  148  return pool;  in dr_arg_pool_create()
  151  kfree(pool);  in dr_arg_pool_create()
  [all …]

/openbmc/linux/drivers/gpu/drm/amd/display/dc/dce80/

dce80_resource.c
  934  *pool = NULL;  in dce80_destroy_resource_pool()
  1045 if (!pool->base.irqs)  in dce80_construct()
  1139 if (!pool)  in dce80_create_resource_pool()
  1143 return &pool->base;  in dce80_create_resource_pool()
  1145 kfree(pool);  in dce80_create_resource_pool()
  1245 if (!pool->base.irqs)  in dce81_construct()
  1339 if (!pool)  in dce81_create_resource_pool()
  1343 return &pool->base;  in dce81_create_resource_pool()
  1345 kfree(pool);  in dce81_create_resource_pool()
  1536 if (!pool)  in dce83_create_resource_pool()
  [all …]

/openbmc/linux/drivers/gpu/drm/amd/display/dc/dce60/

dce60_resource.c
  927  *pool = NULL;  in dce60_destroy_resource_pool()
  1031 if (!pool->base.irqs)  in dce60_construct()
  1125 if (!pool)  in dce60_create_resource_pool()
  1129 return &pool->base;  in dce60_create_resource_pool()
  1131 kfree(pool);  in dce60_create_resource_pool()
  1323 if (!pool)  in dce61_create_resource_pool()
  1327 return &pool->base;  in dce61_create_resource_pool()
  1329 kfree(pool);  in dce61_create_resource_pool()
  1517 if (!pool)  in dce64_create_resource_pool()
  1521 return &pool->base;  in dce64_create_resource_pool()
  [all …]

/openbmc/linux/arch/arm64/kvm/hyp/nvhe/

page_alloc.c
  45   if (addr < pool->range_start || addr >= pool->range_end)  in __find_buddy_nocheck()
  103  if (phys < pool->range_start || phys >= pool->range_end)  in __hyp_attach_page()
  156  __hyp_attach_page(pool, p);  in __hyp_put_page()
  170  hyp_spin_lock(&pool->lock);  in hyp_put_page()
  171  __hyp_put_page(pool, p);  in hyp_put_page()
  172  hyp_spin_unlock(&pool->lock);  in hyp_put_page()
  179  hyp_spin_lock(&pool->lock);  in hyp_get_page()
  203  hyp_spin_lock(&pool->lock);  in hyp_alloc_pages()
  206  while (i <= pool->max_order && list_empty(&pool->free_area[i]))  in hyp_alloc_pages()
  208  if (i > pool->max_order) {  in hyp_alloc_pages()
  [all …]

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/

crypto.c
  19   #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)  argument
  412  pool->avail_deks--;  in mlx5_crypto_dek_pool_pop()
  413  pool->in_use_deks++;  in mlx5_crypto_dek_pool_pop()
  460  if (pool->syncing)  in mlx5_crypto_dek_pool_push()
  595  err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose));  in mlx5_crypto_dek_sync_work_fn()
  676  mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,  in mlx5_crypto_dek_destroy_work_fn()
  686  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in mlx5_crypto_dek_pool_create()
  687  if (!pool)  in mlx5_crypto_dek_pool_create()
  690  pool->mdev = mdev;  in mlx5_crypto_dek_pool_create()
  704  return pool;  in mlx5_crypto_dek_pool_create()
  [all …]

/openbmc/linux/net/rds/

ib_rdma.c
  275  struct rds_ib_mr_pool *pool = ibmr->pool;  in rds_ib_teardown_mr() local
  423  &pool->clean_list);  in rds_ib_flush_mr_pool()
  450  if (atomic_inc_return(&pool->item_count) <= pool->max_items)  in rds_ib_try_reuse_ibmr()
  487  struct rds_ib_mr_pool *pool = ibmr->pool;  in rds_ib_free_mr() local
  509  if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||  in rds_ib_free_mr()
  510  atomic_read(&pool->dirty_count) >= pool->max_items / 5)  in rds_ib_free_mr()
  640  kfree(pool);  in rds_ib_destroy_mr_pool()
  648  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in rds_ib_create_mr_pool()
  649  if (!pool)  in rds_ib_create_mr_pool()
  671  pool->max_free_pinned = pool->max_items * pool->max_pages / 4;  in rds_ib_create_mr_pool()
  [all …]

/openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/

spectrum_cnt.c
  127  pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),  in mlxsw_sp_counter_pool_init()
  129  if (!pool)  in mlxsw_sp_counter_pool_init()
  134  flex_array_size(pool, sub_pools, pool->sub_pools_count));  in mlxsw_sp_counter_pool_init()
  139  &pool->pool_size);  in mlxsw_sp_counter_pool_init()
  145  pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);  in mlxsw_sp_counter_pool_init()
  146  if (!pool->usage) {  in mlxsw_sp_counter_pool_init()
  158  bitmap_free(pool->usage);  in mlxsw_sp_counter_pool_init()
  163  kfree(pool);  in mlxsw_sp_counter_pool_init()
  173  WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=  in mlxsw_sp_counter_pool_fini()
  176  bitmap_free(pool->usage);  in mlxsw_sp_counter_pool_fini()
  [all …]

/openbmc/linux/include/linux/

genalloc.h
  52   void *data, struct gen_pool *pool,
  104  return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);  in gen_pool_add_virt()
  122  return gen_pool_add_virt(pool, addr, -1, size, nid);  in gen_pool_add()
  131  return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,  in gen_pool_alloc_owner()
  153  return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);  in gen_pool_alloc()
  172  gen_pool_free_owner(pool, addr, size, NULL);  in gen_pool_free()
  185  struct gen_pool *pool, unsigned long start_addr);
  189  void *data, struct gen_pool *pool, unsigned long start_addr);
  193  void *data, struct gen_pool *pool, unsigned long start_addr);
  198  void *data, struct gen_pool *pool, unsigned long start_addr);
  [all …]
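
genalloc is the general-purpose allocator for special-purpose memory such as SRAM or on-chip buffers. A minimal sketch of carving a region; the region address, granule, and sizes are illustrative assumptions:

```c
#include <linux/genalloc.h>

static struct gen_pool *sram_pool;

static int sram_pool_setup(void *virt, phys_addr_t phys, size_t len)
{
	int ret;

	/* Minimum allocation granule: 2^6 = 64 bytes */
	sram_pool = gen_pool_create(6, -1);
	if (!sram_pool)
		return -ENOMEM;

	/* Register virt and phys together so gen_pool_virt_to_phys()
	 * works; plain gen_pool_add() (line 122 above) passes phys = -1. */
	ret = gen_pool_add_virt(sram_pool, (unsigned long)virt, phys, len, -1);
	if (ret) {
		gen_pool_destroy(sram_pool);
		return ret;
	}
	return 0;
}

static unsigned long sram_alloc_512(void)
{
	/* Dispatches through pool->algo/pool->data, the
	 * gen_pool_alloc_algo() path visible in the matches above. */
	return gen_pool_alloc(sram_pool, 512); /* returns 0 on failure */
}
```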

/openbmc/linux/include/trace/events/

page_pool.h
  16   TP_PROTO(const struct page_pool *pool,
  19   TP_ARGS(pool, inflight, hold, release),
  30   __entry->pool = pool;
  34   __entry->cnt = pool->destroy_cnt;
  47   TP_ARGS(pool, page, release),
  57   __entry->pool = pool;
  72   TP_ARGS(pool, page, hold),
  82   __entry->pool = pool;
  96   TP_ARGS(pool, new_nid),
  105  __entry->pool = pool;
  [all …]

/openbmc/linux/drivers/gpu/drm/i915/gt/

intel_gt_buffer_pool.c
  24   if (n >= ARRAY_SIZE(pool->cache_list))  in bucket_for_size()
  25   n = ARRAY_SIZE(pool->cache_list) - 1;  in bucket_for_size()
  27   return &pool->cache_list[n];  in bucket_for_size()
  73   spin_unlock_irq(&pool->lock);  in pool_free_older_than()
  89   struct intel_gt_buffer_pool *pool =  in pool_free_work() local
  93   if (pool_free_older_than(pool, HZ))  in pool_free_work()
  102  struct intel_gt_buffer_pool *pool = node->pool;  in pool_retire() local
  152  node->pool = pool;  in node_create()
  197  spin_lock_irq(&pool->lock);  in intel_gt_get_buffer_pool()
  199  spin_unlock_irq(&pool->lock);  in intel_gt_get_buffer_pool()
  [all …]