/openbmc/linux/net/xdp/

xsk_buff_pool.c:

    void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
    spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    void xp_destroy(struct xsk_buff_pool *pool)
    if (!pool)
    kvfree(pool->tx_descs);
    [all …]
/openbmc/linux/net/core/

page_pool.c:

    #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
    #define recycle_stat_inc(pool, __stat) \
    struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    #define recycle_stat_add(pool, __stat, val) \
    struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    * page_pool_get_stats() - fetch page pool stats
    * @pool: pool from which page was allocated
    bool page_pool_get_stats(struct page_pool *pool,
    stats->alloc_stats.fast += pool->alloc_stats.fast;
    stats->alloc_stats.slow += pool->alloc_stats.slow;
    [all …]
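The counters those macros accumulate are exposed to drivers through page_pool_get_stats(). A minimal sketch of folding one pool's stats into driver-level totals, assuming CONFIG_PAGE_POOL_STATS is enabled; the helper name and u64 totals are hypothetical, and on older trees the declaration lives in <net/page_pool.h> rather than the split headers:

    #include <net/page_pool/helpers.h>

    /* Hypothetical helper: accumulate one pool's counters into totals. */
    static void drv_fold_pp_stats(struct page_pool *pool, u64 *fast, u64 *slow)
    {
        struct page_pool_stats stats = { };

        /* Returns false when no stats are available for this pool. */
        if (!page_pool_get_stats(pool, &stats))
            return;

        *fast += stats.alloc_stats.fast; /* hits in the pool's caches */
        *slow += stats.alloc_stats.slow; /* fallbacks to the page allocator */
    }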
/openbmc/linux/drivers/net/ethernet/ti/

k3-cppi-desc-pool.c:

    /* TI K3 CPPI5 descriptors pool API
    #include "k3-cppi-desc-pool.h"
    void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
    if (!pool)
    WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
    gen_pool_size(pool->gen_pool),
    gen_pool_avail(pool->gen_pool));
    if (pool->cpumem)
    dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
    pool->dma_addr);
    [all …]
/openbmc/linux/mm/

mempool.c:

    * memory buffer pool support. Such pools are mostly used
    static void poison_error(mempool_t *pool, void *element, size_t size,
    const int nr = pool->curr_nr;
    pr_err("Mempool %p size %zu\n", pool, size);
    static void __check_element(mempool_t *pool, void *element, size_t size)
    poison_error(pool, element, size, i);
    static void check_element(mempool_t *pool, void *element)
    if (pool->free == mempool_kfree) {
    __check_element(pool, element, (size_t)pool->pool_data);
    } else if (pool->free == mempool_free_slab) {
    [all …]
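The poisoning checks above guard the standard mempool usage pattern: a pool backed by a slab cache that reserves a minimum number of objects so I/O-path allocations can always make progress. A hedged sketch; the cache, its object size, and MIN_RESERVE are made-up names:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    #define MIN_RESERVE 16 /* objects held back for memory-pressure cases */

    static struct kmem_cache *my_cache;
    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
        my_cache = kmem_cache_create("my_cache", 256, 0, 0, NULL);
        if (!my_cache)
            return -ENOMEM;

        /* mempool_alloc_slab/mempool_free_slab are the stock helpers
         * that check_element() above recognizes. */
        my_pool = mempool_create(MIN_RESERVE, mempool_alloc_slab,
                                 mempool_free_slab, my_cache);
        if (!my_pool) {
            kmem_cache_destroy(my_cache);
            return -ENOMEM;
        }
        return 0;
    }

    static void my_pool_io(void)
    {
        /* With a sleeping gfp mask this waits on the reserve rather
         * than failing outright. */
        void *obj = mempool_alloc(my_pool, GFP_NOIO);

        /* ... use obj on the I/O path ... */
        mempool_free(obj, my_pool);
    }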
dmapool.c:

    * DMA Pool allocator
    * The current design of this allocator is fairly simple. The pool is
    struct dma_pool { /* the pool */
    struct dma_pool *pool;
    list_for_each_entry(pool, &dev->dma_pools, pools) {
    /* per-pool info, no real statistics yet */
    pool->name, pool->nr_active,
    pool->nr_blocks, pool->size,
    pool->nr_pages);
    static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
    [all …]
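A minimal sketch of the dma_pool API implemented here, for a driver that needs many small, equally sized blocks of coherent DMA memory; the pool name, block size, and device pointer are illustrative:

    #include <linux/dmapool.h>

    static void dma_pool_demo(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t handle;
        void *vaddr;

        /* 64-byte blocks, 64-byte aligned, no boundary-crossing rule */
        pool = dma_pool_create("demo-pool", dev, 64, 64, 0);
        if (!pool)
            return;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (vaddr) {
            /* program "handle" into the device, touch "vaddr" from the CPU */
            dma_pool_free(pool, vaddr, handle);
        }

        /* all blocks must be returned before the pool is destroyed */
        dma_pool_destroy(pool);
    }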
zbud.c:

    * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
    * 63 freelists per pool.
    * struct zbud_pool - stores metadata for each zbud pool
    * @lock: protects all pool fields and first|last_chunk fields of any
    * zbud page in the pool
    * @pages_nr: number of zbud pages in the pool.
    * This structure is allocated at pool creation time and maintains metadata
    * pertaining to a particular zbud pool.
    * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
    * Pool lock should be held as this function accesses first|last_chunks
    [all …]
/openbmc/qemu/util/

thread-pool.c:

    * QEMU block layer thread pool
    #include "block/thread-pool.h"
    static void do_spawn_thread(ThreadPool *pool);
    ThreadPool *pool;
    /* This list is only written by the thread pool's mother thread. */
    ThreadPool *pool = opaque;
    qemu_mutex_lock(&pool->lock);
    pool->pending_threads--;
    do_spawn_thread(pool);
    while (pool->cur_threads <= pool->max_threads) {
    [all …]
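The worker loop above services requests submitted through the thread-pool API, which lets coroutine code push a blocking call onto a worker and yield until it completes. A hedged sketch; older QEMU releases passed an explicit ThreadPool * to the submit functions, while newer ones infer the pool from the current AioContext, so treat the exact signature as version-dependent:

    #include "qemu/osdep.h"
    #include "block/thread-pool.h"

    /* Runs in a worker thread; may block freely (e.g. on a syscall). */
    static int blocking_work(void *opaque)
    {
        return 0;
    }

    /* Must be called in coroutine context; yields until the worker is done. */
    static int coroutine_fn offload_blocking_work(void)
    {
        return thread_pool_submit_co(blocking_work, NULL);
    }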
/openbmc/linux/drivers/md/

dm-thin.c:

    * The block size of the device holding pool data must be
    * A pool device ties together a metadata device and a data device. It
    * The pool runs in various modes. Ordered in degraded order for comparisons.
    struct pool {
    struct dm_target *ti; /* Only set if a pool target is bound */
    static void metadata_operation_failed(struct pool *pool, const char *op, int r);
    static enum pool_mode get_pool_mode(struct pool *pool)
    return pool->pf.mode;
    static void notify_of_pool_mode_change(struct pool *pool)
    enum pool_mode mode = get_pool_mode(pool);
    [all …]
/openbmc/linux/net/ceph/

msgpool.c:

    struct ceph_msgpool *pool = arg;
    msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
    dout("msgpool_alloc %s failed\n", pool->name);
    dout("msgpool_alloc %s %p\n", pool->name, msg);
    msg->pool = pool;
    struct ceph_msgpool *pool = arg;
    dout("msgpool_release %s %p\n", pool->name, msg);
    msg->pool = NULL;
    int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
    pool->type = type;
    [all …]
/openbmc/linux/sound/core/seq/

seq_memory.c:

    static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
    return pool->total_elements - atomic_read(&pool->counter);
    static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
    return snd_seq_pool_available(pool) >= pool->room;
    static inline void free_cell(struct snd_seq_pool *pool,
    cell->next = pool->free;
    pool->free = cell;
    atomic_dec(&pool->counter);
    struct snd_seq_pool *pool;
    pool = cell->pool;
    [all …]
/openbmc/linux/include/net/

xdp_sock_drv.h:

    void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
    bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
    u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
    void xsk_tx_release(struct xsk_buff_pool *pool);
    void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
    void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
    void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
    void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
    bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
    static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
    [all …]
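These are the hooks an AF_XDP zero-copy driver calls on its transmit path. A sketch of the usual peek/post/release loop, loosely modeled on existing drivers; the function name, budget handling, and ring-posting step are placeholders:

    #include <net/xdp_sock_drv.h>

    static void my_xsk_tx(struct xsk_buff_pool *pool, unsigned int budget)
    {
        struct xdp_desc desc;
        unsigned int sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
            /* ... post dma/desc.len to the hardware TX ring ... */
            sent++;
        }
        if (sent)
            xsk_tx_release(pool); /* release the consumed TX descriptors */

        /* Later, once the hardware reports completions:
         * xsk_tx_completed(pool, done);
         */
    }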
/openbmc/linux/lib/

genalloc.c:

    * available. If new memory is added to the pool a lock has to be
    * gen_pool_create - create a new special memory pool
    * @nid: node id of the node the pool structure should be allocated on, or -1
    * Create a new special memory pool that can be used to manage special purpose
    struct gen_pool *pool;
    pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
    if (pool != NULL) {
    spin_lock_init(&pool->lock);
    INIT_LIST_HEAD(&pool->chunks);
    pool->min_alloc_order = min_alloc_order;
    [all …]
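A sketch of gen_pool usage for carving allocations out of a region of special-purpose memory such as on-chip SRAM; the base address and sizes are made up:

    #include <linux/genalloc.h>

    static int genalloc_demo(void)
    {
        struct gen_pool *pool;
        unsigned long addr;

        /* order 5: 32-byte allocation granularity; -1: any NUMA node */
        pool = gen_pool_create(5, -1);
        if (!pool)
            return -ENOMEM;

        /* hand the pool a region to manage (hypothetical SRAM window) */
        if (gen_pool_add(pool, 0xf0000000UL, 4096, -1)) {
            gen_pool_destroy(pool);
            return -ENOMEM;
        }

        addr = gen_pool_alloc(pool, 128); /* returns 0 on failure */
        if (addr)
            gen_pool_free(pool, addr, 128);

        /* every allocation must be freed before destroying the pool */
        gen_pool_destroy(pool);
        return 0;
    }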
/openbmc/qemu/block/

aio_task.c:

    AioTaskPool *pool = task->pool;
    assert(pool->busy_tasks < pool->max_busy_tasks);
    pool->busy_tasks++;
    pool->busy_tasks--;
    if (task->ret < 0 && pool->status == 0) {
    pool->status = task->ret;
    if (pool->waiting) {
    pool->waiting = false;
    aio_co_wake(pool->main_co);
    void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool)
    [all …]
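The busy_tasks/status/waiting fields above implement a bounded pool of in-flight coroutine tasks. A sketch of the caller-side pattern from block/aio_task.h, assuming the pool frees each task after its function returns; the CopyTask struct and its fields are made up:

    #include "qemu/osdep.h"
    #include "block/aio_task.h"

    typedef struct CopyTask {
        AioTask task;       /* must be first: the pool only sees this */
        int64_t offset;
        int64_t bytes;
    } CopyTask;

    static int coroutine_fn copy_task_func(AioTask *task)
    {
        CopyTask *t = container_of(task, CopyTask, task);

        /* ... one chunk of coroutine I/O for t->offset/t->bytes ... */
        return 0;           /* recorded in task->ret, folded into pool->status */
    }

    static int coroutine_fn copy_all(void)
    {
        AioTaskPool *pool = aio_task_pool_new(8); /* at most 8 in flight */
        CopyTask *t = g_new0(CopyTask, 1);
        int ret;

        t->task.func = copy_task_func;
        t->offset = 0;
        t->bytes = 65536;

        aio_task_pool_start_task(pool, &t->task); /* may yield for a slot */
        aio_task_pool_wait_all(pool);

        ret = aio_task_pool_status(pool);
        aio_task_pool_free(pool);
        return ret;
    }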
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/

dr_icm_pool.c:

    struct mutex mutex; /* protect the ICM pool and ICM buddy */
    u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
    u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
    chunk->buddy_mem->pool->icm_type);
    dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
    struct mlx5_core_dev *mdev = pool->dmn->mdev;
    icm_mr->dmn = pool->dmn;
    icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
    pool->icm_type);
    switch (pool->icm_type) {
    [all …]
dr_arg.c:

    /* modify-header arg pool */
    /* argument pool area */
    struct mutex mutex; /* protect arg pool */
    static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
    pool->dmn->info.caps.log_header_modify_argument_granularity;
    max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
    min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
    if (pool->log_chunk_size > object_range) {
    mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
    pool->log_chunk_size);
    [all …]
/openbmc/linux/drivers/staging/media/atomisp/pci/runtime/rmgr/src/

rmgr_vbuf.c:

    * @brief VBUF resource pool - refpool
    * @brief VBUF resource pool - writepool
    * @brief VBUF resource pool - hmmbufferpool
    * @brief Initialize the resource pool (host, vbuf)
    * @param pool The pointer to the pool
    int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
    assert(pool);
    if (!pool)
    /* initialize the recycle pool if used */
    if (pool->recycle && pool->size) {
    [all …]
/openbmc/linux/drivers/staging/octeon/

ethernet-mem.c:

    * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
    * @pool: Pool to allocate an skbuff for
    * @size: Size of the buffer needed for the pool
    static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
    cvmx_fpa_free(skb->data, pool, size / 128);
    * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
    * @pool: Pool to allocate an skbuff for
    * @size: Size of the buffer needed for the pool
    static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
    memory = cvmx_fpa_alloc(pool);
    [all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/

crypto.c:

    * (for example, TLS) after last revalidation in a pool or a bulk.
    #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
    int num_deks; /* the total number of keys in this pool */
    int avail_deks; /* the number of available keys in this pool */
    int in_use_deks; /* the number of keys in use in this pool */
    mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
    struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
    struct mlx5_core_dev *mdev = pool->mdev;
    err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
    mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
    [all …]
/openbmc/linux/tools/testing/selftests/drivers/net/mlxsw/

sharedbuffer_configuration.py:

    objects, pool, tcbind and portpool. Provide an interface to get random
    1. Pool:
    - random pool number
    for pool in pools:
        self._pools.append(pool)
    def _get_th(self, pool):
        if pool["thtype"] == "dynamic":
    for pool in self._pools:
        if pool["type"] == "ingress":
            ing_pools.append(pool)
    [all …]
/openbmc/linux/drivers/gpu/drm/ttm/tests/

ttm_pool_test.c:

    struct ttm_pool *pool;
    pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
    KUNIT_ASSERT_NOT_NULL(test, pool);
    ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
    err = ttm_pool_alloc(pool, tt, &simple_ctx);
    ttm_pool_free(pool, tt);
    return pool;
    struct ttm_pool *pool;
    pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
    KUNIT_ASSERT_NOT_NULL(test, pool);
    [all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/

irq_affinity.c:

    static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
    pool->irqs_per_cpu[cpu]--;
    static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
    pool->irqs_per_cpu[cpu]++;
    static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
    if (!pool->irqs_per_cpu[cpu]) {
    if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
    mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
    pool->irqs_per_cpu[best_cpu]++;
    irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
    [all …]
/openbmc/linux/include/linux/

genalloc.h:

    * available. If new memory is added to the pool a lock has to be
    * @pool: the pool being allocated from
    void *data, struct gen_pool *pool,
    * General purpose special memory pool descriptor.
    struct list_head chunks; /* list of chunks in this pool */
    * General purpose special memory pool chunk descriptor.
    struct list_head next_chunk; /* next chunk in pool */
    extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
    static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
    return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
    [all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/

spectrum_cnt.c:

    spinlock_t counter_pool_lock; /* Protects counter pool allocations */
    struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
    for (i = 0; i < pool->sub_pools_count; i++) {
    sub_pool = &pool->sub_pools[i];
    sub_pool = &pool->sub_pools[i];
    struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
    for (i = 0; i < pool->sub_pools_count; i++) {
    sub_pool = &pool->sub_pools[i];
    const struct mlxsw_sp_counter_pool *pool = priv;
    return atomic_read(&pool->active_entries_count);
    [all …]
/openbmc/linux/drivers/gpu/drm/amd/display/dc/dce80/

dce80_resource.c:

    static void dce80_resource_destruct(struct dce110_resource_pool *pool)
    for (i = 0; i < pool->base.pipe_count; i++) {
    if (pool->base.opps[i] != NULL)
    dce110_opp_destroy(&pool->base.opps[i]);
    if (pool->base.transforms[i] != NULL)
    dce80_transform_destroy(&pool->base.transforms[i]);
    if (pool->base.ipps[i] != NULL)
    dce_ipp_destroy(&pool->base.ipps[i]);
    if (pool->base.mis[i] != NULL) {
    kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
    [all …]
/openbmc/linux/drivers/gpu/drm/amd/display/dc/dce60/

dce60_resource.c:

    static void dce60_resource_destruct(struct dce110_resource_pool *pool)
    for (i = 0; i < pool->base.pipe_count; i++) {
    if (pool->base.opps[i] != NULL)
    dce110_opp_destroy(&pool->base.opps[i]);
    if (pool->base.transforms[i] != NULL)
    dce60_transform_destroy(&pool->base.transforms[i]);
    if (pool->base.ipps[i] != NULL)
    dce_ipp_destroy(&pool->base.ipps[i]);
    if (pool->base.mis[i] != NULL) {
    kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
    [all …]