/openbmc/linux/tools/testing/selftests/drivers/net/mlxsw/ |
sharedbuffer_configuration.py
     28  def __init__(self, pools):    argument
     30  for pool in pools:
    187  pools = PoolList()
    190  pools.append(Pool(pooldict))
    191  return pools
    194  def do_check_pools(dlname, pools, vp):    argument
    195  for pool in pools:
    219  def check_pools(dlname, pools):    argument
    221  record_vp = RecordValuePicker(pools)
    224  do_check_pools(dlname, pools, RandomValuePicker(pools))
    [all …]
|
/openbmc/u-boot/include/fsl-mc/ |
fsl_dpni.h
     90  MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
     91  MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
     92  MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
     93  MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
     94  MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
     95  MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
     96  MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
     97  MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
     98  MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
     99  MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
    [all …]
|
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
dr_arg.c
     28  struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];    member
    201  arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);    in mlx5dr_arg_get_obj()
    226  dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);    in mlx5dr_arg_put_obj()
    245  pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);    in mlx5dr_arg_mgr_create()
    246  if (!pool_mgr->pools[i])    in mlx5dr_arg_mgr_create()
    254  dr_arg_pool_destroy(pool_mgr->pools[i]);    in mlx5dr_arg_mgr_create()
    262  struct dr_arg_pool **pools;    in mlx5dr_arg_mgr_destroy() local
    268  pools = mgr->pools;    in mlx5dr_arg_mgr_destroy()
    270  dr_arg_pool_destroy(pools[i]);    in mlx5dr_arg_mgr_destroy()
|
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
pool.c
     25  if (!xsk->pools) {    in mlx5e_xsk_get_pools()
     26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,    in mlx5e_xsk_get_pools()
     27  sizeof(*xsk->pools), GFP_KERNEL);    in mlx5e_xsk_get_pools()
     28  if (unlikely(!xsk->pools))    in mlx5e_xsk_get_pools()
     41  kfree(xsk->pools);    in mlx5e_xsk_put_pools()
     42  xsk->pools = NULL;    in mlx5e_xsk_put_pools()
     54  xsk->pools[ix] = pool;    in mlx5e_xsk_add_pool()
     60  xsk->pools[ix] = NULL;    in mlx5e_xsk_remove_pool()
|
pool.h
     12  if (!xsk || !xsk->pools)    in mlx5e_xsk_get_pool()
     18  return xsk->pools[ix];    in mlx5e_xsk_get_pool()
|
/openbmc/linux/arch/sparc/kernel/ |
iommu-common.c
     82  spin_lock_init(&(iommu->pools[i].lock));    in iommu_tbl_pool_init()
     83  iommu->pools[i].start = start;    in iommu_tbl_pool_init()
     84  iommu->pools[i].hint = start;    in iommu_tbl_pool_init()
     86  iommu->pools[i].end = start - 1;    in iommu_tbl_pool_init()
    131  pool = &(iommu->pools[pool_nr]);    in iommu_tbl_range_alloc()
    161  pool = &(iommu->pools[0]);    in iommu_tbl_range_alloc()
    193  pool = &(iommu->pools[pool_nr]);    in iommu_tbl_range_alloc()
    237  p = &tbl->pools[pool_nr];    in get_pool()
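
The sparc entry above (and the powerpc iommu.c entry further down) shows the same pattern: one IOMMU allocation table split into several pools, each with its own lock and sub-range, so concurrent DMA allocations rarely contend on a single lock. A minimal standalone sketch of that idea, using hypothetical names (arena, arena_pool) rather than the kernel's types:

/* Hedged sketch only: hypothetical names, not kernel symbols. */
#include <pthread.h>

#define NR_POOLS 4

struct arena_pool {
    pthread_mutex_t lock;
    unsigned long start, end, hint;   /* this pool owns slots [start, end) */
};

struct arena {
    struct arena_pool pools[NR_POOLS];
};

static void arena_init(struct arena *a, unsigned long nslots)
{
    unsigned long per_pool = nslots / NR_POOLS;

    for (int i = 0; i < NR_POOLS; i++) {
        pthread_mutex_init(&a->pools[i].lock, NULL);
        a->pools[i].start = i * per_pool;
        a->pools[i].end   = (i + 1) * per_pool;
        a->pools[i].hint  = a->pools[i].start;
    }
}

/* Lock only the chosen pool and allocate from its sub-range; a real allocator
 * would search a free-slot bitmap and fall back to other pools on failure. */
static long arena_alloc(struct arena *a, unsigned int pool_nr)
{
    struct arena_pool *p = &a->pools[pool_nr % NR_POOLS];
    long slot = -1;

    pthread_mutex_lock(&p->lock);
    if (p->hint < p->end)
        slot = (long)p->hint++;       /* trivial bump allocation for the sketch */
    pthread_mutex_unlock(&p->lock);
    return slot;
}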
|
/openbmc/linux/mm/ |
dmapool.c
     60  struct list_head pools;    member
     80  list_for_each_entry(pool, &dev->dma_pools, pools) {    in pools_show()
     92  static DEVICE_ATTR_RO(pools);
    267  INIT_LIST_HEAD(&retval->pools);    in dma_pool_create()
    280  list_add(&retval->pools, &dev->dma_pools);    in dma_pool_create()
    288  list_del(&retval->pools);    in dma_pool_create()
    370  list_del(&pool->pools);    in dma_pool_destroy()
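
dmapool.c chains each device's DMA pools on a per-device list (the `pools` list_head and the `pools` sysfs attribute above). Drivers consume this through the dma_pool API; a minimal usage sketch, assuming a hypothetical driver that needs small fixed-size coherent buffers:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical driver context: 'dev' is the struct device we allocate for. */
static int demo_use_dma_pool(struct device *dev)
{
    struct dma_pool *pool;
    dma_addr_t dma;
    void *vaddr;

    /* 64-byte blocks, 64-byte aligned, no boundary-crossing restriction. */
    pool = dma_pool_create("demo-descs", dev, 64, 64, 0);
    if (!pool)
        return -ENOMEM;

    vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
    if (!vaddr) {
        dma_pool_destroy(pool);
        return -ENOMEM;
    }

    /* ... hand 'dma' to the hardware, use 'vaddr' from the CPU ... */

    dma_pool_free(pool, vaddr, dma);
    dma_pool_destroy(pool);
    return 0;
}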
|
/openbmc/linux/drivers/net/ethernet/chelsio/libcxgb/ |
libcxgb_ppm.c
    348  struct cxgbi_ppm_pool *pools;    in ppm_alloc_cpu_pool() local
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;    in ppm_alloc_cpu_pool()
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;    in ppm_alloc_cpu_pool()
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));    in ppm_alloc_cpu_pool()
    370  if (!pools)    in ppm_alloc_cpu_pool()
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);    in ppm_alloc_cpu_pool()
    384  return pools;    in ppm_alloc_cpu_pool()
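
ppm_alloc_cpu_pool() above sizes a pool header plus an inline bitmap and allocates one copy per CPU with __alloc_percpu(). A hedged sketch of that per-CPU allocation pattern; the demo_* structure and its fields are invented for illustration:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

/* Hypothetical per-CPU pool: a small header followed by an inline bitmap. */
struct demo_cpu_pool {
    unsigned int next_hint;
    unsigned long bmap[];             /* flexible array sized at alloc time */
};

static struct demo_cpu_pool __percpu *demo_alloc_cpu_pools(unsigned int nbits)
{
    size_t sz = sizeof(struct demo_cpu_pool) +
                BITS_TO_LONGS(nbits) * sizeof(unsigned long);
    struct demo_cpu_pool __percpu *pools;
    unsigned int cpu;

    /* One copy of the structure (and its bitmap) per possible CPU. */
    pools = __alloc_percpu(sz, __alignof__(struct demo_cpu_pool));
    if (!pools)
        return NULL;

    for_each_possible_cpu(cpu) {
        struct demo_cpu_pool *p = per_cpu_ptr(pools, cpu);

        p->next_hint = 0;             /* bitmap is already zeroed by the allocator */
    }
    return pools;
}

/* Pairs with free_percpu(pools) on teardown. */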
|
/openbmc/linux/drivers/net/ethernet/freescale/dpaa2/ |
dpaa2-xsk.c
    162  pools_params->pools[curr_bp].priority_mask |= (1 << j);    in dpaa2_xsk_set_bp_per_qdbin()
    163  if (!pools_params->pools[curr_bp].priority_mask)    in dpaa2_xsk_set_bp_per_qdbin()
    166  pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;    in dpaa2_xsk_set_bp_per_qdbin()
    167  pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;    in dpaa2_xsk_set_bp_per_qdbin()
    168  pools_params->pools[curr_bp++].backup_pool = 0;    in dpaa2_xsk_set_bp_per_qdbin()
|
/openbmc/linux/drivers/soc/ti/ |
knav_qmss.h
    203  struct list_head pools;    member
    304  struct list_head pools;    member
    363  list_for_each_entry(pool, &kdev->pools, list)
|
knav_qmss_queue.c
    818  node = &region->pools;    in knav_pool_create()
    819  list_for_each_entry(iter, &region->pools, region_inst) {    in knav_pool_create()
    833  list_add_tail(&pool->list, &kdev->pools);    in knav_pool_create()
   1035  list_add(&pool->region_inst, &region->pools);    in knav_queue_setup_region()
   1119  INIT_LIST_HEAD(&region->pools);    in knav_queue_setup_regions()
   1357  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)    in knav_queue_free_regions()
   1781  INIT_LIST_HEAD(&kdev->pools);    in knav_queue_probe()
|
/openbmc/linux/drivers/net/ethernet/wangxun/libwx/ |
wx_hw.c
    510  static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,    in wx_set_rar() argument
    526  wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);    in wx_set_rar()
    528  wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);    in wx_set_rar()
    703  wx->mac_table[i].pools,    in wx_sync_mac_table()
    717  wx->mac_table[0].pools = 1ULL;    in wx_mac_set_default_filter()
    720  wx->mac_table[0].pools,    in wx_mac_set_default_filter()
    736  wx->mac_table[i].pools = 0;    in wx_flush_sw_mac_table()
    752  if (wx->mac_table[i].pools != (1ULL << pool)) {    in wx_add_mac_filter()
    754  wx->mac_table[i].pools |= (1ULL << pool);    in wx_add_mac_filter()
    766  wx->mac_table[i].pools |= (1ULL << pool);    in wx_add_mac_filter()
    [all …]
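
wx_hw.c keeps, per MAC filter entry, a 64-bit `pools` bitmask of the pools allowed to use that address, and programs it as two 32-bit register halves. A small standalone sketch of that bitmask handling; the demo_* names and the register-write callback are hypothetical:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical software copy of one MAC filter entry. */
struct demo_mac_entry {
    uint8_t  addr[6];
    uint64_t pools;                   /* bit N set => pool N uses this entry */
};

static void demo_add_to_pool(struct demo_mac_entry *e, unsigned int pool)
{
    e->pools |= 1ULL << pool;
}

static bool demo_only_pool(const struct demo_mac_entry *e, unsigned int pool)
{
    return e->pools == (1ULL << pool);
}

/* Split the 64-bit mask across two 32-bit registers, mirroring the
 * WX_PSR_MAC_SWC_VM_L / _H writes in the excerpt above. */
static void demo_program_entry(const struct demo_mac_entry *e,
                               void (*wr32)(uint32_t reg, uint32_t val),
                               uint32_t reg_lo, uint32_t reg_hi)
{
    wr32(reg_lo, (uint32_t)(e->pools & 0xFFFFFFFF));
    wr32(reg_hi, (uint32_t)(e->pools >> 32));
}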
|
/openbmc/linux/arch/powerpc/kernel/ |
iommu.c
    252  pool = &(tbl->pools[pool_nr]);    in iommu_range_alloc()
    280  pool = &(tbl->pools[0]);    in iommu_range_alloc()
    302  pool = &tbl->pools[pool_nr];    in iommu_range_alloc()
    429  p = &tbl->pools[pool_nr];    in get_pool()
    755  p = &tbl->pools[i];    in iommu_init_table()
   1122  spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);    in iommu_take_ownership()
   1132  spin_unlock(&tbl->pools[i].lock);    in iommu_take_ownership()
   1144  spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);    in iommu_release_ownership()
   1152  spin_unlock(&tbl->pools[i].lock);    in iommu_release_ownership()
|
/openbmc/linux/Documentation/devicetree/bindings/soc/ti/ |
keystone-navigator-qmss.txt
      6  processors(PDSP), linking RAM, descriptor pools and infrastructure
     49  - queue-pools : child node classifying the queue ranges into pools.
     50  Queue ranges are grouped into 3 type of pools:
    151  queue-pools {
|
/openbmc/linux/Documentation/core-api/ |
workqueue.rst
     60  * Use per-CPU unified worker pools shared by all wq to provide
     83  called worker-pools.
     87  which manages worker-pools and processes the queued work items.
     89  There are two worker-pools, one for normal work items and the other
     91  worker-pools to serve work items queued on unbound workqueues - the
     92  number of these backing pools is dynamic.
    132  For unbound workqueues, the number of backing pools is dynamic.
    135  backing worker pools matching the attributes. The responsibility of
    169  worker-pools which host workers which are not bound to any
    172  worker-pools try to start execution of work items as soon as
    [all …]
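
The workqueue.rst lines above describe the per-CPU and dynamically managed unbound worker pools that execute queued work items. On the consumer side this reduces to allocating a workqueue and queueing work on it; a minimal sketch, with demo_* names invented for illustration:

#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/errno.h>

static void demo_work_fn(struct work_struct *work)
{
    /* Runs in process context on a worker belonging to an unbound pool. */
    pr_info("demo work executed\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);
static struct workqueue_struct *demo_wq;

static int demo_start(void)
{
    /* WQ_UNBOUND: items are served by the dynamically managed unbound pools
     * rather than the per-CPU pools; max_active of 0 selects the default. */
    demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
    if (!demo_wq)
        return -ENOMEM;

    queue_work(demo_wq, &demo_work);
    return 0;
}

static void demo_stop(void)
{
    /* Drains queued work, then releases the workqueue. */
    destroy_workqueue(demo_wq);
}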
|
mm-api.rst
     88  Memory pools
     94  DMA pools
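
mm-api.rst's "Memory pools" section documents the mempool API, which reserves a minimum number of elements so allocations can make forward progress under memory pressure (its "DMA pools" section covers the dma_pool API sketched earlier). A brief mempool sketch with a hypothetical element type:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_request { int id; };          /* hypothetical element type */

static mempool_t *demo_pool;

static int demo_pool_setup(void)
{
    /* Keep at least 16 elements of sizeof(struct demo_request) in reserve. */
    demo_pool = mempool_create_kmalloc_pool(16, sizeof(struct demo_request));
    if (!demo_pool)
        return -ENOMEM;
    return 0;
}

static void demo_pool_use(void)
{
    struct demo_request *req = mempool_alloc(demo_pool, GFP_KERNEL);

    if (req) {
        req->id = 1;
        mempool_free(req, demo_pool);
    }
}

/* mempool_destroy(demo_pool) releases the reserve on teardown. */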
|
/openbmc/linux/arch/sparc/include/asm/ |
iommu-common.h
     26  struct iommu_pool pools[IOMMU_NR_POOLS];    member
|
/openbmc/linux/drivers/md/ |
dm-table.c
   1023  struct dm_md_mempools *pools;    in dm_table_alloc_md_mempools() local
   1030  pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);    in dm_table_alloc_md_mempools()
   1031  if (!pools)    in dm_table_alloc_md_mempools()
   1052  if (bioset_init(&pools->io_bs, pool_size, io_front_pad,    in dm_table_alloc_md_mempools()
   1056  bioset_integrity_create(&pools->io_bs, pool_size))    in dm_table_alloc_md_mempools()
   1059  if (bioset_init(&pools->bs, pool_size, front_pad, 0))    in dm_table_alloc_md_mempools()
   1062  bioset_integrity_create(&pools->bs, pool_size))    in dm_table_alloc_md_mempools()
   1065  t->mempools = pools;    in dm_table_alloc_md_mempools()
   1069  dm_free_md_mempools(pools);    in dm_table_alloc_md_mempools()
|
/openbmc/linux/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ |
ethernet-driver.rst
     26  - buffer pools
     69  DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124  The role of hardware buffer pools is storage of ingress frame data. Each network
|
/openbmc/linux/Documentation/arch/arm/keystone/ |
knav-qmss.rst
     12  processors(PDSP), linking RAM, descriptor pools and infrastructure
     25  allocate descriptor pools, map the descriptors, push/pop to queues etc. For
|
/openbmc/openbmc/meta-openembedded/meta-oe/recipes-devtools/php/php/ |
php-fpm.conf
     73  ; the global number of processes when using dynamic PM within a lot of pools.
    114  ; Multiple pools of child processes may be started with different listening
    116  ; used in logs and stats. There is no limitation on the number of pools which
    135  ;prefix = /path/to/pools/$pool
|
/openbmc/openbmc/meta-openembedded/meta-networking/recipes-daemons/ippool/ |
ippool_1.3.bb
      3  to allow any application to use its address pools. This makes it possible \
      4  to define address pools that are shared by PPP, L2TP, PPTP etc. It may be \
|
/openbmc/linux/kernel/dma/ |
swiotlb.c
     91  .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
    310  list_add_rcu(&pool->node, &mem->pools);    in add_mem_pool()
    781  list_for_each_entry_rcu(pool, &mem->pools, node) {    in swiotlb_find_pool()
   1165  list_for_each_entry_rcu(pool, &mem->pools, node) {    in swiotlb_find_slots()
   1289  list_for_each_entry_rcu(pool, &mem->pools, node)    in mem_used()
   1738  INIT_LIST_HEAD_RCU(&mem->pools);    in rmem_swiotlb_device_init()
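
swiotlb keeps its (possibly growing) set of bounce-buffer pools on an RCU-protected list: writers publish with list_add_rcu() under a lock, and readers walk the list locklessly with list_for_each_entry_rcu(), as the excerpt shows. A minimal sketch of that pattern with hypothetical demo_* types:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical pool descriptor and its container. */
struct demo_pool {
    struct list_head node;
    unsigned long start, nslabs;
};

struct demo_mem {
    struct list_head pools;           /* RCU-protected list of demo_pool */
    spinlock_t lock;                  /* serializes writers only; initialized
                                       * with spin_lock_init() at setup */
};

static void demo_add_pool(struct demo_mem *mem, struct demo_pool *pool)
{
    spin_lock(&mem->lock);
    list_add_rcu(&pool->node, &mem->pools);   /* publish to lockless readers */
    spin_unlock(&mem->lock);
}

static struct demo_pool *demo_find_pool(struct demo_mem *mem, unsigned long addr)
{
    struct demo_pool *pool, *found = NULL;

    rcu_read_lock();
    list_for_each_entry_rcu(pool, &mem->pools, node) {
        if (addr >= pool->start && addr < pool->start + pool->nslabs) {
            found = pool;
            break;
        }
    }
    rcu_read_unlock();
    return found;
}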
|
/openbmc/linux/Documentation/networking/ |
page_pool.rst
     46  The number of pools created **must** match the number of hardware queues
     99  with fragmented page pools.
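
page_pool.rst documents the page_pool recycling allocator drivers use for RX buffers, typically one pool per hardware queue (hence the requirement quoted above that the pool count match the queue count). A hedged driver-side sketch; the header path and the exact set of page_pool_params fields vary across kernel versions:

#include <net/page_pool.h>            /* newer trees: <net/page_pool/helpers.h> */
#include <linux/dma-direction.h>
#include <linux/numa.h>

/* Hypothetical per-RX-queue setup: 'dev' is the netdev's struct device. */
static struct page_pool *demo_create_rx_pool(struct device *dev)
{
    struct page_pool_params pp = {
        .order     = 0,               /* single pages */
        .pool_size = 256,             /* roughly the RX ring size */
        .nid       = NUMA_NO_NODE,
        .dev       = dev,
        .dma_dir   = DMA_FROM_DEVICE,
    };

    return page_pool_create(&pp);     /* returns ERR_PTR() on failure */
}

static void demo_rx_cycle(struct page_pool *pool)
{
    struct page *page = page_pool_dev_alloc_pages(pool);

    if (!page)
        return;

    /* ... post the page to the RX ring, receive into it ... */

    /* Return the whole page to the pool for recycling. */
    page_pool_put_full_page(pool, page, false);
}

/* page_pool_destroy(pool) on queue teardown. */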
|
/openbmc/u-boot/lib/lzma/ |
history.txt
    139  It used Alloc/Free functions from different memory pools.
    140  So if program used two memory pools, it worked incorrectly.
|