/openbmc/linux/net/sctp/
inqueue.c:

    /* Properly release the chunk which is being worked on. */
    static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
    {
        if (chunk->head_skb)
            chunk->skb = chunk->head_skb;
        sctp_chunk_free(chunk);
    }

    /* in sctp_inq_free() */
        struct sctp_chunk *chunk, *tmp;
        …
        list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
            list_del_init(&chunk->list);
            sctp_chunk_free(chunk);
        }

    void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
    …
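The sctp_inq_free() fragment above shows the "safe" teardown idiom: list_for_each_entry_safe() caches the next entry in tmp so that the current chunk can be freed mid-walk. A minimal userspace sketch of the same idiom, assuming nothing from the kernel (plain singly linked list, illustrative names):

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        int id;
        struct chunk *next;
    };

    /* Free every node: read the successor *before* freeing the node. */
    static void queue_free(struct chunk **head)
    {
        struct chunk *c = *head;

        while (c) {
            struct chunk *tmp = c->next;   /* saved before free() */

            printf("freeing chunk %d\n", c->id);
            free(c);
            c = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct chunk *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct chunk *c = malloc(sizeof(*c));

            if (!c)
                return 1;
            c->id = i;
            c->next = head;
            head = c;
        }
        queue_free(&head);
        return 0;
    }

The only invariant that matters is reading the successor before the free; the kernel macro enforces it with its extra tmp cursor.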
chunk.c:

    /* This file contains the code relating to the chunk abstraction. */
    …
    /* in sctp_datamsg_free() */
        struct sctp_chunk *chunk;
        …
        list_for_each_entry(chunk, &msg->chunks, frag_list)
            sctp_chunk_free(chunk);

    /* in sctp_datamsg_destroy() */
        struct sctp_chunk *chunk;
        …
        chunk = list_entry(pos, struct sctp_chunk, frag_list);
        …
        sctp_chunk_put(chunk);
        …
        asoc = chunk->asoc;
        …
        sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
        …
        ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
    …
output.c:

    /* parameter lines of four forward declarations */
                   struct sctp_chunk *chunk);
                   struct sctp_chunk *chunk);
                   struct sctp_chunk *chunk);
                   struct sctp_chunk *chunk,
    …
    /* If there is a prepend chunk, stick it on the list before … */
    /* in sctp_packet_config() */
        struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

        if (chunk)
            sctp_packet_append_chunk(packet, chunk);

    /* in sctp_packet_free() */
        struct sctp_chunk *chunk, *tmp;
        …
        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    …
sm_statefuns.c:

    /* parameter lines of forward declarations */
                   struct sctp_chunk *chunk,
                   struct sctp_chunk *chunk,
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk,
    …
    static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
    …
                   struct sctp_chunk *chunk);
    …
    /* Small helper function that checks if the chunk length
     * …
     * is set to be the size of a specific chunk we are testing.
     */
    static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
        …
        __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
    …
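sctp_chunk_length_valid() operates on a wire-format header, so the length field must come through ntohs() before it can be compared against the expected size. A simplified, self-contained sketch of that check — the struct layout is hypothetical and the in-kernel helper performs additional checks:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wire-format header: length is big-endian and covers header + payload. */
    struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* network byte order */
    };

    static bool chunk_length_valid(const struct chunk_hdr *hdr,
                                   uint16_t required_length)
    {
        uint16_t chunk_length = ntohs(hdr->length);   /* swap before comparing */

        return chunk_length >= required_length;
    }

    int main(void)
    {
        struct chunk_hdr hdr = { .type = 1, .flags = 0,
                                 .length = htons(sizeof(hdr)) };

        printf("valid: %d\n", chunk_length_valid(&hdr, sizeof(hdr)));
        return 0;
    }

Comparing before the byte swap is a classic bug: on a little-endian host a wire length of 0x0014 would read as 0x1400 and pass almost any minimum-size check.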
outqueue.c:

    /* Add data chunk to the end of the queue. */
    …
    /* in __sctp_outq_teardown() */
        struct sctp_chunk *chunk, *tmp;
        …
        chunk = list_entry(lchunk, struct sctp_chunk,
        …
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
        …
        chunk = list_entry(lchunk, struct sctp_chunk,
        …
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
        …
        chunk = list_entry(lchunk, struct sctp_chunk,
        …
        sctp_chunk_fail(chunk, q->error);
    …
sm_make_chunk.c:

    static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
    …
    /* Control chunk destructor */
    /* in sctp_control_release_owner() */
        struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;

        if (chunk->shkey) {
            struct sctp_shared_key *shkey = chunk->shkey;
            struct sctp_association *asoc = chunk->asoc;
            …
            sctp_auth_shkey_release(chunk->shkey);
        }

    static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
    {
        struct sctp_association *asoc = chunk->asoc;
        struct sk_buff *skb = chunk->skb;
    …
/openbmc/linux/mm/
percpu-vm.c:

    /*
     * mm/percpu-vm.c - vmalloc area based chunk allocation
     * …
     * This is the default chunk allocator.
     */

    static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
        …
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
    …
    /**
     * pcpu_free_pages - free pages which were allocated for @chunk
     * @chunk: chunk pages were allocated for
     * …
     * The pages were allocated for @chunk.
     */
    static void pcpu_free_pages(struct pcpu_chunk *chunk,
    …
percpu.c:

    /*
     * …
     * There is special consideration for the first chunk which must handle
     * …
     * are not online yet. In short, the first chunk is structured like so:
     * …
     * The allocator tries to allocate from the fullest chunk first. Each chunk
     * …
     * of the bitmap. The reverse mapping from page to chunk is stored in
     * …
     * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
     * …
     * setup the first chunk containing the kernel static percpu area
     */
    …
    /* the address of the first chunk which starts with the kernel static area */
    …
    /*
     * The first chunk which always exists. Note that unlike other
     * …
     * Optional reserved chunk. This chunk reserves part of the first
     * chunk and serves it for reserved allocations. When the reserved
    …
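The comments above describe a bitmap allocator: each bit covers one PCPU_MIN_ALLOC_SIZE fragment, and allocating means finding a long-enough run of clear bits. A toy first-fit version of that scheme, with all sizes invented for the demo:

    #include <stdio.h>

    #define NR_FRAGS  64     /* fragments per toy chunk */
    #define FRAG_SIZE 4      /* bytes; stands in for PCPU_MIN_ALLOC_SIZE */

    static unsigned char alloc_map[NR_FRAGS / 8];

    static int  test_bit(int n) { return alloc_map[n / 8] >> (n % 8) & 1; }
    static void set_bit(int n)  { alloc_map[n / 8] |= 1 << (n % 8); }

    /* First fit: returns the byte offset of the allocation, or -1. */
    static int toy_alloc(int size)
    {
        int frags = (size + FRAG_SIZE - 1) / FRAG_SIZE;

        for (int start = 0; start + frags <= NR_FRAGS; start++) {
            int run = 0;

            while (run < frags && !test_bit(start + run))
                run++;
            if (run == frags) {
                for (int i = 0; i < frags; i++)
                    set_bit(start + i);
                return start * FRAG_SIZE;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("a = %d\n", toy_alloc(10));  /* 3 fragments -> offset 0 */
        printf("b = %d\n", toy_alloc(4));   /* 1 fragment  -> offset 12 */
        return 0;
    }

The real allocator layers per-block metadata (contig hints, first-free offsets) on top of this bitmap so it rarely has to scan bit by bit.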
percpu-km.c:

    /*
     * mm/percpu-km.c - kernel memory based chunk allocation
     * …
     * - NUMA is not supported. When setting up the first chunk,
     * …
     * - It's best if the chunk size is a power-of-two multiple of
     *   PAGE_SIZE. Because each chunk is allocated as a contiguous
     *   … chunk size is not aligned. percpu-km code will whine about it.
     */

    #error "contiguous percpu allocation is incompatible with paged first chunk"
    …
    static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
    …
    static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
    …
    static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
    …
    /* in pcpu_create_chunk() */
        struct pcpu_chunk *chunk;
    …
percpu-stats.c:

    /* in find_max_nr_alloc() */
        struct pcpu_chunk *chunk;
        …
        list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list)
            max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);

    /*
     * Prints out chunk state. Fragmentation is considered between
     * the beginning of the chunk and the last allocation.
     * …
     */
    static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
        …
        struct pcpu_block_md *chunk_md = &chunk->chunk_md;
        …
        last_alloc = find_last_bit(chunk->alloc_map,
                                   pcpu_chunk_map_bits(chunk) -
                                   chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1);
    …
percpu-internal.h:

    /*
     * Each chunk's bitmap is split into a number of full blocks.
     * …
     */
    …
    int free_bytes;        /* free bytes in the chunk */
    …
    /*
     * base_addr is the base address of this chunk.
     * …
     */
    void *data;            /* chunk data */
    …
    bool isolated;         /* isolated from active chunk … */
    …
    int nr_pages;          /* # of pages served by this chunk */
    …
    /**
     * pcpu_chunk_nr_blocks - …
     * @chunk: chunk of interest
     *
     * This conversion is from the number of physical pages that the chunk
     * …
     */
    static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
    {
        return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
    }
    …
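pcpu_chunk_nr_blocks() is pure unit conversion: pages served by the chunk, times bytes per page, divided by bytes per bitmap block. A standalone version with an assumed 1024-byte block size (in the kernel, PCPU_BITMAP_BLOCK_SIZE is derived from PAGE_SIZE):

    #include <stdio.h>

    #define PAGE_SIZE              4096UL
    #define PCPU_BITMAP_BLOCK_SIZE 1024UL   /* assumed for the demo */

    /* pages -> bitmap blocks, same arithmetic as pcpu_chunk_nr_blocks() */
    static unsigned long chunk_nr_blocks(unsigned long nr_pages)
    {
        return nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
    }

    int main(void)
    {
        /* a 3-page chunk covers 3 * 4096 / 1024 = 12 blocks */
        printf("%lu\n", chunk_nr_blocks(3));
        return 0;
    }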
/openbmc/linux/net/sunrpc/xprtrdma/
svc_rdma_pcl.c:

    /**
     * pcl_free - Release all memory associated with a parsed chunk list
     * @pcl: parsed chunk list
     * …
     */
    /* in pcl_free() */
        struct svc_rdma_chunk *chunk;
        …
        chunk = pcl_first_chunk(pcl);
        list_del(&chunk->ch_list);
        kfree(chunk);

    /* in pcl_alloc_chunk() */
        struct svc_rdma_chunk *chunk;

        chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);
        if (!chunk)
            …
        chunk->ch_position = position;
    …
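pcl_alloc_chunk() sizes its kmalloc() with struct_size(), i.e. one allocation holding the header plus a flexible array of segments. A userspace equivalent using a C99 flexible array member — field names are illustrative, and the kernel macro additionally guards the multiplication against overflow:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct seg {
        uint32_t handle;
        uint32_t length;
    };

    struct chunk {
        unsigned int position;
        unsigned int segcount;
        struct seg   segments[];   /* flexible array member */
    };

    static struct chunk *chunk_alloc(unsigned int position,
                                     unsigned int segcount)
    {
        /* header and array in a single allocation */
        struct chunk *chunk = malloc(sizeof(*chunk) +
                                     segcount * sizeof(struct seg));

        if (!chunk)
            return NULL;
        chunk->position = position;
        chunk->segcount = segcount;
        return chunk;
    }

    int main(void)
    {
        struct chunk *c = chunk_alloc(0, 4);

        if (c)
            printf("one allocation, %u segments\n", c->segcount);
        free(c);
        return 0;
    }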
/openbmc/linux/include/linux/sunrpc/
svc_rdma_pcl.h:

    /**
     * pcl_init - Initialize a parsed chunk list
     * @pcl: parsed chunk list to initialize
     * …
     */
    …
    /**
     * pcl_is_empty - Return true if parsed chunk list is empty
     * @pcl: parsed chunk list
     * …
     */
    …
    /**
     * pcl_first_chunk - Return first chunk in a parsed chunk list
     * @pcl: parsed chunk list
     *
     * Returns the first chunk in the list, or NULL if the list is empty.
     */
    …
    /**
     * pcl_next_chunk - Return next chunk in a parsed chunk list
     * @pcl: a parsed chunk list
     * @chunk: chunk in @pcl
     */
    …
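The helpers documented here follow the usual circular-list conventions: an empty list is one whose head points at itself, and the first chunk is recovered from its embedded list node. A compact userspace sketch of pcl_is_empty() and pcl_first_chunk() with a hand-rolled container_of — the real header builds on struct list_head:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    struct chunk {
        struct list_head ch_list;   /* embedded list node */
        unsigned int position;
    };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static bool pcl_is_empty(const struct list_head *pcl)
    {
        return pcl->next == pcl;    /* head points at itself */
    }

    /* First chunk, or NULL when the list is empty. */
    static struct chunk *pcl_first_chunk(struct list_head *pcl)
    {
        if (pcl_is_empty(pcl))
            return NULL;
        return (struct chunk *)((char *)pcl->next -
                                offsetof(struct chunk, ch_list));
    }

    int main(void)
    {
        struct list_head pcl;

        list_init(&pcl);
        printf("empty=%d first=%p\n", pcl_is_empty(&pcl),
               (void *)pcl_first_chunk(&pcl));
        return 0;
    }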
/openbmc/linux/kernel/
audit_tree.c:

    struct audit_chunk *chunk;        /* struct member */
    …
    /*
     * One struct chunk is attached to each inode of interest through
     * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
     * untagging, the mark is stable as long as there is chunk attached. The
     * association between mark and chunk is protected by hash_lock and
     * …
     * the current chunk.
     * …
     * References to struct chunk are collected at audit_inode{,_child}()
     * …
     * tree.chunks anchors chunk.owners[].list                hash_lock
     * …
     * chunk.trees anchors tree.same_root                     hash_lock
     * chunk.hash is a hash with middle bits of watch.inode as
    …
/openbmc/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c:

    /* … per chunk. Note that the chunks are not necessarily in contiguous … */
    …
    static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    {
        …
        if (chunk->nsg > 0)
            dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
                         …

        for (i = 0; i < chunk->npages; ++i)
            __free_pages(sg_page(&chunk->sg[i]),
                         get_order(chunk->sg[i].length));
    }

    static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    {
        …
        for (i = 0; i < chunk->npages; ++i)
            …
                chunk->buf[i].size,
    …
/openbmc/linux/kernel/trace/
pid_list.c:

    /* in get_lower_chunk() */
        union lower_chunk *chunk;
        …
        chunk = pid_list->lower_list;
        pid_list->lower_list = chunk->next;
        …
        chunk->next = NULL;
        …
        return chunk;

    /* in get_upper_chunk() */
        union upper_chunk *chunk;
        …
        chunk = pid_list->upper_list;
        pid_list->upper_list = chunk->next;
        …
        chunk->next = NULL;
        …
        return chunk;
    …
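Both helpers pop a preallocated chunk off a singly linked free list and detach it by clearing next. The same pattern in standalone C — pool size and payload shape are invented for the demo:

    #include <stdio.h>

    union chunk {
        union chunk  *next;       /* valid only while on the free list */
        unsigned long data[64];   /* payload once handed out */
    };

    static union chunk pool[4];
    static union chunk *free_list;

    static void put_chunk(union chunk *chunk)
    {
        chunk->next = free_list;  /* push onto the free list */
        free_list = chunk;
    }

    static union chunk *get_chunk(void)
    {
        union chunk *chunk = free_list;

        if (chunk) {
            free_list = chunk->next;
            chunk->next = NULL;   /* detach, as get_lower_chunk() does */
        }
        return chunk;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            put_chunk(&pool[i]);
        printf("popped %p\n", (void *)get_chunk());
        return 0;
    }

Overlaying the link pointer on the payload (the union) costs no extra memory: a chunk is either free and linked, or in use and carrying data, never both.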
/openbmc/u-boot/fs/yaffs2/
yaffs_bitmap.c:

    /*
     * Chunk bitmap manipulations
     */
    …
    void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
    {
        if (… ||
            chunk < 0 || chunk >= dev->param.chunks_per_block) {
            …
                "Chunk Id (%d:%d) invalid",
                blk, chunk);
        }
    }
    …
    void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
    {
        …
        yaffs_verify_chunk_bit_id(dev, blk, chunk);
        blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
    }

    void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
    {
        …
        yaffs_verify_chunk_bit_id(dev, blk, chunk);
    …
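The bit twiddling here is the classic byte-array bitmap: chunk / 8 picks the byte, 1 << (chunk & 7) picks the bit inside it. The arithmetic in isolation, with a check helper added for the demo:

    #include <stdio.h>

    #define CHUNKS_PER_BLOCK 64

    static unsigned char blk_bits[CHUNKS_PER_BLOCK / 8];

    /* byte = chunk / 8, bit = chunk & 7 — same arithmetic as yaffs */
    static void set_chunk_bit(int chunk)
    {
        blk_bits[chunk / 8] |= 1 << (chunk & 7);
    }

    static void clear_chunk_bit(int chunk)
    {
        blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
    }

    static int check_chunk_bit(int chunk)
    {
        return blk_bits[chunk / 8] >> (chunk & 7) & 1;
    }

    int main(void)
    {
        set_chunk_bit(10);
        printf("bit 10: %d\n", check_chunk_bit(10));   /* 1 */
        clear_chunk_bit(10);
        printf("bit 10: %d\n", check_chunk_bit(10));   /* 0 */
        return 0;
    }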
/openbmc/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c:

    /* in page_to_drm() */
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;

    /* in nouveau_dmem_page_addr() */
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        … chunk->pagemap.range.start;

        return chunk->bo->offset + off;

    /* in nouveau_dmem_page_free() */
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;
        …
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
    …
/openbmc/linux/drivers/s390/cio/
itcw.c:

    /* in itcw_calc_size() */
        /* … to the placement of the data chunk in memory, and a further … */
    …
    /* in itcw_init() */
        void *chunk;
        …
        chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
        if (IS_ERR(chunk))
            return chunk;
        itcw = chunk;
        …
        chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
        if (IS_ERR(chunk))
            return chunk;
        itcw->tcw = chunk;
    …
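itcw_init() carves several aligned pieces out of a single memory area: fit_chunk() rounds the cursor up to the requested alignment (1 byte for the itcw itself, 64 bytes for the tcw) and fails when the piece no longer fits before end. A sketch of that carving logic — function and buffer are invented, and alignment must be a power of two:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned char buf[256];

    /*
     * Carve an aligned chunk out of [*start, end); returns NULL when the
     * aligned piece would not fit, mimicking fit_chunk()'s error path.
     */
    static void *carve_chunk(uintptr_t *start, uintptr_t end,
                             size_t size, size_t align)
    {
        uintptr_t addr = (*start + align - 1) & ~(uintptr_t)(align - 1);

        if (addr + size > end)
            return NULL;
        *start = addr + size;   /* advance the cursor past this chunk */
        return (void *)addr;
    }

    int main(void)
    {
        uintptr_t start = (uintptr_t)buf;
        uintptr_t end = start + sizeof(buf);
        void *itcw = carve_chunk(&start, end, 40, 1);
        void *tcw  = carve_chunk(&start, end, 64, 64);  /* 64-byte aligned */

        printf("itcw=%p tcw=%p\n", itcw, tcw);
        return 0;
    }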
/openbmc/linux/include/net/sctp/
sm.h:

    /* Prototypes for chunk state functions. */
    …
    /* Prototypes for chunk-building functions. */
    …
    /* parameter lines from several multi-line prototypes */
                   const struct sctp_chunk *chunk,
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk);
                   const struct sctp_chunk *chunk);
    int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
    …
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c:

    u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
    {
        u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

        return (u64)offset * chunk->seg;
    }

    u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
    {
        return chunk->buddy_mem->icm_mr->mkey;
    }

    u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
    {
        u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

        return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
    }

    u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
    {
        return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
    …
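Every accessor above derives a chunk property from its backing buddy region: the MR offset and the ICM address are both base plus entry size times segment index. The arithmetic on its own, with made-up field names and numbers:

    #include <stdint.h>
    #include <stdio.h>

    struct icm_chunk {
        uint64_t icm_start_addr;   /* base of the backing region */
        uint32_t entry_size;       /* bytes per entry for this ICM type */
        uint32_t seg;              /* segment index inside the region */
    };

    static uint64_t chunk_icm_addr(const struct icm_chunk *c)
    {
        return c->icm_start_addr + (uint64_t)c->entry_size * c->seg;
    }

    int main(void)
    {
        struct icm_chunk c = { .icm_start_addr = 0x100000,
                               .entry_size = 64, .seg = 3 };

        /* 0x100000 + 64 * 3 = 0x1000c0 */
        printf("0x%llx\n", (unsigned long long)chunk_icm_addr(&c));
        return 0;
    }

Keeping a chunk as a (region, seg) pair instead of cached pointers keeps the struct small; every address is recomputed on demand from two fields.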
/openbmc/linux/lib/
genalloc.c:

    static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
    {
        return chunk->end_addr - chunk->start_addr + 1;
    }
    …
    /**
     * gen_pool_add_owner - add a new chunk of special memory to the pool
     * @pool: pool to add new memory chunk to
     * @virt: virtual starting address of memory chunk to add to pool
     * @phys: physical starting address of memory chunk to add to pool
     * @size: size in bytes of the memory chunk to add to pool
     * @nid: node id of the node the chunk structure and bitmap should be
     *       …
     *
     * Add a new chunk of special memory to the specified pool.
     * …
     */
    /* in gen_pool_add_owner() */
        struct gen_pool_chunk *chunk;
    …
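chunk_size() returns end_addr - start_addr + 1 because end_addr holds the address of the chunk's last byte: the range is inclusive at both ends. A one-line demo of why the +1 matters:

    #include <stdio.h>

    /* size of the inclusive range [start_addr, end_addr] */
    static unsigned long chunk_size(unsigned long start_addr,
                                    unsigned long end_addr)
    {
        return end_addr - start_addr + 1;
    }

    int main(void)
    {
        /* a chunk spanning 0x1000..0x1fff is exactly 4096 bytes */
        printf("%lu\n", chunk_size(0x1000, 0x1fff));
        return 0;
    }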
/openbmc/linux/drivers/infiniband/hw/irdma/
pble.c:

    /* in irdma_destroy_pble_prm() */
        struct irdma_chunk *chunk;
        …
        chunk = (struct irdma_chunk *)pinfo->clist.next;
        list_del(&chunk->list);
        if (chunk->type == PBLE_SD_PAGED)
            irdma_pble_free_paged_mem(chunk);
        bitmap_free(chunk->bitmapbuf);
        kfree(chunk->chunkmem.va);
    …
    /* in add_sd_direct() */
        struct irdma_chunk *chunk = info->chunk;
        …
        chunk->type = PBLE_SD_CONTIGOUS;
        …
        chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
    …
/openbmc/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.c:

    /* in amdgpu_mux_resubmit_chunks() */
        struct amdgpu_mux_chunk *chunk;
        …
        list_for_each_entry(chunk, &e->list, entry) {
            if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
                …
                    chunk->sync_seq,
                …
                if (chunk->sync_seq ==
                …
                    if (chunk->cntl_offset <= e->ring->buf_mask)
                        …
                            chunk->cntl_offset);
                    if (chunk->ce_offset <= e->ring->buf_mask)
                        amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
                    if (chunk->de_offset <= e->ring->buf_mask)
    …
/openbmc/bmcweb/redfish-core/schema/dmtf/json-schema/
MemoryChunks.v1_6_2.json:

    "description": "This is an interleave set for a memory chunk.",
    …ongDescription": "This type shall describe an interleave set of which the memory chunk is a part.",
    …
    "description": "An array of links to the CXL logical devices associated with this memory chunk.",
    …type `CXLLogicalDevice` that represent the CXL logical devices associated with this memory chunk.",
    …
    "description": "An array of links to the endpoints that connect to this memory chunk.",
    …ll contain a link to the resources of type `Endpoint` with which this memory chunk is associated.",
    …
    …cription": "An array of links to the memory regions for which this memory chunk provides capacity.…
    …pe `MemoryRegion` that represent the memory regions for which this memory chunk provides capacity.…
    …
    "Local": "The memory chunk was created using local media.",
    "Mixed": "The memory chunk was created using both local media and remote media accessible through …
    …