// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>

#include "xsk_queue.h"

static void xp_addr_unmap(struct xsk_buff_pool *pool)
{
	vunmap(pool->addrs);
}

static int xp_addr_map(struct xsk_buff_pool *pool,
		       struct page **pages, u32 nr_pages)
{
	pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pool->addrs)
		return -ENOMEM;
	return 0;
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	xp_addr_unmap(pool);
	kvfree(pool->heads);
	kvfree(pool);
}

struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	int err;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)chunk_size - 1);
	pool->addrs_cnt = size;
	pool->heads_cnt = chunks;
	pool->free_heads_cnt = chunks;
	pool->headroom = headroom;
	pool->chunk_size = chunk_size;
	pool->unaligned = unaligned;
	pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
	INIT_LIST_HEAD(&pool->free_list);

	/* Tie every buffer head to the pool and seed the free-head stack. */
	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = chunk_size - headroom;
		pool->free_heads[i] = xskb;
	}

	err = xp_addr_map(pool, pages, nr_pages);
	if (!err)
		return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq)
{
	pool->fq = fq;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	if (pool->dma_pages_cnt == 0)
		return;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = &pool->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

/* Flag pages whose DMA mapping is physically contiguous with the next
 * page's, so that an unaligned chunk is allowed to straddle that boundary.
 */
static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
{
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	dma_addr_t dma;
	u32 i;

	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
				   GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dev;
	pool->dma_pages_cnt = nr_pages;
	pool->dma_need_sync = false;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			xp_dma_unmap(pool, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(pool);
	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	/* Pull addresses from the fill queue until a valid one is found;
	 * invalid descriptors are counted and skipped.
	 */
	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len,
				      DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);
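
/* A minimal sketch, kept inside a comment so it does not affect the build,
 * of how a zero-copy capable driver might drive the pool API above on its
 * RX refill path. The ring type and hypothetical_post_rx_desc() are
 * hypothetical placeholders, not kernel APIs; real drivers have their own
 * descriptor plumbing.
 *
 *	static void hypothetical_rx_refill(struct xsk_buff_pool *pool,
 *					   struct hypothetical_rx_ring *ring,
 *					   u32 budget)
 *	{
 *		struct xdp_buff_xsk *xskb;
 *		struct xdp_buff *xdp;
 *		u32 i;
 *
 *		// Only refill if enough buffers can be produced from the
 *		// recycle list plus the fill queue.
 *		if (!xp_can_alloc(pool, budget))
 *			return;
 *
 *		for (i = 0; i < budget; i++) {
 *			xdp = xp_alloc(pool);
 *			if (!xdp)
 *				break;
 *			xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *			// xp_alloc() has already synced the buffer for the
 *			// device when needed, so its DMA address can be
 *			// posted to hardware directly.
 *			hypothetical_post_rx_desc(ring, xskb->dma);
 *		}
 *	}
 *
 * Buffers return to the pool either through xp_free() (for example after
 * an XDP_DROP verdict) or by the application recycling the address via the
 * fill queue.
 */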