// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
                               u16 queue_id)
{
        if (queue_id >= max_t(unsigned int,
                              dev->real_num_rx_queues,
                              dev->real_num_tx_queues))
                return -EINVAL;

        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].umem = umem;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].umem = umem;

        return 0;
}

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
                                       u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                return dev->_rx[queue_id].umem;
        if (queue_id < dev->real_num_tx_queues)
                return dev->_tx[queue_id].umem;

        return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].umem = NULL;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].umem = NULL;
}

int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u16 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err = 0;

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        rtnl_lock();
        if (xdp_get_umem_from_qid(dev, queue_id)) {
                err = -EBUSY;
                goto out_rtnl_unlock;
        }

        err = xdp_reg_umem_at_qid(dev, umem, queue_id);
        if (err)
                goto out_rtnl_unlock;

        umem->dev = dev;
        umem->queue_id = queue_id;
        if (force_copy)
                /* For copy-mode, we are done. */
                goto out_rtnl_unlock;

        if (!dev->netdev_ops->ndo_bpf ||
            !dev->netdev_ops->ndo_xsk_async_xmit) {
                err = -EOPNOTSUPP;
                goto err_unreg_umem;
        }

        bpf.command = XDP_SETUP_XSK_UMEM;
        bpf.xsk.umem = umem;
        bpf.xsk.queue_id = queue_id;

        err = dev->netdev_ops->ndo_bpf(dev, &bpf);
        if (err)
                goto err_unreg_umem;
        rtnl_unlock();

        dev_hold(dev);
        umem->zc = true;
        return 0;

err_unreg_umem:
        xdp_clear_umem_at_qid(dev, queue_id);
        if (!force_zc)
                err = 0; /* fallback to copy mode */
out_rtnl_unlock:
        rtnl_unlock();
        return err;
}

static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
        struct netdev_bpf bpf;
        int err;

        if (umem->zc) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;

                rtnl_lock();
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
                rtnl_unlock();

                if (err)
                        WARN(1, "failed to disable umem!\n");
        }

        if (umem->dev) {
                rtnl_lock();
                xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
                rtnl_unlock();
        }

        if (umem->zc) {
                dev_put(umem->dev);
                umem->zc = false;
        }
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                struct page *page = umem->pgs[i];

                set_page_dirty_lock(page);
                put_page(page);
        }

        kfree(umem->pgs);
        umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}

static void xdp_umem_release(struct xdp_umem *umem)
{
        struct task_struct *task;
        struct mm_struct *mm;

        xdp_umem_clear_dev(umem);

        ida_simple_remove(&umem_ida, umem->id);

        if (umem->fq) {
                xskq_destroy(umem->fq);
                umem->fq = NULL;
        }

        if (umem->cq) {
                xskq_destroy(umem->cq);
                umem->cq = NULL;
        }

        xsk_reuseq_destroy(umem);

        xdp_umem_unpin_pages(umem);

        task = get_pid_task(umem->pid, PIDTYPE_PID);
        put_pid(umem->pid);
        if (!task)
                goto out;
        mm = get_task_mm(task);
        put_task_struct(task);
        if (!mm)
                goto out;

        mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;

        xdp_umem_unaccount_pages(umem);
out:
        kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
        struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

        xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
        refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
        if (!umem)
                return;

        if (refcount_dec_and_test(&umem->users)) {
                INIT_WORK(&umem->work, xdp_umem_release_deferred);
                schedule_work(&umem->work);
        }
}

/* Pin the umem's user pages so they stay resident while the socket and
 * driver use them.
 */
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
        unsigned int gup_flags = FOLL_WRITE;
        long npgs;
        int err;

        umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
                            GFP_KERNEL | __GFP_NOWARN);
        if (!umem->pgs)
                return -ENOMEM;

        down_write(&current->mm->mmap_sem);
        npgs = get_user_pages(umem->address, umem->npgs,
                              gup_flags, &umem->pgs[0], NULL);
        up_write(&current->mm->mmap_sem);

        if (npgs != umem->npgs) {
                if (npgs >= 0) {
                        umem->npgs = npgs;
                        err = -ENOMEM;
                        goto out_pin;
                }
                err = npgs;
                goto out_pgs;
        }
        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_pgs:
        kfree(umem->pgs);
        umem->pgs = NULL;
        return err;
}

/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK. */
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
        unsigned long lock_limit, new_npgs, old_npgs;

        if (capable(CAP_IPC_LOCK))
                return 0;

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        umem->user = get_uid(current_user());

        do {
                old_npgs = atomic_long_read(&umem->user->locked_vm);
                new_npgs = old_npgs + umem->npgs;
                if (new_npgs > lock_limit) {
                        free_uid(umem->user);
                        umem->user = NULL;
                        return -ENOBUFS;
                }
        } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
                                     new_npgs) != old_npgs);
        return 0;
}

static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        unsigned int chunks, chunks_per_page;
        u64 addr = mr->addr, size = mr->len;
        int size_chk, err, i;

        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
                 * - huge pages, or
                 * - using an IOMMU, or
                 * - making sure the memory area is consecutive
                 * but for now, we simply say "computer says no".
                 */
                return -EINVAL;
        }

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr)) {
                /* Memory area has to be page size aligned. For
                 * simplicity, this might change.
                 */
                return -EINVAL;
        }

        if ((addr + size) < addr)
                return -EINVAL;

        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;

        chunks_per_page = PAGE_SIZE / chunk_size;
        if (chunks < chunks_per_page || chunks % chunks_per_page)
                return -EINVAL;

        headroom = ALIGN(headroom, 64);

        size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
        if (size_chk < 0)
                return -EINVAL;

        umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->chunk_mask = ~((u64)chunk_size - 1);
        umem->size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
        umem->npgs = size / PAGE_SIZE;
        umem->pgs = NULL;
        umem->user = NULL;
        INIT_LIST_HEAD(&umem->xsk_list);
        spin_lock_init(&umem->xsk_list_lock);

        refcount_set(&umem->users, 1);

        err = xdp_umem_account_pages(umem);
        if (err)
                goto out;

        err = xdp_umem_pin_pages(umem);
        if (err)
                goto out_account;

        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
                goto out_account;
        }

        for (i = 0; i < umem->npgs; i++)
                umem->pages[i].addr = page_address(umem->pgs[i]);

        return 0;

out_account:
        xdp_umem_unaccount_pages(umem);
out:
        put_pid(umem->pid);
        return err;
}

struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
        struct xdp_umem *umem;
        int err;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
        if (err < 0) {
                kfree(umem);
                return ERR_PTR(err);
        }
        umem->id = err;

        err = xdp_umem_reg(umem, mr);
        if (err) {
                ida_simple_remove(&umem_ida, umem->id);
                kfree(umem);
                return ERR_PTR(err);
        }

        return umem;
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
        return umem->fq && umem->cq;
}
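
/* Illustrative userspace sketch, not part of this file or the kernel build:
 * how a buffer ends up in xdp_umem_reg() above. It assumes the AF_XDP uapi
 * shipped with this kernel (<linux/if_xdp.h>: struct xdp_umem_reg and the
 * XDP_UMEM_REG setsockopt). NUM_FRAMES and FRAME_SIZE are arbitrary example
 * values chosen to satisfy the checks in xdp_umem_reg(): a page-aligned
 * address, a power-of-2 chunk_size in [XDP_UMEM_MIN_CHUNK_SIZE, PAGE_SIZE],
 * and a length that is a whole number of pages.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_xdp.h>
 *
 *	#ifndef AF_XDP
 *	#define AF_XDP 44
 *	#endif
 *	#ifndef SOL_XDP
 *	#define SOL_XDP 283
 *	#endif
 *
 *	#define NUM_FRAMES 4096
 *	#define FRAME_SIZE 2048		// >= XDP_UMEM_MIN_CHUNK_SIZE
 *
 *	int main(void)
 *	{
 *		struct xdp_umem_reg mr = {};
 *		void *bufs;
 *		int fd;
 *
 *		fd = socket(AF_XDP, SOCK_RAW, 0);
 *		if (fd < 0) {
 *			perror("socket(AF_XDP)");
 *			return 1;
 *		}
 *
 *		// Page-aligned backing memory, as xdp_umem_reg() requires.
 *		if (posix_memalign(&bufs, getpagesize(),
 *				   NUM_FRAMES * FRAME_SIZE))
 *			return 1;
 *
 *		mr.addr = (__u64)(unsigned long)bufs;
 *		mr.len = NUM_FRAMES * FRAME_SIZE;
 *		mr.chunk_size = FRAME_SIZE;
 *		mr.headroom = 0;
 *
 *		// Handled by xdp_umem_create()/xdp_umem_reg() in this file.
 *		if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr))) {
 *			perror("setsockopt(XDP_UMEM_REG)");
 *			return 1;
 *		}
 *
 *		return 0;
 *	}
 *
 * Binding the socket to a device queue (which reaches xdp_umem_assign_dev())
 * additionally requires fill and completion rings, as checked by
 * xdp_umem_validate_queues(); that part is omitted here.
 */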