1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <rdma/ib_umem.h> 34 #include <rdma/ib_umem_odp.h> 35 #include <linux/kernel.h> 36 #include <linux/dma-buf.h> 37 #include <linux/dma-resv.h> 38 39 #include "mlx5_ib.h" 40 #include "cmd.h" 41 #include "umr.h" 42 #include "qp.h" 43 44 #include <linux/mlx5/eq.h> 45 46 /* Contains the details of a pagefault. */ 47 struct mlx5_pagefault { 48 u32 bytes_committed; 49 u32 token; 50 u8 event_subtype; 51 u8 type; 52 union { 53 /* Initiator or send message responder pagefault details. */ 54 struct { 55 /* Received packet size, only valid for responders. */ 56 u32 packet_size; 57 /* 58 * Number of resource holding WQE, depends on type. 59 */ 60 u32 wq_num; 61 /* 62 * WQE index. Refers to either the send queue or 63 * receive queue, according to event_subtype. 64 */ 65 u16 wqe_index; 66 } wqe; 67 /* RDMA responder pagefault details */ 68 struct { 69 u32 r_key; 70 /* 71 * Received packet size, minimal size page fault 72 * resolution required for forward progress. 73 */ 74 u32 packet_size; 75 u32 rdma_op_len; 76 u64 rdma_va; 77 } rdma; 78 }; 79 80 struct mlx5_ib_pf_eq *eq; 81 struct work_struct work; 82 }; 83 84 #define MAX_PREFETCH_LEN (4*1024*1024U) 85 86 /* Timeout in ms to wait for an active mmu notifier to complete when handling 87 * a pagefault. 
 */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT

static u64 mlx5_imr_ksm_entries;

static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *imr, int flags)
{
	struct mlx5_klm *end = pklm + nentries;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (; pklm != end; pklm++, idx++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
			pklm->va = 0;
		}
		return;
	}

	/*
	 * The locking here is pretty subtle. Ideally the implicit_children
	 * xarray would be protected by the umem_mutex, however that is not
	 * possible. Instead this uses a weaker update-then-lock pattern:
	 *
	 *    xa_store()
	 *    mutex_lock(umem_mutex)
	 *     mlx5r_umr_update_xlt()
	 *    mutex_unlock(umem_mutex)
	 *    destroy lkey
	 *
	 * i.e. any change to the xarray must be followed by the locked
	 * update_xlt before destroying.
	 *
	 * The umem_mutex provides the acquire/release semantic needed to make
	 * the xa_store() visible to a racing thread.
	 */
	lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);

	for (; pklm != end; pklm++, idx++) {
		struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);

		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		if (mtt) {
			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
		} else {
			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
			pklm->va = 0;
		}
	}
}

static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;

	return mtt_entry;
}

static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *mr, int flags)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	dma_addr_t pa;
	size_t i;

	if (flags & MLX5_IB_UPD_XLT_ZAP)
		return;

	for (i = 0; i < nentries; i++) {
		pa = odp->dma_list[idx + i];
		pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
	}
}

void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags)
{
	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		populate_klm(xlt, idx, nentries, mr, flags);
	} else {
		populate_mtt(xlt, idx, nentries, mr, flags);
	}
}

/*
 * This must be called after the mr has been removed from implicit_children.
 * NOTE: The MR does not necessarily have to be empty here; parallel page
 * faults could have raced with the free process and added pages to it.
 */
static void free_implicit_child_mr_work(struct work_struct *work)
{
	struct mlx5_ib_mr *mr =
		container_of(work, struct mlx5_ib_mr, odp_destroy.work);
	struct mlx5_ib_mr *imr = mr->parent;
	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	mutex_lock(&odp_imr->umem_mutex);
	mlx5r_umr_update_xlt(mr->parent,
			     ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
			     MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);

	mlx5r_deref_odp_mkey(&imr->mmkey);
}

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *imr = mr->parent;

	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
		return;

	xa_erase(&imr->implicit_children, idx);

	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
	queue_work(system_unbound_wq, &mr->odp_destroy.work);
}

static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	u64 invalidations = 0;
	unsigned long start;
	unsigned long end;
	int in_block = 0;
	u64 addr;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/*
	 * If npages is zero then umem_odp->private may not be setup yet. This
	 * does not complete until after the first page is mapped for DMA.
	 */
	if (!umem_odp->npages)
		goto out;
	mr = umem_odp->private;

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */
	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
272 */ 273 if (umem_odp->dma_list[idx] & 274 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) { 275 if (!in_block) { 276 blk_start_idx = idx; 277 in_block = 1; 278 } 279 280 /* Count page invalidations */ 281 invalidations += idx - blk_start_idx + 1; 282 } else { 283 u64 umr_offset = idx & umr_block_mask; 284 285 if (in_block && umr_offset == 0) { 286 mlx5r_umr_update_xlt(mr, blk_start_idx, 287 idx - blk_start_idx, 0, 288 MLX5_IB_UPD_XLT_ZAP | 289 MLX5_IB_UPD_XLT_ATOMIC); 290 in_block = 0; 291 } 292 } 293 } 294 if (in_block) 295 mlx5r_umr_update_xlt(mr, blk_start_idx, 296 idx - blk_start_idx + 1, 0, 297 MLX5_IB_UPD_XLT_ZAP | 298 MLX5_IB_UPD_XLT_ATOMIC); 299 300 mlx5_update_odp_stats(mr, invalidations, invalidations); 301 302 /* 303 * We are now sure that the device will not access the 304 * memory. We can safely unmap it, and mark it as dirty if 305 * needed. 306 */ 307 308 ib_umem_odp_unmap_dma_pages(umem_odp, start, end); 309 310 if (unlikely(!umem_odp->npages && mr->parent)) 311 destroy_unused_implicit_child_mr(mr); 312 out: 313 mutex_unlock(&umem_odp->umem_mutex); 314 return true; 315 } 316 317 const struct mmu_interval_notifier_ops mlx5_mn_ops = { 318 .invalidate = mlx5_ib_invalidate_range, 319 }; 320 321 static void internal_fill_odp_caps(struct mlx5_ib_dev *dev) 322 { 323 struct ib_odp_caps *caps = &dev->odp_caps; 324 325 memset(caps, 0, sizeof(*caps)); 326 327 if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0)) 328 return; 329 330 caps->general_caps = IB_ODP_SUPPORT; 331 332 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 333 dev->odp_max_size = U64_MAX; 334 else 335 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT); 336 337 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) 338 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; 339 340 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive)) 341 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; 342 343 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send)) 344 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND; 345 346 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive)) 347 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV; 348 349 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write)) 350 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE; 351 352 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) 353 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; 354 355 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic)) 356 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; 357 358 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive)) 359 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; 360 361 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send)) 362 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND; 363 364 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive)) 365 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV; 366 367 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write)) 368 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE; 369 370 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read)) 371 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ; 372 373 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic)) 374 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; 375 376 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive)) 377 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; 378 379 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 380 MLX5_CAP_GEN(dev->mdev, null_mkey) && 381 
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
	int err;

	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, error, !!error);

	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
	if (err)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
			    wq_num, err);
}

static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
						unsigned long idx)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	struct mlx5_ib_mr *ret;
	int err;

	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
				      idx * MLX5_IMR_MTT_SIZE,
				      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
				 imr->access_flags);
	if (IS_ERR(mr)) {
		ib_umem_odp_release(odp);
		return mr;
	}

	mr->access_flags = imr->access_flags;
	mr->ibmr.pd = imr->ibmr.pd;
	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
	mr->umem = &odp->umem;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
	mr->parent = imr;
	odp->private = mr;

	/*
	 * First refcount is owned by the xarray and second refcount
	 * is returned to the caller.
	 */
	refcount_set(&mr->mmkey.usecount, 2);

	err = mlx5r_umr_update_xlt(mr, 0,
				   MLX5_IMR_MTT_ENTRIES,
				   PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ENABLE);
	if (err) {
		ret = ERR_PTR(err);
		goto out_mr;
	}

	xa_lock(&imr->implicit_children);
	ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
			   GFP_KERNEL);
	if (unlikely(ret)) {
		if (xa_is_err(ret)) {
			ret = ERR_PTR(xa_err(ret));
			goto out_lock;
		}
		/*
		 * Another thread beat us to creating the child mr, use
		 * theirs.
466 */ 467 refcount_inc(&ret->mmkey.usecount); 468 goto out_lock; 469 } 470 xa_unlock(&imr->implicit_children); 471 472 mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr); 473 return mr; 474 475 out_lock: 476 xa_unlock(&imr->implicit_children); 477 out_mr: 478 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 479 return ret; 480 } 481 482 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, 483 int access_flags) 484 { 485 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); 486 struct ib_umem_odp *umem_odp; 487 struct mlx5_ib_mr *imr; 488 int err; 489 490 if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE)) 491 return ERR_PTR(-EOPNOTSUPP); 492 493 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags); 494 if (IS_ERR(umem_odp)) 495 return ERR_CAST(umem_odp); 496 497 imr = mlx5_mr_cache_alloc(dev, 498 &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY], 499 access_flags); 500 if (IS_ERR(imr)) { 501 ib_umem_odp_release(umem_odp); 502 return imr; 503 } 504 505 imr->access_flags = access_flags; 506 imr->ibmr.pd = &pd->ibpd; 507 imr->ibmr.iova = 0; 508 imr->umem = &umem_odp->umem; 509 imr->ibmr.lkey = imr->mmkey.key; 510 imr->ibmr.rkey = imr->mmkey.key; 511 imr->ibmr.device = &dev->ib_dev; 512 imr->is_odp_implicit = true; 513 xa_init(&imr->implicit_children); 514 515 err = mlx5r_umr_update_xlt(imr, 0, 516 mlx5_imr_ksm_entries, 517 MLX5_KSM_PAGE_SHIFT, 518 MLX5_IB_UPD_XLT_INDIRECT | 519 MLX5_IB_UPD_XLT_ZAP | 520 MLX5_IB_UPD_XLT_ENABLE); 521 if (err) 522 goto out_mr; 523 524 err = mlx5r_store_odp_mkey(dev, &imr->mmkey); 525 if (err) 526 goto out_mr; 527 528 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr); 529 return imr; 530 out_mr: 531 mlx5_ib_err(dev, "Failed to register MKEY %d\n", err); 532 mlx5_ib_dereg_mr(&imr->ibmr, NULL); 533 return ERR_PTR(err); 534 } 535 536 void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr) 537 { 538 struct mlx5_ib_mr *mtt; 539 unsigned long idx; 540 541 /* 542 * If this is an implicit MR it is already invalidated so we can just 543 * delete the children mkeys. 544 */ 545 xa_for_each(&mr->implicit_children, idx, mtt) { 546 xa_erase(&mr->implicit_children, idx); 547 mlx5_ib_dereg_mr(&mtt->ibmr, NULL); 548 } 549 } 550 551 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1) 552 #define MLX5_PF_FLAGS_SNAPSHOT BIT(2) 553 #define MLX5_PF_FLAGS_ENABLE BIT(3) 554 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, 555 u64 user_va, size_t bcnt, u32 *bytes_mapped, 556 u32 flags) 557 { 558 int page_shift, ret, np; 559 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; 560 u64 access_mask; 561 u64 start_idx; 562 bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT); 563 u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC; 564 565 if (flags & MLX5_PF_FLAGS_ENABLE) 566 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE; 567 568 page_shift = odp->page_shift; 569 start_idx = (user_va - ib_umem_start(odp)) >> page_shift; 570 access_mask = ODP_READ_ALLOWED_BIT; 571 572 if (odp->umem.writable && !downgrade) 573 access_mask |= ODP_WRITE_ALLOWED_BIT; 574 575 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault); 576 if (np < 0) 577 return np; 578 579 /* 580 * No need to check whether the MTTs really belong to this MR, since 581 * ib_umem_odp_map_dma_and_lock already checks this. 
582 */ 583 ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags); 584 mutex_unlock(&odp->umem_mutex); 585 586 if (ret < 0) { 587 if (ret != -EAGAIN) 588 mlx5_ib_err(mr_to_mdev(mr), 589 "Failed to update mkey page tables\n"); 590 goto out; 591 } 592 593 if (bytes_mapped) { 594 u32 new_mappings = (np << page_shift) - 595 (user_va - round_down(user_va, 1 << page_shift)); 596 597 *bytes_mapped += min_t(u32, new_mappings, bcnt); 598 } 599 600 return np << (page_shift - PAGE_SHIFT); 601 602 out: 603 return ret; 604 } 605 606 static int pagefault_implicit_mr(struct mlx5_ib_mr *imr, 607 struct ib_umem_odp *odp_imr, u64 user_va, 608 size_t bcnt, u32 *bytes_mapped, u32 flags) 609 { 610 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT; 611 unsigned long upd_start_idx = end_idx + 1; 612 unsigned long upd_len = 0; 613 unsigned long npages = 0; 614 int err; 615 int ret; 616 617 if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE || 618 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt)) 619 return -EFAULT; 620 621 /* Fault each child mr that intersects with our interval. */ 622 while (bcnt) { 623 unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT; 624 struct ib_umem_odp *umem_odp; 625 struct mlx5_ib_mr *mtt; 626 u64 len; 627 628 xa_lock(&imr->implicit_children); 629 mtt = xa_load(&imr->implicit_children, idx); 630 if (unlikely(!mtt)) { 631 xa_unlock(&imr->implicit_children); 632 mtt = implicit_get_child_mr(imr, idx); 633 if (IS_ERR(mtt)) { 634 ret = PTR_ERR(mtt); 635 goto out; 636 } 637 upd_start_idx = min(upd_start_idx, idx); 638 upd_len = idx - upd_start_idx + 1; 639 } else { 640 refcount_inc(&mtt->mmkey.usecount); 641 xa_unlock(&imr->implicit_children); 642 } 643 644 umem_odp = to_ib_umem_odp(mtt->umem); 645 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - 646 user_va; 647 648 ret = pagefault_real_mr(mtt, umem_odp, user_va, len, 649 bytes_mapped, flags); 650 651 mlx5r_deref_odp_mkey(&mtt->mmkey); 652 653 if (ret < 0) 654 goto out; 655 user_va += len; 656 bcnt -= len; 657 npages += ret; 658 } 659 660 ret = npages; 661 662 /* 663 * Any time the implicit_children are changed we must perform an 664 * update of the xlt before exiting to ensure the HW and the 665 * implicit_children remains synchronized. 666 */ 667 out: 668 if (likely(!upd_len)) 669 return ret; 670 671 /* 672 * Notice this is not strictly ordered right, the KSM is updated after 673 * the implicit_children is updated, so a parallel page fault could 674 * see a MR that is not yet visible in the KSM. This is similar to a 675 * parallel page fault seeing a MR that is being concurrently removed 676 * from the KSM. Both of these improbable situations are resolved 677 * safely by resuming the HW and then taking another page fault. The 678 * next pagefault handler will see the new information. 
679 */ 680 mutex_lock(&odp_imr->umem_mutex); 681 err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0, 682 MLX5_IB_UPD_XLT_INDIRECT | 683 MLX5_IB_UPD_XLT_ATOMIC); 684 mutex_unlock(&odp_imr->umem_mutex); 685 if (err) { 686 mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n"); 687 return err; 688 } 689 return ret; 690 } 691 692 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, 693 u32 *bytes_mapped, u32 flags) 694 { 695 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); 696 u32 xlt_flags = 0; 697 int err; 698 unsigned int page_size; 699 700 if (flags & MLX5_PF_FLAGS_ENABLE) 701 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE; 702 703 dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL); 704 err = ib_umem_dmabuf_map_pages(umem_dmabuf); 705 if (err) { 706 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 707 return err; 708 } 709 710 page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc, 711 log_page_size, 0, 712 umem_dmabuf->umem.iova); 713 if (unlikely(page_size < PAGE_SIZE)) { 714 ib_umem_dmabuf_unmap_pages(umem_dmabuf); 715 err = -EINVAL; 716 } else { 717 err = mlx5r_umr_update_mr_pas(mr, xlt_flags); 718 } 719 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); 720 721 if (err) 722 return err; 723 724 if (bytes_mapped) 725 *bytes_mapped += bcnt; 726 727 return ib_umem_num_pages(mr->umem); 728 } 729 730 /* 731 * Returns: 732 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are 733 * not accessible, or the MR is no longer valid. 734 * -EAGAIN/-ENOMEM: The operation should be retried 735 * 736 * -EINVAL/others: General internal malfunction 737 * >0: Number of pages mapped 738 */ 739 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, 740 u32 *bytes_mapped, u32 flags) 741 { 742 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); 743 744 if (unlikely(io_virt < mr->ibmr.iova)) 745 return -EFAULT; 746 747 if (mr->umem->is_dmabuf) 748 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); 749 750 if (!odp->is_implicit_odp) { 751 u64 user_va; 752 753 if (check_add_overflow(io_virt - mr->ibmr.iova, 754 (u64)odp->umem.address, &user_va)) 755 return -EFAULT; 756 if (unlikely(user_va >= ib_umem_end(odp) || 757 ib_umem_end(odp) - user_va < bcnt)) 758 return -EFAULT; 759 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, 760 flags); 761 } 762 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, 763 flags); 764 } 765 766 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr) 767 { 768 int ret; 769 770 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address, 771 mr->umem->length, NULL, 772 MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE); 773 return ret >= 0 ? 0 : ret; 774 } 775 776 int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr) 777 { 778 int ret; 779 780 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL, 781 MLX5_PF_FLAGS_ENABLE); 782 783 return ret >= 0 ? 0 : ret; 784 } 785 786 struct pf_frame { 787 struct pf_frame *next; 788 u32 key; 789 u64 io_virt; 790 size_t bcnt; 791 int depth; 792 }; 793 794 static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) 795 { 796 if (!mmkey) 797 return false; 798 if (mmkey->type == MLX5_MKEY_MW || 799 mmkey->type == MLX5_MKEY_INDIRECT_DEVX) 800 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key); 801 return mmkey->key == key; 802 } 803 804 /* 805 * Handle a single data segment in a page-fault WQE or RDMA region. 806 * 807 * Returns number of OS pages retrieved on success. The caller may continue to 808 * the next data segment. 
809 * Can return the following error codes: 810 * -EAGAIN to designate a temporary error. The caller will abort handling the 811 * page fault and resolve it. 812 * -EFAULT when there's an error mapping the requested pages. The caller will 813 * abort the page fault handling. 814 */ 815 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, 816 struct ib_pd *pd, u32 key, 817 u64 io_virt, size_t bcnt, 818 u32 *bytes_committed, 819 u32 *bytes_mapped) 820 { 821 int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; 822 struct pf_frame *head = NULL, *frame; 823 struct mlx5_ib_mkey *mmkey; 824 struct mlx5_ib_mr *mr; 825 struct mlx5_klm *pklm; 826 u32 *out = NULL; 827 size_t offset; 828 829 io_virt += *bytes_committed; 830 bcnt -= *bytes_committed; 831 832 next_mr: 833 xa_lock(&dev->odp_mkeys); 834 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key)); 835 if (!mmkey) { 836 xa_unlock(&dev->odp_mkeys); 837 mlx5_ib_dbg( 838 dev, 839 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", 840 key); 841 if (bytes_mapped) 842 *bytes_mapped += bcnt; 843 /* 844 * The user could specify a SGL with multiple lkeys and only 845 * some of them are ODP. Treat the non-ODP ones as fully 846 * faulted. 847 */ 848 ret = 0; 849 goto end; 850 } 851 refcount_inc(&mmkey->usecount); 852 xa_unlock(&dev->odp_mkeys); 853 854 if (!mkey_is_eq(mmkey, key)) { 855 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key); 856 ret = -EFAULT; 857 goto end; 858 } 859 860 switch (mmkey->type) { 861 case MLX5_MKEY_MR: 862 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); 863 864 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0); 865 if (ret < 0) 866 goto end; 867 868 mlx5_update_odp_stats(mr, faults, ret); 869 870 npages += ret; 871 ret = 0; 872 break; 873 874 case MLX5_MKEY_MW: 875 case MLX5_MKEY_INDIRECT_DEVX: 876 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { 877 mlx5_ib_dbg(dev, "indirection level exceeded\n"); 878 ret = -EFAULT; 879 goto end; 880 } 881 882 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + 883 sizeof(*pklm) * (mmkey->ndescs - 2); 884 885 if (outlen > cur_outlen) { 886 kfree(out); 887 out = kzalloc(outlen, GFP_KERNEL); 888 if (!out) { 889 ret = -ENOMEM; 890 goto end; 891 } 892 cur_outlen = outlen; 893 } 894 895 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, 896 bsf0_klm0_pas_mtt0_1); 897 898 ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); 899 if (ret) 900 goto end; 901 902 offset = io_virt - MLX5_GET64(query_mkey_out, out, 903 memory_key_mkey_entry.start_addr); 904 905 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { 906 if (offset >= be32_to_cpu(pklm->bcount)) { 907 offset -= be32_to_cpu(pklm->bcount); 908 continue; 909 } 910 911 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 912 if (!frame) { 913 ret = -ENOMEM; 914 goto end; 915 } 916 917 frame->key = be32_to_cpu(pklm->key); 918 frame->io_virt = be64_to_cpu(pklm->va) + offset; 919 frame->bcnt = min_t(size_t, bcnt, 920 be32_to_cpu(pklm->bcount) - offset); 921 frame->depth = depth + 1; 922 frame->next = head; 923 head = frame; 924 925 bcnt -= frame->bcnt; 926 offset = 0; 927 } 928 break; 929 930 default: 931 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type); 932 ret = -EFAULT; 933 goto end; 934 } 935 936 if (head) { 937 frame = head; 938 head = frame->next; 939 940 key = frame->key; 941 io_virt = frame->io_virt; 942 bcnt = frame->bcnt; 943 depth = frame->depth; 944 kfree(frame); 945 946 mlx5r_deref_odp_mkey(mmkey); 947 goto next_mr; 948 } 949 950 end: 951 if (mmkey) 952 mlx5r_deref_odp_mkey(mmkey); 953 
while (head) { 954 frame = head; 955 head = frame->next; 956 kfree(frame); 957 } 958 kfree(out); 959 960 *bytes_committed = 0; 961 return ret ? ret : npages; 962 } 963 964 /* 965 * Parse a series of data segments for page fault handling. 966 * 967 * @dev: Pointer to mlx5 IB device 968 * @pfault: contains page fault information. 969 * @wqe: points at the first data segment in the WQE. 970 * @wqe_end: points after the end of the WQE. 971 * @bytes_mapped: receives the number of bytes that the function was able to 972 * map. This allows the caller to decide intelligently whether 973 * enough memory was mapped to resolve the page fault 974 * successfully (e.g. enough for the next MTU, or the entire 975 * WQE). 976 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus 977 * the committed bytes). 978 * @receive_queue: receive WQE end of sg list 979 * 980 * Returns the number of pages loaded if positive, zero for an empty WQE, or a 981 * negative error code. 982 */ 983 static int pagefault_data_segments(struct mlx5_ib_dev *dev, 984 struct mlx5_pagefault *pfault, 985 void *wqe, 986 void *wqe_end, u32 *bytes_mapped, 987 u32 *total_wqe_bytes, bool receive_queue) 988 { 989 int ret = 0, npages = 0; 990 u64 io_virt; 991 u32 key; 992 u32 byte_count; 993 size_t bcnt; 994 int inline_segment; 995 996 if (bytes_mapped) 997 *bytes_mapped = 0; 998 if (total_wqe_bytes) 999 *total_wqe_bytes = 0; 1000 1001 while (wqe < wqe_end) { 1002 struct mlx5_wqe_data_seg *dseg = wqe; 1003 1004 io_virt = be64_to_cpu(dseg->addr); 1005 key = be32_to_cpu(dseg->lkey); 1006 byte_count = be32_to_cpu(dseg->byte_count); 1007 inline_segment = !!(byte_count & MLX5_INLINE_SEG); 1008 bcnt = byte_count & ~MLX5_INLINE_SEG; 1009 1010 if (inline_segment) { 1011 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK; 1012 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, 1013 16); 1014 } else { 1015 wqe += sizeof(*dseg); 1016 } 1017 1018 /* receive WQE end of sg list. */ 1019 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY && 1020 io_virt == 0) 1021 break; 1022 1023 if (!inline_segment && total_wqe_bytes) { 1024 *total_wqe_bytes += bcnt - min_t(size_t, bcnt, 1025 pfault->bytes_committed); 1026 } 1027 1028 /* A zero length data segment designates a length of 2GB. */ 1029 if (bcnt == 0) 1030 bcnt = 1U << 31; 1031 1032 if (inline_segment || bcnt <= pfault->bytes_committed) { 1033 pfault->bytes_committed -= 1034 min_t(size_t, bcnt, 1035 pfault->bytes_committed); 1036 continue; 1037 } 1038 1039 ret = pagefault_single_data_segment(dev, NULL, key, 1040 io_virt, bcnt, 1041 &pfault->bytes_committed, 1042 bytes_mapped); 1043 if (ret < 0) 1044 break; 1045 npages += ret; 1046 } 1047 1048 return ret < 0 ? ret : npages; 1049 } 1050 1051 /* 1052 * Parse initiator WQE. Advances the wqe pointer to point at the 1053 * scatter-gather list, and set wqe_end to the end of the WQE. 1054 */ 1055 static int mlx5_ib_mr_initiator_pfault_handler( 1056 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, 1057 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) 1058 { 1059 struct mlx5_wqe_ctrl_seg *ctrl = *wqe; 1060 u16 wqe_index = pfault->wqe.wqe_index; 1061 struct mlx5_base_av *av; 1062 unsigned ds, opcode; 1063 u32 qpn = qp->trans_qp.base.mqp.qpn; 1064 1065 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 1066 if (ds * MLX5_WQE_DS_UNITS > wqe_length) { 1067 mlx5_ib_err(dev, "Unable to read the complete WQE. 
ds = 0x%x, ret = 0x%x\n", 1068 ds, wqe_length); 1069 return -EFAULT; 1070 } 1071 1072 if (ds == 0) { 1073 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n", 1074 wqe_index, qpn); 1075 return -EFAULT; 1076 } 1077 1078 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; 1079 *wqe += sizeof(*ctrl); 1080 1081 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & 1082 MLX5_WQE_CTRL_OPCODE_MASK; 1083 1084 if (qp->type == IB_QPT_XRC_INI) 1085 *wqe += sizeof(struct mlx5_wqe_xrc_seg); 1086 1087 if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) { 1088 av = *wqe; 1089 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) 1090 *wqe += sizeof(struct mlx5_av); 1091 else 1092 *wqe += sizeof(struct mlx5_base_av); 1093 } 1094 1095 switch (opcode) { 1096 case MLX5_OPCODE_RDMA_WRITE: 1097 case MLX5_OPCODE_RDMA_WRITE_IMM: 1098 case MLX5_OPCODE_RDMA_READ: 1099 *wqe += sizeof(struct mlx5_wqe_raddr_seg); 1100 break; 1101 case MLX5_OPCODE_ATOMIC_CS: 1102 case MLX5_OPCODE_ATOMIC_FA: 1103 *wqe += sizeof(struct mlx5_wqe_raddr_seg); 1104 *wqe += sizeof(struct mlx5_wqe_atomic_seg); 1105 break; 1106 } 1107 1108 return 0; 1109 } 1110 1111 /* 1112 * Parse responder WQE and set wqe_end to the end of the WQE. 1113 */ 1114 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev, 1115 struct mlx5_ib_srq *srq, 1116 void **wqe, void **wqe_end, 1117 int wqe_length) 1118 { 1119 int wqe_size = 1 << srq->msrq.wqe_shift; 1120 1121 if (wqe_size > wqe_length) { 1122 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n"); 1123 return -EFAULT; 1124 } 1125 1126 *wqe_end = *wqe + wqe_size; 1127 *wqe += sizeof(struct mlx5_wqe_srq_next_seg); 1128 1129 return 0; 1130 } 1131 1132 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, 1133 struct mlx5_ib_qp *qp, 1134 void *wqe, void **wqe_end, 1135 int wqe_length) 1136 { 1137 struct mlx5_ib_wq *wq = &qp->rq; 1138 int wqe_size = 1 << wq->wqe_shift; 1139 1140 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) { 1141 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n"); 1142 return -EFAULT; 1143 } 1144 1145 if (wqe_size > wqe_length) { 1146 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n"); 1147 return -EFAULT; 1148 } 1149 1150 *wqe_end = wqe + wqe_size; 1151 1152 return 0; 1153 } 1154 1155 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev, 1156 u32 wq_num, int pf_type) 1157 { 1158 struct mlx5_core_rsc_common *common = NULL; 1159 struct mlx5_core_srq *srq; 1160 1161 switch (pf_type) { 1162 case MLX5_WQE_PF_TYPE_RMP: 1163 srq = mlx5_cmd_get_srq(dev, wq_num); 1164 if (srq) 1165 common = &srq->common; 1166 break; 1167 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE: 1168 case MLX5_WQE_PF_TYPE_RESP: 1169 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC: 1170 common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP); 1171 break; 1172 default: 1173 break; 1174 } 1175 1176 return common; 1177 } 1178 1179 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res) 1180 { 1181 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res; 1182 1183 return to_mibqp(mqp); 1184 } 1185 1186 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res) 1187 { 1188 struct mlx5_core_srq *msrq = 1189 container_of(res, struct mlx5_core_srq, common); 1190 1191 return to_mibsrq(msrq); 1192 } 1193 1194 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, 1195 struct mlx5_pagefault *pfault) 1196 { 1197 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; 1198 u16 wqe_index = 
pfault->wqe.wqe_index; 1199 void *wqe, *wqe_start = NULL, *wqe_end = NULL; 1200 u32 bytes_mapped, total_wqe_bytes; 1201 struct mlx5_core_rsc_common *res; 1202 int resume_with_error = 1; 1203 struct mlx5_ib_qp *qp; 1204 size_t bytes_copied; 1205 int ret = 0; 1206 1207 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type); 1208 if (!res) { 1209 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num); 1210 return; 1211 } 1212 1213 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ && 1214 res->res != MLX5_RES_XSRQ) { 1215 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", 1216 pfault->type); 1217 goto resolve_page_fault; 1218 } 1219 1220 wqe_start = (void *)__get_free_page(GFP_KERNEL); 1221 if (!wqe_start) { 1222 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); 1223 goto resolve_page_fault; 1224 } 1225 1226 wqe = wqe_start; 1227 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL; 1228 if (qp && sq) { 1229 ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, 1230 &bytes_copied); 1231 if (ret) 1232 goto read_user; 1233 ret = mlx5_ib_mr_initiator_pfault_handler( 1234 dev, pfault, qp, &wqe, &wqe_end, bytes_copied); 1235 } else if (qp && !sq) { 1236 ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE, 1237 &bytes_copied); 1238 if (ret) 1239 goto read_user; 1240 ret = mlx5_ib_mr_responder_pfault_handler_rq( 1241 dev, qp, wqe, &wqe_end, bytes_copied); 1242 } else if (!qp) { 1243 struct mlx5_ib_srq *srq = res_to_srq(res); 1244 1245 ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE, 1246 &bytes_copied); 1247 if (ret) 1248 goto read_user; 1249 ret = mlx5_ib_mr_responder_pfault_handler_srq( 1250 dev, srq, &wqe, &wqe_end, bytes_copied); 1251 } 1252 1253 if (ret < 0 || wqe >= wqe_end) 1254 goto resolve_page_fault; 1255 1256 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped, 1257 &total_wqe_bytes, !sq); 1258 if (ret == -EAGAIN) 1259 goto out; 1260 1261 if (ret < 0 || total_wqe_bytes > bytes_mapped) 1262 goto resolve_page_fault; 1263 1264 out: 1265 ret = 0; 1266 resume_with_error = 0; 1267 1268 read_user: 1269 if (ret) 1270 mlx5_ib_err( 1271 dev, 1272 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n", 1273 ret, wqe_index, pfault->token); 1274 1275 resolve_page_fault: 1276 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error); 1277 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n", 1278 pfault->wqe.wq_num, resume_with_error, 1279 pfault->type); 1280 mlx5_core_res_put(res); 1281 free_page((unsigned long)wqe_start); 1282 } 1283 1284 static int pages_in_range(u64 address, u32 length) 1285 { 1286 return (ALIGN(address + length, PAGE_SIZE) - 1287 (address & PAGE_MASK)) >> PAGE_SHIFT; 1288 } 1289 1290 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, 1291 struct mlx5_pagefault *pfault) 1292 { 1293 u64 address; 1294 u32 length; 1295 u32 prefetch_len = pfault->bytes_committed; 1296 int prefetch_activated = 0; 1297 u32 rkey = pfault->rdma.r_key; 1298 int ret; 1299 1300 /* The RDMA responder handler handles the page fault in two parts. 1301 * First it brings the necessary pages for the current packet 1302 * (and uses the pfault context), and then (after resuming the QP) 1303 * prefetches more pages. 
The second operation cannot use the pfault 1304 * context and therefore uses the dummy_pfault context allocated on 1305 * the stack */ 1306 pfault->rdma.rdma_va += pfault->bytes_committed; 1307 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, 1308 pfault->rdma.rdma_op_len); 1309 pfault->bytes_committed = 0; 1310 1311 address = pfault->rdma.rdma_va; 1312 length = pfault->rdma.rdma_op_len; 1313 1314 /* For some operations, the hardware cannot tell the exact message 1315 * length, and in those cases it reports zero. Use prefetch 1316 * logic. */ 1317 if (length == 0) { 1318 prefetch_activated = 1; 1319 length = pfault->rdma.packet_size; 1320 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len); 1321 } 1322 1323 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length, 1324 &pfault->bytes_committed, NULL); 1325 if (ret == -EAGAIN) { 1326 /* We're racing with an invalidation, don't prefetch */ 1327 prefetch_activated = 0; 1328 } else if (ret < 0 || pages_in_range(address, length) > ret) { 1329 mlx5_ib_page_fault_resume(dev, pfault, 1); 1330 if (ret != -ENOENT) 1331 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n", 1332 ret, pfault->token, pfault->type); 1333 return; 1334 } 1335 1336 mlx5_ib_page_fault_resume(dev, pfault, 0); 1337 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n", 1338 pfault->token, pfault->type, 1339 prefetch_activated); 1340 1341 /* At this point, there might be a new pagefault already arriving in 1342 * the eq, switch to the dummy pagefault for the rest of the 1343 * processing. We're still OK with the objects being alive as the 1344 * work-queue is being fenced. */ 1345 1346 if (prefetch_activated) { 1347 u32 bytes_committed = 0; 1348 1349 ret = pagefault_single_data_segment(dev, NULL, rkey, address, 1350 prefetch_len, 1351 &bytes_committed, NULL); 1352 if (ret < 0 && ret != -EAGAIN) { 1353 mlx5_ib_dbg(dev, "Prefetch failed. 
ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n", 1354 ret, pfault->token, address, prefetch_len); 1355 } 1356 } 1357 } 1358 1359 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault) 1360 { 1361 u8 event_subtype = pfault->event_subtype; 1362 1363 switch (event_subtype) { 1364 case MLX5_PFAULT_SUBTYPE_WQE: 1365 mlx5_ib_mr_wqe_pfault_handler(dev, pfault); 1366 break; 1367 case MLX5_PFAULT_SUBTYPE_RDMA: 1368 mlx5_ib_mr_rdma_pfault_handler(dev, pfault); 1369 break; 1370 default: 1371 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n", 1372 event_subtype); 1373 mlx5_ib_page_fault_resume(dev, pfault, 1); 1374 } 1375 } 1376 1377 static void mlx5_ib_eqe_pf_action(struct work_struct *work) 1378 { 1379 struct mlx5_pagefault *pfault = container_of(work, 1380 struct mlx5_pagefault, 1381 work); 1382 struct mlx5_ib_pf_eq *eq = pfault->eq; 1383 1384 mlx5_ib_pfault(eq->dev, pfault); 1385 mempool_free(pfault, eq->pool); 1386 } 1387 1388 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) 1389 { 1390 struct mlx5_eqe_page_fault *pf_eqe; 1391 struct mlx5_pagefault *pfault; 1392 struct mlx5_eqe *eqe; 1393 int cc = 0; 1394 1395 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { 1396 pfault = mempool_alloc(eq->pool, GFP_ATOMIC); 1397 if (!pfault) { 1398 schedule_work(&eq->work); 1399 break; 1400 } 1401 1402 pf_eqe = &eqe->data.page_fault; 1403 pfault->event_subtype = eqe->sub_type; 1404 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); 1405 1406 mlx5_ib_dbg(eq->dev, 1407 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", 1408 eqe->sub_type, pfault->bytes_committed); 1409 1410 switch (eqe->sub_type) { 1411 case MLX5_PFAULT_SUBTYPE_RDMA: 1412 /* RDMA based event */ 1413 pfault->type = 1414 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; 1415 pfault->token = 1416 be32_to_cpu(pf_eqe->rdma.pftype_token) & 1417 MLX5_24BIT_MASK; 1418 pfault->rdma.r_key = 1419 be32_to_cpu(pf_eqe->rdma.r_key); 1420 pfault->rdma.packet_size = 1421 be16_to_cpu(pf_eqe->rdma.packet_length); 1422 pfault->rdma.rdma_op_len = 1423 be32_to_cpu(pf_eqe->rdma.rdma_op_len); 1424 pfault->rdma.rdma_va = 1425 be64_to_cpu(pf_eqe->rdma.rdma_va); 1426 mlx5_ib_dbg(eq->dev, 1427 "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", 1428 pfault->type, pfault->token, 1429 pfault->rdma.r_key); 1430 mlx5_ib_dbg(eq->dev, 1431 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", 1432 pfault->rdma.rdma_op_len, 1433 pfault->rdma.rdma_va); 1434 break; 1435 1436 case MLX5_PFAULT_SUBTYPE_WQE: 1437 /* WQE based event */ 1438 pfault->type = 1439 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; 1440 pfault->token = 1441 be32_to_cpu(pf_eqe->wqe.token); 1442 pfault->wqe.wq_num = 1443 be32_to_cpu(pf_eqe->wqe.pftype_wq) & 1444 MLX5_24BIT_MASK; 1445 pfault->wqe.wqe_index = 1446 be16_to_cpu(pf_eqe->wqe.wqe_index); 1447 pfault->wqe.packet_size = 1448 be16_to_cpu(pf_eqe->wqe.packet_length); 1449 mlx5_ib_dbg(eq->dev, 1450 "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", 1451 pfault->type, pfault->token, 1452 pfault->wqe.wq_num, 1453 pfault->wqe.wqe_index); 1454 break; 1455 1456 default: 1457 mlx5_ib_warn(eq->dev, 1458 "Unsupported page fault event sub-type: 0x%02hhx\n", 1459 eqe->sub_type); 1460 /* Unsupported page faults should still be 1461 * resolved by the page fault handler 1462 */ 1463 } 1464 1465 pfault->eq = eq; 1466 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action); 1467 queue_work(eq->wq, &pfault->work); 1468 1469 cc = mlx5_eq_update_cc(eq->core, ++cc); 1470 } 1471 
1472 mlx5_eq_update_ci(eq->core, cc, 1); 1473 } 1474 1475 static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type, 1476 void *data) 1477 { 1478 struct mlx5_ib_pf_eq *eq = 1479 container_of(nb, struct mlx5_ib_pf_eq, irq_nb); 1480 unsigned long flags; 1481 1482 if (spin_trylock_irqsave(&eq->lock, flags)) { 1483 mlx5_ib_eq_pf_process(eq); 1484 spin_unlock_irqrestore(&eq->lock, flags); 1485 } else { 1486 schedule_work(&eq->work); 1487 } 1488 1489 return IRQ_HANDLED; 1490 } 1491 1492 /* mempool_refill() was proposed but unfortunately wasn't accepted 1493 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html 1494 * Cheap workaround. 1495 */ 1496 static void mempool_refill(mempool_t *pool) 1497 { 1498 while (pool->curr_nr < pool->min_nr) 1499 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); 1500 } 1501 1502 static void mlx5_ib_eq_pf_action(struct work_struct *work) 1503 { 1504 struct mlx5_ib_pf_eq *eq = 1505 container_of(work, struct mlx5_ib_pf_eq, work); 1506 1507 mempool_refill(eq->pool); 1508 1509 spin_lock_irq(&eq->lock); 1510 mlx5_ib_eq_pf_process(eq); 1511 spin_unlock_irq(&eq->lock); 1512 } 1513 1514 enum { 1515 MLX5_IB_NUM_PF_EQE = 0x1000, 1516 MLX5_IB_NUM_PF_DRAIN = 64, 1517 }; 1518 1519 int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) 1520 { 1521 struct mlx5_eq_param param = {}; 1522 int err = 0; 1523 1524 mutex_lock(&dev->odp_eq_mutex); 1525 if (eq->core) 1526 goto unlock; 1527 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action); 1528 spin_lock_init(&eq->lock); 1529 eq->dev = dev; 1530 1531 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN, 1532 sizeof(struct mlx5_pagefault)); 1533 if (!eq->pool) { 1534 err = -ENOMEM; 1535 goto unlock; 1536 } 1537 1538 eq->wq = alloc_workqueue("mlx5_ib_page_fault", 1539 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1540 MLX5_NUM_CMD_EQE); 1541 if (!eq->wq) { 1542 err = -ENOMEM; 1543 goto err_mempool; 1544 } 1545 1546 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; 1547 param = (struct mlx5_eq_param) { 1548 .nent = MLX5_IB_NUM_PF_EQE, 1549 }; 1550 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; 1551 eq->core = mlx5_eq_create_generic(dev->mdev, ¶m); 1552 if (IS_ERR(eq->core)) { 1553 err = PTR_ERR(eq->core); 1554 goto err_wq; 1555 } 1556 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb); 1557 if (err) { 1558 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err); 1559 goto err_eq; 1560 } 1561 1562 mutex_unlock(&dev->odp_eq_mutex); 1563 return 0; 1564 err_eq: 1565 mlx5_eq_destroy_generic(dev->mdev, eq->core); 1566 err_wq: 1567 eq->core = NULL; 1568 destroy_workqueue(eq->wq); 1569 err_mempool: 1570 mempool_destroy(eq->pool); 1571 unlock: 1572 mutex_unlock(&dev->odp_eq_mutex); 1573 return err; 1574 } 1575 1576 static int 1577 mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) 1578 { 1579 int err; 1580 1581 if (!eq->core) 1582 return 0; 1583 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb); 1584 err = mlx5_eq_destroy_generic(dev->mdev, eq->core); 1585 cancel_work_sync(&eq->work); 1586 destroy_workqueue(eq->wq); 1587 mempool_destroy(eq->pool); 1588 1589 return err; 1590 } 1591 1592 void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) 1593 { 1594 if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1595 return; 1596 1597 switch (ent->order - 2) { 1598 case MLX5_IMR_MTT_CACHE_ENTRY: 1599 ent->page = PAGE_SHIFT; 1600 ent->ndescs = MLX5_IMR_MTT_ENTRIES; 1601 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 1602 ent->limit = 0; 1603 break; 1604 1605 case 
MLX5_IMR_KSM_CACHE_ENTRY: 1606 ent->page = MLX5_KSM_PAGE_SHIFT; 1607 ent->ndescs = mlx5_imr_ksm_entries; 1608 ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM; 1609 ent->limit = 0; 1610 break; 1611 } 1612 } 1613 1614 static const struct ib_device_ops mlx5_ib_dev_odp_ops = { 1615 .advise_mr = mlx5_ib_advise_mr, 1616 }; 1617 1618 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev) 1619 { 1620 int ret = 0; 1621 1622 internal_fill_odp_caps(dev); 1623 1624 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) 1625 return ret; 1626 1627 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 1629 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1630 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1631 if (ret) { 1632 mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret); 1633 return ret; 1634 } 1635 } 1636 1637 mutex_init(&dev->odp_eq_mutex); 1638 return ret; 1639 } 1640 1641 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1642 { 1643 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) 1644 return; 1645 1646 mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq); 1647 } 1648 1649 int mlx5_ib_odp_init(void) 1650 { 1651 mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) - 1652 MLX5_IMR_MTT_BITS); 1653 1654 return 0; 1655 } 1656 1657 struct prefetch_mr_work { 1658 struct work_struct work; 1659 u32 pf_flags; 1660 u32 num_sge; 1661 struct { 1662 u64 io_virt; 1663 struct mlx5_ib_mr *mr; 1664 size_t length; 1665 } frags[]; 1666 }; 1667 1668 static void destroy_prefetch_work(struct prefetch_mr_work *work) 1669 { 1670 u32 i; 1671 1672 for (i = 0; i < work->num_sge; ++i) 1673 mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey); 1674 1675 kvfree(work); 1676 } 1677 1678 static struct mlx5_ib_mr * 1679 get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, 1680 u32 lkey) 1681 { 1682 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1683 struct mlx5_ib_mr *mr = NULL; 1684 struct mlx5_ib_mkey *mmkey; 1685 1686 xa_lock(&dev->odp_mkeys); 1687 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey)); 1688 if (!mmkey || mmkey->key != lkey) { 1689 mr = ERR_PTR(-ENOENT); 1690 goto end; 1691 } 1692 if (mmkey->type != MLX5_MKEY_MR) { 1693 mr = ERR_PTR(-EINVAL); 1694 goto end; 1695 } 1696 1697 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); 1698 1699 if (mr->ibmr.pd != pd) { 1700 mr = ERR_PTR(-EPERM); 1701 goto end; 1702 } 1703 1704 /* prefetch with write-access must be supported by the MR */ 1705 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && 1706 !mr->umem->writable) { 1707 mr = ERR_PTR(-EPERM); 1708 goto end; 1709 } 1710 1711 refcount_inc(&mmkey->usecount); 1712 end: 1713 xa_unlock(&dev->odp_mkeys); 1714 return mr; 1715 } 1716 1717 static void mlx5_ib_prefetch_mr_work(struct work_struct *w) 1718 { 1719 struct prefetch_mr_work *work = 1720 container_of(w, struct prefetch_mr_work, work); 1721 u32 bytes_mapped = 0; 1722 int ret; 1723 u32 i; 1724 1725 /* We rely on IB/core that work is executed if we have num_sge != 0 only. 
 */
	WARN_ON(!work->num_sge);
	for (i = 0; i < work->num_sge; ++i) {
		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
				   work->frags[i].length, &bytes_mapped,
				   work->pf_flags);
		if (ret <= 0)
			continue;
		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
	}

	destroy_prefetch_work(work);
}

static int init_prefetch_work(struct ib_pd *pd,
			      enum ib_uverbs_advise_mr_advice advice,
			      u32 pf_flags, struct prefetch_mr_work *work,
			      struct ib_sge *sg_list, u32 num_sge)
{
	u32 i;

	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
	work->pf_flags = pf_flags;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr)) {
			work->num_sge = i;
			return PTR_ERR(mr);
		}
		work->frags[i].io_virt = sg_list[i].addr;
		work->frags[i].length = sg_list[i].length;
		work->frags[i].mr = mr;
	}
	work->num_sge = num_sge;
	return 0;
}

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
				    enum ib_uverbs_advise_mr_advice advice,
				    u32 pf_flags, struct ib_sge *sg_list,
				    u32 num_sge)
{
	u32 bytes_mapped = 0;
	int ret = 0;
	u32 i;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr))
			return PTR_ERR(mr);
		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
				   &bytes_mapped, pf_flags);
		if (ret < 0) {
			mlx5r_deref_odp_mkey(&mr->mmkey);
			return ret;
		}
		mlx5_update_odp_stats(mr, prefetch, ret);
		mlx5r_deref_odp_mkey(&mr->mmkey);
	}

	return 0;
}

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	u32 pf_flags = 0;
	struct prefetch_mr_work *work;
	int rc;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;

	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
		return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
						num_sge);

	work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
	if (rc) {
		destroy_prefetch_work(work);
		return rc;
	}
	queue_work(system_unbound_wq, &work->work);
	return 0;
}
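
/*
 * Illustrative sketch (not part of the driver): the prefetch entry point
 * above, mlx5_ib_advise_mr_prefetch(), is reached from userspace through
 * rdma-core's ibv_advise_mr() on an ODP-enabled MR. Assuming an existing
 * pd, a buffer addr/len, and an ODP-capable device, a consumer might do:
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, addr, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_ON_DEMAND);
 *	struct ibv_sge sge = {
 *		.addr = (uintptr_t)addr,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *
 *	ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, 0, &sge, 1);
 *
 * With flags == 0 the request is queued and handled asynchronously by
 * mlx5_ib_prefetch_mr_work(); passing IBV_ADVISE_MR_FLAG_FLUSH maps to
 * IB_UVERBS_ADVISE_MR_FLAG_FLUSH and takes the synchronous
 * mlx5_ib_prefetch_sg_list() path above.
 */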