/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				   const struct mmu_interval_notifier_ops *ops)
{
	int ret;

	umem_odp->umem.is_odp = 1;
	mutex_init(&umem_odp->umem_mutex);

	if (!umem_odp->is_implicit_odp) {
		size_t page_size = 1UL << umem_odp->page_shift;
		unsigned long start;
		unsigned long end;
		size_t ndmas, npfns;

		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
		if (check_add_overflow(umem_odp->umem.address,
				       (unsigned long)umem_odp->umem.length,
				       &end))
			return -EOVERFLOW;
		end = ALIGN(end, page_size);
		if (unlikely(end < page_size))
			return -EOVERFLOW;

		ndmas = (end - start) >> umem_odp->page_shift;
		if (!ndmas)
			return -EINVAL;

		npfns = (end - start) >> PAGE_SHIFT;
		umem_odp->pfn_list = kvcalloc(
			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
		if (!umem_odp->pfn_list)
			return -ENOMEM;

		umem_odp->dma_list = kvcalloc(
			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_pfn_list;
		}

		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
		if (ret)
			goto out_dma_list;
	}

	return 0;

out_dma_list:
	kvfree(umem_odp->dma_list);
out_pfn_list:
	kvfree(umem_odp->pfn_list);
	return ret;
}
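/*
 * Worked example (illustrative note, not upstream code): for a 2 MiB
 * region at a 2 MiB-aligned address with page_shift == 21 (huge pages)
 * and PAGE_SHIFT == 12, ib_init_umem_odp() above sizes the two arrays
 * differently:
 *
 *	end - start = 2 MiB
 *	ndmas = 2 MiB >> 21 = 1      one dma_list slot per device page
 *	npfns = 2 MiB >> 12 = 512    one pfn_list slot per CPU page
 *
 * That is, dma_list is indexed in device-page (page_shift) units while
 * pfn_list is always indexed in CPU-page (PAGE_SHIFT) units; the mapping
 * loop in ib_umem_odp_map_dma_and_lock() below steps the two indices at
 * these different strides.
 */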
/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * child umems.
 *
 * @device: IB device to create the umem on
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access)
{
	struct ib_umem *umem;
	struct ib_umem_odp *umem_odp;
	int ret;

	if (access & IB_ACCESS_HUGETLB)
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = current->mm;
	umem_odp->is_implicit_odp = 1;
	umem_odp->page_shift = PAGE_SHIFT;

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, NULL);
	if (ret) {
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
	}
	return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);

/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 * @ops: MMU interval ops, currently only @invalidate
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops)
{
	/*
	 * Caller must ensure that root cannot be freed during the call to
	 * ib_umem_odp_alloc_child().
	 */
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int ret;

	if (WARN_ON(!root->is_implicit_odp))
		return ERR_PTR(-EINVAL);

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->ibdev = root->umem.ibdev;
	umem->length = size;
	umem->address = addr;
	umem->writable = root->umem.writable;
	umem->owning_mm = root->umem.owning_mm;
	odp_data->page_shift = PAGE_SHIFT;
	odp_data->notifier.ops = ops;

	/*
	 * An mmget must be held when registering a notifier; the owning_mm
	 * only has an mmgrab at this point.
	 */
	if (!mmget_not_zero(umem->owning_mm)) {
		ret = -EFAULT;
		goto out_free;
	}

	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
	if (ret)
		goto out_tgid;
	mmput(umem->owning_mm);
	return odp_data;

out_tgid:
	put_pid(odp_data->tgid);
	mmput(umem->owning_mm);
out_free:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);

/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device struct to get the umem for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @ops: MMU interval ops, currently only @invalidate
 *
 * The driver should use this function when the access flags indicate ODP
 * memory. It avoids pinning; instead, it stores the mm for future page fault
 * handling in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
				    unsigned long addr, size_t size, int access,
				    const struct mmu_interval_notifier_ops *ops)
{
	struct ib_umem_odp *umem_odp;
	struct mm_struct *mm;
	int ret;

	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);

	umem_odp->umem.ibdev = device;
	umem_odp->umem.length = size;
	umem_odp->umem.address = addr;
	umem_odp->umem.writable = ib_access_writable(access);
	umem_odp->umem.owning_mm = mm = current->mm;
	umem_odp->notifier.ops = ops;

	umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	if (access & IB_ACCESS_HUGETLB)
		umem_odp->page_shift = HPAGE_SHIFT;
#endif

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, ops);
	if (ret)
		goto err_put_pid;
	return umem_odp;

err_put_pid:
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);
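/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * reg_user_mr path might create and later release an ODP umem like this.
 * The my_drv_mmu_interval_ops name is hypothetical; a real driver
 * supplies its own mmu_interval_notifier_ops with an invalidate hook.
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_get(ibdev, start, length,
 *			      access | IB_ACCESS_ON_DEMAND,
 *			      &my_drv_mmu_interval_ops);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 *	...
 *	ib_umem_odp_release(odp);
 */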
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	if (!umem_odp->is_implicit_odp) {
		mutex_lock(&umem_odp->umem_mutex);
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->pfn_list);
	}
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page into.
 * @dma_index: index in the umem's dma_list to add the DMA address to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 *
 * The function returns -EFAULT if the DMA mapping operation fails.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		unsigned int dma_index,
		struct page *page,
		u64 access_mask)
{
	struct ib_device *dev = umem_odp->umem.ibdev;
	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];

	if (*dma_addr) {
		/*
		 * If the page is already dma mapped it means it went through
		 * a non-invalidating transition, like read-only to writable.
		 * Resync the flags.
		 */
		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
		return 0;
	}

	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
				    DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
		*dma_addr = 0;
		return -EFAULT;
	}
	umem_odp->npages++;
	*dma_addr |= access_mask;
	return 0;
}
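/*
 * Note on the dma_list encoding used above: each entry packs the access
 * bits into the low bits of the DMA address, which works because the
 * mapping is at least PAGE_SIZE aligned. Decoding an entry looks like:
 *
 *	dma_addr_t entry = umem_odp->dma_list[i];
 *	dma_addr_t addr = entry & ODP_DMA_ADDR_MASK;	// the real address
 *	bool writable = entry & ODP_WRITE_ALLOWED_BIT;	// low-bit flag
 *
 * and a zero entry means "not currently mapped".
 */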
/**
 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
 *
 * Maps the range passed in the argument to DMA addresses. The DMA addresses
 * of the mapped pages are updated in umem_odp->dma_list. Upon success the
 * ODP MR will be locked to let the caller complete its device page table
 * update.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 *
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an
 *        error pinning or mapping a page. The actual number of pages mapped
 *        is given by the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @fault: is faulting required for the given range
 */
int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
				 u64 bcnt, u64 access_mask, bool fault)
			__acquires(&umem_odp->umem_mutex)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	int pfn_index, dma_index, ret = 0, start_idx;
	unsigned int page_shift, hmm_order, pfn_start_idx;
	unsigned long num_pfns, current_seq;
	struct hmm_range range = {};
	unsigned long timeout;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem_odp) ||
	    user_virt + bcnt > ib_umem_end(umem_odp))
		return -EFAULT;

	page_shift = umem_odp->page_shift;

	/*
	 * owning_process is allowed to be NULL; this means the mm somehow
	 * still exists beyond the lifetime of the originating process.
	 * Presumably mmget_not_zero will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
		ret = -EINVAL;
		goto out_put_task;
	}

	range.notifier = &umem_odp->notifier;
	range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
	if (fault) {
		range.default_flags = HMM_PFN_REQ_FAULT;

		if (access_mask & ODP_WRITE_ALLOWED_BIT)
			range.default_flags |= HMM_PFN_REQ_WRITE;
	}

	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	current_seq = range.notifier_seq =
		mmu_interval_read_begin(&umem_odp->notifier);

	mmap_read_lock(owning_mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(owning_mm);
	if (unlikely(ret)) {
		if (ret == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_put_mm;
	}

	start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
	dma_index = start_idx;

	mutex_lock(&umem_odp->umem_mutex);
	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
		mutex_unlock(&umem_odp->umem_mutex);
		goto retry;
	}

	for (pfn_index = 0; pfn_index < num_pfns;
	     pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {

		if (fault) {
			/*
			 * Since we asked hmm_range_fault() to populate
			 * pages, it shouldn't return an error entry on
			 * success.
			 */
			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
		} else {
			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
				WARN_ON(umem_odp->dma_list[dma_index]);
				continue;
			}
			access_mask = ODP_READ_ALLOWED_BIT;
			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
				access_mask |= ODP_WRITE_ALLOWED_BIT;
		}

		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
		/*
		 * If a hugepage was detected but ODP wasn't requested for it,
		 * the umem page_shift will still be used; the opposite case,
		 * an hmm_order smaller than page_shift, is an error.
		 */
		if (hmm_order + PAGE_SHIFT < page_shift) {
			ret = -EINVAL;
			ibdev_dbg(umem_odp->umem.ibdev,
				  "%s: unexpected hmm_order %u, page_shift %u\n",
				  __func__, hmm_order, page_shift);
			break;
		}

		ret = ib_umem_odp_map_dma_single_page(
			umem_odp, dma_index,
			hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
			access_mask);
		if (ret < 0) {
			ibdev_dbg(umem_odp->umem.ibdev,
				  "ib_umem_odp_map_dma_single_page failed with error %d\n",
				  ret);
			break;
		}
	}
	/* on success, the lock stays held for the caller */
	if (!ret)
		ret = dma_index - start_idx;
	else
		mutex_unlock(&umem_odp->umem_mutex);

out_put_mm:
	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
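/*
 * Usage sketch (illustrative only): a driver page-fault handler is
 * expected to map the faulting range, update its device page table
 * while the umem_mutex is still held, and then drop the lock.
 * my_drv_fault() and my_drv_update_pgtable() are hypothetical hooks.
 *
 *	static int my_drv_fault(struct ib_umem_odp *odp, u64 va, u64 len,
 *				u64 access_mask)
 *	{
 *		int npages;
 *
 *		npages = ib_umem_odp_map_dma_and_lock(odp, va, len,
 *						      access_mask, true);
 *		if (npages < 0)
 *			return npages;
 *
 *		my_drv_update_pgtable(odp);	// device PTE update
 *		mutex_unlock(&odp->umem_mutex);
 *		return npages;
 *	}
 */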
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	dma_addr_t dma_addr;
	dma_addr_t dma;
	int idx;
	u64 addr;
	struct ib_device *dev = umem_odp->umem.ibdev;

	lockdep_assert_held(&umem_odp->umem_mutex);

	virt = max_t(u64, virt, ib_umem_start(umem_odp));
	bound = min_t(u64, bound, ib_umem_end(umem_odp));
	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		dma = umem_odp->dma_list[idx];

		/* A zero dma_list entry means the page was never DMA mapped */
		if (dma) {
			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);

			dma_addr = dma & ODP_DMA_ADDR_MASK;
			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			umem_odp->dma_list[idx] = 0;
			umem_odp->npages--;
		}
	}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
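/*
 * Usage sketch (illustrative only): the @invalidate hook passed through
 * the ops arguments above is typically expected to unmap the invalidated
 * sub-range under the umem_mutex, in a pattern such as the following.
 * my_drv_invalidate() and my_drv_flush_device_tlb() are hypothetical;
 * see an in-tree user (e.g. mlx5) for a complete sequence.
 *
 *	static bool my_drv_invalidate(struct mmu_interval_notifier *mni,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		struct ib_umem_odp *odp =
 *			container_of(mni, struct ib_umem_odp, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *
 *		mutex_lock(&odp->umem_mutex);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		ib_umem_odp_unmap_dma_pages(odp, range->start, range->end);
 *		my_drv_flush_device_tlb(odp);	// hypothetical device flush
 *		mutex_unlock(&odp->umem_mutex);
 *		return true;
 *	}
 */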