/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
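/*
 * For context: these exporter callbacks are normally exercised from userspace
 * through the generic PRIME ioctls. A minimal, illustrative userspace sketch
 * (not part of this driver; it assumes two already opened DRM file
 * descriptors and an existing GEM handle on the exporting device):
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <xf86drm.h>
 *
 *	int share_bo(int exporter_fd, int importer_fd, uint32_t handle)
 *	{
 *		uint32_t imported_handle;
 *		int prime_fd, r;
 *
 *		r = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC,
 *				       &prime_fd);
 *		if (r)
 *			return r;
 *
 *		r = drmPrimeFDToHandle(importer_fd, prime_fd, &imported_handle);
 *		close(prime_fd);
 *		return r;
 *	}
 *
 * drmPrimeHandleToFD() reaches amdgpu_gem_prime_export() below via the DRM
 * core, while drmPrimeFDToHandle() reaches amdgpu_gem_prime_import().
 */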
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>

static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto out;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto out;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		amdgpu_bo_unreserve(bo);
		goto out;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;

out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	return r;
}

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	if (sgt->sgl->page_link) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}
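/*
 * For orientation, the map/unmap callbacks above (and pin/unpin further up)
 * are driven by an importing driver through the dma-buf core. A rough,
 * illustrative sketch of a dynamic importer; the names my_attach_ops,
 * my_move_notify and my_import are made up, and the exact core API can vary
 * between kernel versions:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// drop cached mappings, re-map on next use
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	static int my_import(struct dma_buf *buf, struct device *dev)
 *	{
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *
 *		attach = dma_buf_dynamic_attach(buf, dev, &my_attach_ops, NULL);
 *		if (IS_ERR(attach))
 *			return PTR_ERR(attach);
 *
 *		// dynamic importers map while holding the reservation lock;
 *		// for an amdgpu exporter this ends up in amdgpu_dma_buf_map()
 *		dma_resv_lock(buf->resv, NULL);
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		if (!IS_ERR(sgt)) {
 *			// ... use the addresses in sgt, or pin the attachment
 *			//     before dropping the reservation lock ...
 *			dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *		}
 *		dma_resv_unlock(buf->resv);
 *
 *		dma_buf_detach(buf, attach);
 *		return IS_ERR(sgt) ? PTR_ERR(sgt) : 0;
 *	}
 */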
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
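/*
 * The begin_cpu_access callback above is reached, for example, when userspace
 * brackets CPU access to the shared buffer with the DMA_BUF_IOCTL_SYNC ioctl.
 * A minimal, illustrative userspace sketch (not part of this driver; the name
 * read_back is made up and it assumes an mmap()ed dma-buf file descriptor):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int read_back(int dmabuf_fd, void *map, void *dst, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		int r;
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
 *		r = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *		if (r)
 *			return r;
 *
 *		memcpy(dst, map, size);
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	}
 *
 * The SYNC_START/READ case is what triggers the move to GTT implemented in
 * amdgpu_dma_buf_begin_cpu_access().
 */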
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables.
			 * So we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page table updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if dmabuf accessible over xgmi, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}