/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>

#include <drm/drm.h>

#include "radeon.h"

/**
 * radeon_mn_invalidate - callback to notify about mm change
 *
 * @mn: our notifier
 * @range: the VMA under invalidation
 * @cur_seq: current sequence number of the invalidated interval
 *
 * We block until all BOs between start and end are idle and
 * unmap them by moving them into the system domain again.
 */
static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
				 const struct mmu_notifier_range *range,
				 unsigned long cur_seq)
{
	struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
	/* { interruptible = false, no_wait_gpu = false } */
	struct ttm_operation_ctx ctx = { false, false };
	long r;

	/* Nothing to invalidate if no pages are currently bound. */
	if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
		return true;

	/*
	 * We have to sleep below (reserve + fence wait); if the caller
	 * cannot block, report back that the invalidation was not handled.
	 */
	if (!mmu_notifier_range_blockable(range))
		return false;

	r = radeon_bo_reserve(bo, true);
	if (r) {
		DRM_ERROR("(%ld) failed to reserve user bo\n", r);
		return true;
	}

	/* Wait for all outstanding GPU work on the BO to complete. */
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

	/*
	 * Move the BO into the CPU domain, unbinding it from the GART.
	 * Errors are only logged; we still proceed so the unreserve below
	 * always runs.
	 */
	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		DRM_ERROR("(%ld) failed to validate user bo\n", r);

	radeon_bo_unreserve(bo);
	return true;
}

/* Per-BO interval notifier callbacks; only invalidation is needed. */
static const struct mmu_interval_notifier_ops radeon_mn_ops = {
	.invalidate = radeon_mn_invalidate,
};

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	int ret;

	/* Track the userptr range [addr, addr + bo size) on the current mm. */
	ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					   radeon_bo_size(bo), &radeon_mn_ops);
	if (ret)
		return ret;

	/*
	 * FIXME: radeon appears to allow get_user_pages to run during
	 * invalidate_range_start/end, which is not a safe way to read the
	 * PTEs. It should use the mmu_interval_read_begin() scheme around the
	 * get_user_pages to ensure that the PTEs are read properly
	 */
	mmu_interval_read_begin(&bo->notifier);
	return 0;
}

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	/* notifier.mm doubles as the "is registered" flag; NULL means no-op. */
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}