/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device	*rdev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by rdev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root_cached	objects;
};

struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_mn_node *node, *next_node;
	struct radeon_bo *bo, *next_bo;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &rmn->objects.rb_root, it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}
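
/*
 * A note on the lazy destruction above: radeon_mn_release() below runs
 * in the mmu_notifier callback path, where calling
 * mmu_notifier_unregister() directly would deadlock on the notifier's
 * own locks. Deferring the teardown to a work item lets the unregister
 * happen from process context instead. Roughly (illustrative call
 * chain, not driver code):
 *
 *	exit_mmap()
 *	  mmu_notifier_release()
 *	    radeon_mn_release()	-> schedule_work(&rmn->work)
 *	...later, from the workqueue...
 *	radeon_mn_destroy()	-> mmu_notifier_unregister()
 */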

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We wait for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct ttm_operation_ctx ctx = { false, false };
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
				continue;

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};
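
/*
 * These callbacks matter for userptr BOs: when userspace creates a
 * userptr BO, the GEM code is expected to call radeon_mn_register()
 * below so that CPU page table changes over the watched range reach
 * invalidate_range_start() above. A minimal sketch of a caller
 * (hypothetical here; the real call site lives in the userptr ioctl in
 * radeon_gem.c):
 *
 *	r = radeon_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 */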

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

	if (down_write_killable(&mm->mmap_sem))
		return ERR_PTR(-EINTR);

	mutex_lock(&rdev->mn_lock);

	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT_CACHED;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

	/* merge all nodes overlapping [addr, end] into a single interval */
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct list_head *head;

	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	/* if this was the last BO in the node, drop the node as well */
	if (list_empty(head)) {
		struct radeon_mn_node *node;

		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}
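
/*
 * Locking summary, for reference (matches the field comments in
 * struct radeon_mn above):
 *
 *	rdev->mn_lock	protects rdev->mn_hash (the per-mm rmn lookup)
 *	rmn->lock	protects rmn->objects and the per-node bos lists
 *
 * When both locks are needed, rdev->mn_lock is taken first, as in
 * radeon_mn_destroy() and radeon_mn_unregister().
 */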