/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo the reservations taken so far: put every buffer that was removed
 * from the LRU lists back onto them and drop its reservation lock.
 * Caller must hold glob->lru_lock.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		ww_mutex_unlock(&bo->resv->lock);
	}
}

/*
 * Take every reserved buffer off the LRU lists. The LRU references
 * dropped here are accumulated in put_count and released later, outside
 * the lru_lock, by ttm_eu_list_ref_sub(). Caller must hold glob->lru_lock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Release the LRU references accumulated by ttm_eu_del_from_lru_locked().
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
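/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * callers build the list that these helpers operate on from
 * struct ttm_validate_buffer entries, one per buffer object taking part
 * in a submission, roughly along these lines:
 *
 *	struct ttm_validate_buffer *entry;
 *	LIST_HEAD(validate_list);
 *
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->bo = bo;
 *	list_add_tail(&entry->head, &validate_list);
 *
 * ttm_eu_reserve_buffers() below (re)initializes the reserved, removed
 * and put_count bookkeeping fields itself, so the caller only needs to
 * fill in the bo pointer and link the entry into the list.
 */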
107 * 108 * If a buffer is reserved for another validation, the validator with 109 * the highest validation sequence backs off and waits for that buffer 110 * to become unreserved. This prevents deadlocks when validating multiple 111 * buffers in different orders. 112 */ 113 114 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 115 struct list_head *list) 116 { 117 struct ttm_bo_global *glob; 118 struct ttm_validate_buffer *entry; 119 int ret; 120 121 if (list_empty(list)) 122 return 0; 123 124 list_for_each_entry(entry, list, head) { 125 entry->reserved = false; 126 entry->put_count = 0; 127 entry->removed = false; 128 } 129 130 entry = list_first_entry(list, struct ttm_validate_buffer, head); 131 glob = entry->bo->glob; 132 133 ww_acquire_init(ticket, &reservation_ww_class); 134 retry: 135 list_for_each_entry(entry, list, head) { 136 struct ttm_buffer_object *bo = entry->bo; 137 138 /* already slowpath reserved? */ 139 if (entry->reserved) 140 continue; 141 142 143 ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); 144 145 if (ret == -EDEADLK) { 146 /* uh oh, we lost out, drop every reservation and try 147 * to only reserve this buffer, then start over if 148 * this succeeds. 149 */ 150 spin_lock(&glob->lru_lock); 151 ttm_eu_backoff_reservation_locked(list, ticket); 152 spin_unlock(&glob->lru_lock); 153 ttm_eu_list_ref_sub(list); 154 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, 155 ticket); 156 if (unlikely(ret != 0)) { 157 if (ret == -EINTR) 158 ret = -ERESTARTSYS; 159 goto err_fini; 160 } 161 162 entry->reserved = true; 163 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 164 ret = -EBUSY; 165 goto err; 166 } 167 goto retry; 168 } else if (ret) 169 goto err; 170 171 entry->reserved = true; 172 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 173 ret = -EBUSY; 174 goto err; 175 } 176 } 177 178 ww_acquire_done(ticket); 179 spin_lock(&glob->lru_lock); 180 ttm_eu_del_from_lru_locked(list); 181 spin_unlock(&glob->lru_lock); 182 ttm_eu_list_ref_sub(list); 183 return 0; 184 185 err: 186 spin_lock(&glob->lru_lock); 187 ttm_eu_backoff_reservation_locked(list, ticket); 188 spin_unlock(&glob->lru_lock); 189 ttm_eu_list_ref_sub(list); 190 err_fini: 191 ww_acquire_done(ticket); 192 ww_acquire_fini(ticket); 193 return ret; 194 } 195 EXPORT_SYMBOL(ttm_eu_reserve_buffers); 196 197 void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, 198 struct list_head *list, void *sync_obj) 199 { 200 struct ttm_validate_buffer *entry; 201 struct ttm_buffer_object *bo; 202 struct ttm_bo_global *glob; 203 struct ttm_bo_device *bdev; 204 struct ttm_bo_driver *driver; 205 206 if (list_empty(list)) 207 return; 208 209 bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; 210 bdev = bo->bdev; 211 driver = bdev->driver; 212 glob = bo->glob; 213 214 spin_lock(&glob->lru_lock); 215 spin_lock(&bdev->fence_lock); 216 217 list_for_each_entry(entry, list, head) { 218 bo = entry->bo; 219 entry->old_sync_obj = bo->sync_obj; 220 bo->sync_obj = driver->sync_obj_ref(sync_obj); 221 ttm_bo_add_to_lru(bo); 222 ww_mutex_unlock(&bo->resv->lock); 223 entry->reserved = false; 224 } 225 spin_unlock(&bdev->fence_lock); 226 spin_unlock(&glob->lru_lock); 227 ww_acquire_fini(ticket); 228 229 list_for_each_entry(entry, list, head) { 230 if (entry->old_sync_obj) 231 driver->sync_obj_unref(&entry->old_sync_obj); 232 } 233 } 234 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); 235