/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
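
/*
 * Illustrative sketch (not part of TTM): how a driver typically fills the
 * validation list these helpers operate on. struct ttm_validate_buffer and
 * its head/bo/num_shared members come from ttm_execbuf_util.h; "job" and its
 * arrays are hypothetical driver-side names.
 *
 *	struct list_head list;
 *	unsigned int i;
 *
 *	INIT_LIST_HEAD(&list);
 *	for (i = 0; i < job->num_bos; ++i) {
 *		struct ttm_validate_buffer *val = &job->val_bufs[i];
 *
 *		val->bo = job->bos[i];
 *		val->num_shared = job->is_write[i] ? 0 : 1;
 *		list_add_tail(&val->head, &list);
 *	}
 *
 * num_shared is the number of shared-fence slots to reserve on the buffer's
 * reservation object; leaving it 0 makes ttm_eu_fence_buffer_objects() below
 * attach the fence with DMA_RESV_USAGE_WRITE instead of DMA_RESV_USAGE_READ.
 */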

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
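
/*
 * Illustrative sketch (not part of TTM): the usual command-submission flow a
 * driver builds on top of the helpers above. ttm_eu_reserve_buffers(),
 * ttm_bo_validate(), ttm_eu_fence_buffer_objects() and
 * ttm_eu_backoff_reservation() are the real entry points; "my_emit_job()",
 * "job" and the placement argument are hypothetical driver pieces.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct ttm_validate_buffer *entry;
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	list_for_each_entry(entry, &list, head) {
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		ret = ttm_bo_validate(entry->bo, &placement, &ctx);
 *		if (ret)
 *			goto err_backoff;
 *	}
 *
 *	fence = my_emit_job(job);
 *	if (IS_ERR(fence)) {
 *		ret = PTR_ERR(fence);
 *		goto err_backoff;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 *	return 0;
 *
 * err_backoff:
 *	ttm_eu_backoff_reservation(&ticket, &list);
 *	return ret;
 *
 * On success every buffer gets the job's fence added to its reservation
 * object and is unlocked; on any failure the backoff path drops all
 * reservations and ends the ww_acquire context.
 */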