/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		ttm_bo_unreserve(bo);
	}
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
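 *
 * Illustrative scenario (an assumption for clarity, not taken from this
 * file): validator A (val_seq 1) has reserved buffer X and now wants
 * buffer Y, while validator B (val_seq 2) has reserved Y and wants X.
 * B carries the higher sequence, so its ttm_bo_reserve() on X fails with
 * -EAGAIN; B then releases Y through ttm_eu_backoff_reservation(), waits
 * for X to become unreserved and retries, letting A finish first.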
 */

int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
{
	struct ttm_validate_buffer *entry;
	int ret;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		entry->reserved = false;
		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
		if (ret != 0) {
			ttm_eu_backoff_reservation(list);
			if (ret == -EAGAIN) {
				/*
				 * Contended, and we hold the higher
				 * sequence: drop all reservations, wait
				 * for this buffer and start over.
				 */
				ret = ttm_bo_wait_unreserved(bo, true);
				if (unlikely(ret != 0))
					return ret;
				goto retry;
			} else
				return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/*
			 * Buffer is wanted by the CPU: release everything
			 * and wait for it to become free for GPU access.
			 */
			ttm_eu_backoff_reservation(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		struct ttm_bo_driver *driver = bo->bdev->driver;
		void *old_sync_obj;

		/* Swap in the new fence under the bo spinlock, then
		 * unreserve; drop the old fence reference outside the lock.
		 */
		spin_lock(&bo->lock);
		old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		spin_unlock(&bo->lock);
		ttm_bo_unreserve(bo);
		entry->reserved = false;
		if (old_sync_obj)
			driver->sync_obj_unref(&old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
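
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * driver command-submission path builds a list of
 * struct ttm_validate_buffer entries, reserves them all, validates each
 * buffer's placement, emits the command stream and finally fences the
 * whole list.  Everything below, including my_driver_submit(), the
 * fence/fence_arg parameters and the placement step, is a hypothetical
 * example, which is why it is compiled out with #if 0.
 */
#if 0
static int my_driver_submit(struct list_head *val_list, uint32_t val_seq,
			    void *fence, void *fence_arg)
{
	struct ttm_validate_buffer *entry;
	int ret;

	/* Reserve every buffer; contention is retried internally. */
	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;

	list_for_each_entry(entry, val_list, head) {
		/* Consumed later by ttm_eu_fence_buffer_objects(). */
		entry->new_sync_obj_arg = fence_arg;
		/*
		 * Validate placement here, e.g. with ttm_bo_validate();
		 * on failure, call ttm_eu_backoff_reservation() before
		 * returning the error.
		 */
	}

	/* Emit the command stream to the hardware here. */

	/* Attach the fence to all buffers and unreserve them. */
	ttm_eu_fence_buffer_objects(val_list, fence);
	return 0;
}
#endif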