/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * A fence entry pairs a FIFO sequence number with the time it was
 * submitted. Entries stay on the queue until the device signals them.
 */
struct vmw_fence {
	struct list_head head;
	uint32_t sequence;
	struct timespec submitted;
};

void vmw_fence_queue_init(struct vmw_fence_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = ns_to_timespec(0);
	getrawmonotonic(&queue->lag_time);
	spin_lock_init(&queue->lock);
}

void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
{
	struct vmw_fence *fence, *next;

	/* Free all outstanding fences; the queue is going away. */
	spin_lock(&queue->lock);
	list_for_each_entry_safe(fence, next, &queue->head, head) {
		kfree(fence);
	}
	spin_unlock(&queue->lock);
}

int vmw_fence_push(struct vmw_fence_queue *queue,
		   uint32_t sequence)
{
	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);

	if (unlikely(!fence))
		return -ENOMEM;

	fence->sequence = sequence;
	getrawmonotonic(&fence->submitted);
	spin_lock(&queue->lock);
	list_add_tail(&fence->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}

int vmw_fence_pull(struct vmw_fence_queue *queue,
		   uint32_t signaled_sequence)
{
	struct vmw_fence *fence, *next;
	struct timespec now;
	bool updated = false;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);

	if (list_empty(&queue->head)) {
		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(fence, next, &queue->head, head) {
		/*
		 * Unsigned wraparound check: a fence that is not yet
		 * signaled yields a huge difference and is skipped.
		 */
		if (signaled_sequence - fence->sequence > (1 << 30))
			continue;

		queue->lag = timespec_sub(now, fence->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&fence->head);
		kfree(fence);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
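
/*
 * Lag model: vmw_fence_push() timestamps each sequence at submission,
 * and vmw_fence_pull() sets the queue lag to the age of the newest
 * fence it retires. For example, a fence pushed at t = 100 ms and
 * retired at t = 130 ms leaves a lag of 30 ms; vmw_fifo_lag() below
 * then grows that figure by the wall-clock time that elapses until
 * the lag is next updated.
 */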

static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	t1.tv_sec += t2.tv_sec;
	t1.tv_nsec += t2.tv_nsec;
	if (t1.tv_nsec >= 1000000000L) {
		t1.tv_sec += 1;
		t1.tv_nsec -= 1000000000L;
	}

	return t1;
}

static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
{
	struct timespec now;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);
	/* Grow the stored lag by the time elapsed since its last update. */
	queue->lag = vmw_timespec_add(queue->lag,
				      timespec_sub(now, queue->lag_time));
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}

static bool vmw_lag_lt(struct vmw_fence_queue *queue,
		       uint32_t us)
{
	struct timespec lag, cond;

	/* True if the current lag is at most @us microseconds. */
	cond = ns_to_timespec((s64) us * 1000);
	lag = vmw_fifo_lag(queue);
	return (timespec_compare(&lag, &cond) < 1);
}

int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_fence_queue *queue, uint32_t us)
{
	struct vmw_fence *fence;
	uint32_t sequence;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		/*
		 * Wait on the oldest outstanding fence, or on the most
		 * recently emitted sequence if the queue is empty.
		 */
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			sequence = atomic_read(&dev_priv->fence_seq);
		else {
			fence = list_first_entry(&queue->head,
						 struct vmw_fence, head);
			sequence = fence->sequence;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_fence(dev_priv, false, sequence, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_fence_pull(queue, sequence);
	}
	return 0;
}
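
/*
 * Usage sketch: how a command-submission path might throttle on fence
 * lag. This assumes vmw_fifo_send_fence() and dev_priv->fence_queue
 * from elsewhere in the driver; the 100 ms budget is an arbitrary
 * example value.
 *
 *	uint32_t sequence;
 *	int ret;
 *
 *	ret = vmw_fifo_send_fence(dev_priv, &sequence);
 *	if (likely(ret == 0))
 *		(void) vmw_fence_push(&dev_priv->fence_queue, sequence);
 *
 *	// Block until the device is less than 100 ms behind.
 *	ret = vmw_wait_lag(dev_priv, &dev_priv->fence_queue, 100000);
 */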