/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->scheduled);
}

void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
	if (result)
		dma_fence_set_error(&fence->finished, result);
	dma_fence_signal(&fence->finished);
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}
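/*
 * Illustrative sketch only, not part of the scheduler proper: how the
 * result recorded by drm_sched_fence_finished() above becomes visible
 * to waiters. The helper and the -ECANCELED result are invented for
 * the example; only the drm_sched_fence_finished() and
 * dma_fence_get_status() calls are real.
 */
static void __maybe_unused drm_sched_fence_error_sketch(struct drm_sched_fence *fence)
{
	/* Record the job's result and signal the "finished" fence. */
	drm_sched_fence_finished(fence, -ECANCELED);

	/*
	 * Waiters recover the result through the fence status:
	 * dma_fence_get_status() returns 1 for plain success, or the
	 * negative error set via dma_fence_set_error() above.
	 */
	WARN_ON(dma_fence_get_status(&fence->finished) != -ECANCELED);
}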
/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
						  ktime_t deadline)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	struct dma_fence *parent;
	unsigned long flags;

	spin_lock_irqsave(&fence->lock, flags);

	/* If we already have an earlier deadline, keep it: */
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
	    ktime_before(fence->deadline, deadline)) {
		spin_unlock_irqrestore(&fence->lock, flags);
		return;
	}

	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

	spin_unlock_irqrestore(&fence->lock, flags);

	/*
	 * smp_load_acquire() to ensure that if we are racing another
	 * thread calling drm_sched_fence_set_parent(), we see the
	 * parent set before it calls test_bit(HAS_DEADLINE_BIT)
	 */
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
	.set_deadline = drm_sched_fence_set_deadline_finished,
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				struct dma_fence *fence)
{
	/*
	 * smp_store_release() to ensure another thread racing us
	 * in drm_sched_fence_set_deadline_finished() sees the
	 * fence's parent set before test_bit()
	 */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);
}
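/*
 * Illustrative sketch only: how a consumer requests a deadline on the
 * "finished" fence. The helper is invented for the example;
 * dma_fence_set_deadline() is the real entry point. It lands in
 * drm_sched_fence_set_deadline_finished() above, which forwards the
 * deadline to the hardware fence either immediately or, if the job has
 * not been picked up yet, later via drm_sched_fence_set_parent().
 */
static void __maybe_unused drm_sched_fence_deadline_sketch(struct drm_sched_fence *fence)
{
	/* Ask for completion within roughly one millisecond from now. */
	dma_fence_set_deadline(&fence->finished,
			       ktime_add_us(ktime_get(), 1000));
}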
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner)
{
	struct drm_sched_fence *fence = NULL;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	spin_lock_init(&fence->lock);

	return fence;
}

void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");
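/*
 * Illustrative sketch only, not built as part of the module: the fence
 * lifecycle as the scheduler core drives it, condensed into one place.
 * The helper and its hw_fence parameter are assumptions standing in for
 * real scheduler state; all the calls themselves are defined above.
 */
static void __maybe_unused drm_sched_fence_lifecycle_sketch(
		struct drm_sched_entity *entity, struct dma_fence *hw_fence)
{
	struct drm_sched_fence *fence;

	/* Allocation and initialization are split so that job setup can
	 * still bail out cheaply with drm_sched_fence_free(). */
	fence = drm_sched_fence_alloc(entity, NULL);
	if (!fence)
		return;
	drm_sched_fence_init(fence, entity);

	/* Once the backend picks the job up, publish the hardware fence
	 * (forwarding any pending deadline) and signal "scheduled". */
	drm_sched_fence_set_parent(fence, hw_fence);
	drm_sched_fence_scheduled(fence);

	/* When the hardware completes, signal "finished" with the job's
	 * result: 0 on success, a negative errno on failure. */
	drm_sched_fence_finished(fence, 0);

	/*
	 * Dropping the last reference on "finished" releases the implicit
	 * reference on "scheduled", whose release callback in turn frees
	 * the whole object after an RCU grace period.
	 */
	dma_fence_put(&fence->finished);
}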