/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

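/**
 * amdgpu_sync_same_dev - check if a fence belongs to the given device
 *
 * @adev: amdgpu device to check against
 * @f: fence to check
 *
 * Returns true if the fence was emitted by a ring of @adev, either as a
 * plain amdgpu fence or as a GPU scheduler fence, false otherwise.
 */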
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (a_fence)
		return a_fence->ring->adev == adev;

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

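/**
 * amdgpu_sync_test_owner - check if a fence was issued by the given owner
 *
 * @f: fence to check
 * @owner: owner to compare against
 *
 * Returns true if the owner recorded in the fence matches @owner, false
 * for foreign fences that carry no owner information.
 */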
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner == owner;
	if (a_fence)
		return a_fence->owner == owner;
	return false;
}

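/**
 * amdgpu_sync_keep_later - keep the later of two fences
 *
 * @keep: fence pointer to update in place
 * @fence: candidate fence
 *
 * Replaces *@keep with a reference to @fence unless the fence already
 * stored there is the later one; the dropped fence is put.
 */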
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM if
 * a new hash table entry could not be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return 0;
	}

	e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}

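/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Returns the owner recorded in the scheduler or amdgpu fence, or
 * AMDGPU_FENCE_OWNER_UNDEFINED for foreign fences.
 */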
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;
	else if (a_fence)
		return a_fence->owner;
	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner of the fences we want to filter against
 *
 * Always sync to the exclusive fence and, depending on @owner, to the
 * shared fences of the reservation object as well.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

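/**
 * amdgpu_sync_get_fence - get the next unsignaled fence
 *
 * @sync: sync object to use
 *
 * Removes entries from the sync object until an unsignaled fence is
 * found and returns it with its reference transferred to the caller,
 * or NULL when all remembered fences have already signaled.
 */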
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
		kfree(e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}

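/**
 * amdgpu_sync_wait - wait for all remembered fences to signal
 *
 * @sync: sync object to wait on
 *
 * Waits for every fence in the sync object and drops the entries that
 * have completed. Returns 0 on success or the first fence_wait() error,
 * in which case the remaining entries are kept.
 */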
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to free
 *
 * Release all fences remembered in the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	fence_put(sync->last_vm_update);
}