/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

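/*
 * Allocate the backend state for the context (via ce->ops->alloc) the first
 * time it is needed. Serialised by ce->pin_mutex, with CONTEXT_ALLOC_BIT
 * recording a successful allocation so that it is only attempted once.
 */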
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

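/*
 * Mark the context as active and preallocate the barrier nodes we will
 * need on ce->engine, so that the matching i915_active_acquire_barrier()
 * in intel_context_active_release() cannot fail. Barrier contexts
 * themselves skip the preallocation.
 */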
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

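/*
 * Slow path of intel_context_pin(): allocate the backend state on first
 * use, then, under ce->pin_mutex, perform the actual pin (activate the
 * context and call ce->ops->pin) if the pin count is still zero, before
 * publishing it via ce->pin_count. If someone else pinned the context
 * while we waited for the mutex, atomic_add_unless() simply bumps the
 * count.
 */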
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}

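/*
 * Release a pin on the context. The final unpin calls ce->ops->unpin()
 * and drops the activity tracking acquired in __intel_context_do_pin();
 * see the comment below for why a temporary reference is held across
 * that release.
 */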
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that the context is not freed
	 * before we finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

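/*
 * Pin the context state vma into the GGTT above the required pin bias
 * (and preferably high), keep its activity tracking acquired, and mark the
 * object unshrinkable and dirty so that its contents are preserved while
 * the context is active.
 */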
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

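/*
 * Acquire the ring vma's activity tracking and pin the ring into the GGTT;
 * __ring_retire() undoes both in reverse order.
 */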
static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

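/*
 * Called (asynchronously) once the last request tracked by ce->active has
 * been retired: unwind everything pinned in __intel_context_active(), flag
 * the context image as now containing valid saved state, and drop the
 * self-reference taken on activation.
 */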
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

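/*
 * Called when ce->active transitions from idle to active: take a reference
 * on the context and pin its ring, timeline and (if present) state for as
 * long as it remains active.
 */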
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

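/*
 * Initialise a newly allocated intel_context for @engine. Note that at this
 * point ce->ring only encodes the requested ring size (SZ_4K by default,
 * via __intel_context_ring_size()); the backend installs the real ring and
 * timeline later in ce->ops->alloc().
 */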
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

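/*
 * Drop the context's references on its timeline (if one was set) and vm,
 * and tear down the mutex and active tracker. The struct itself is freed
 * separately by intel_context_free() once the last reference is dropped.
 */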
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

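/*
 * Busy/idle hooks for contexts driven directly by a physical engine: grab
 * an engine wakeref and enter the timeline while the context has requests
 * in flight, and release both once it idles again.
 */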
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

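/*
 * Prepare @rq, which runs on a different context, to modify @ce: order it
 * after any current activity on @ce's timeline (unless the two already
 * share a timeline) and, by adding it to ce->active, keep @ce's state
 * pinned until @rq is retired.
 */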
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by adding it to the ce
	 * activity tracker.
	 *
	 * We only need to take one pin on its account; in other words, we
	 * transfer our pin on the context to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

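/*
 * Allocate a request on @ce. i915_request_create() requires a pinned
 * context, so take a temporary pin around the allocation and release it
 * once the request has been created.
 */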
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif