/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

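/*
 * Allocate and initialise a fresh intel_context bound to @engine. The
 * caller receives the single initial reference; the backing HW state is
 * allocated lazily on first pin.
 */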
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

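/*
 * Allocate the backend state for this context on first use via
 * ce->ops->alloc(). Serialised by ce->pin_mutex; CONTEXT_ALLOC_BIT records
 * that the allocation has already been done so later callers return early.
 */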
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	/* Preallocate tracking nodes */
	if (!intel_context_is_barrier(ce)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err) {
			i915_active_release(&ce->active);
			return err;
		}
	}

	return 0;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

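/*
 * Pin the context for use on the HW: allocate the backend state on first
 * pin, mark the context as active and ask the backend to pin its state.
 * Pinning is reference counted via ce->pin_count; only the first pin does
 * the heavy lifting, later pins simply bump the count.
 */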
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err_active:
	intel_context_active_release(ce);
err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

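/*
 * Drop one pin reference. On the final unpin the backend is unpinned and
 * the active tracking is released; a temporary reference keeps the context
 * alive while we do so.
 */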
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that the context is not freed
	 * before we finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

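/*
 * Pin the context state object into the GGTT for the duration of the
 * context's activity and keep it out of the shrinker's reach.
 */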
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

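/*
 * Keep the ring vma active and the ring pinned while the context is in
 * use; __ring_retire() undoes both once the context idles.
 */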
static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

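/*
 * Called once the last request tracked on ce->active has been retired:
 * release the state, timeline and ring acquired in __intel_context_active()
 * and drop the reference taken there.
 */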
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

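/*
 * First activation of the context: take a reference and pin the ring,
 * timeline and (if present) context state for the duration of the
 * activity, unwinding everything on failure.
 */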
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

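/*
 * One-time initialisation of an intel_context: bind it to its engine,
 * inherit the engine's backend ops and sseu configuration, and take a
 * reference on the GT's address space.
 */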
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

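/*
 * While a context has requests in flight we hold an engine wakeref and
 * keep its timeline marked as active; intel_context_enter_engine() and
 * intel_context_exit_engine() bracket that window.
 */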
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

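/*
 * Prepare @rq, which was submitted on another context, to modify this
 * context: order it after the target's current activity and track it on
 * ce->active so the context image stays pinned until the modification has
 * been applied.
 */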
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by adding the request to
	 * the ce activity tracker.
	 *
	 * We only need a single pin for this: in effect, we transfer the
	 * already pinned ce to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

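/*
 * Convenience helper to create a request on this context, taking a
 * temporary pin around request construction.
 */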
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif