xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_context.c (revision 04eb94d526423ff082efce61f4f26b0369d0bfdd)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

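/* Return a context to the slab cache once its last reference is dropped. */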
void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

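/*
 * Allocate and initialise a new context for @engine within @ctx, or
 * return an ERR_PTR if the allocation fails.
 */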
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

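/*
 * Slow path behind intel_context_pin(): serialised by ce->pin_mutex.
 * The first pin calls into the backend (ce->ops->pin) under a runtime-pm
 * wakeref; pin_count is only raised afterwards, with a barrier so that
 * the pinned state is visible before the count says so.
 */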
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		err = 0;
		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

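/*
 * Release one pin. While other pins remain this is a simple atomic
 * decrement; the final unpin takes ce->pin_mutex (nested, see below)
 * and hands the context state back to the backend.
 */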
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

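/*
 * Pin the vma backing the context state into the global GTT and keep
 * it resident (see the matching __context_unpin_state()).
 */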
static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
{
	int err;

	err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	vma->obj->pin_global--;
	__i915_vma_unpin(vma);
}

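/*
 * i915_active retirement callback: the context has left the GPU, so
 * undo what intel_context_active_acquire() pinned.
 */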
static void intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_ring_unpin(ce->ring);
	intel_context_put(ce);
}

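/*
 * One-time construction: wire up the backend ops, refcount and locks.
 * Pinning of the ring and state is deferred until first use.
 */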
void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(ctx->i915, &ce->active, intel_context_retire);
}

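/*
 * Transition to active: on the first acquire, grab a reference and pin
 * the ring and (if present) the context state, unwinding in reverse
 * order on failure. Nested acquires return early.
 */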
int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
{
	int err;

	if (!i915_active_acquire(&ce->active))
		return 0;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, flags);
	if (err)
		goto err_ring;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err)
			goto err_state;
	}

	return 0;

err_state:
	__context_unpin_state(ce->state);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	i915_active_cancel(&ce->active);
	return err;
}

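/*
 * Pair with intel_context_active_acquire(): flush the preallocated
 * barrier nodes and drop the active reference.
 */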
void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

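/*
 * Module-init hook: create the slab cache behind intel_context_alloc()
 * and register the shrink/exit callbacks above.
 */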
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

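/*
 * Track engine busyness: hold an engine-pm wakeref while the context
 * is in flight, released again by intel_context_exit_engine().
 */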
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_engine_pm_put(ce->engine);
}

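/*
 * Convenience helper: pin the context just long enough to construct a
 * request against it; the request keeps the context pinned while it is
 * in flight, so our temporary pin can be dropped immediately.
 */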
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}