// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

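/*
 * encaps_handle_do_release - common release path of an encapsulated signals handle
 *
 * @handle: pointer to the encapsulated signals handle
 * @put_hw_sob: true if the handle's HW SOB refcount should be dropped
 * @put_ctx: true if the handle's context refcount should be dropped
 *
 * Removes the handle from the signals manager IDR and frees it.
 */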
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
					bool put_ctx)
{
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	if (put_hw_sob)
		hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	if (put_ctx)
		hl_ctx_put(handle->ctx);

	kfree(handle);
}

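/* kref release callback: put the context reference but keep the HW SOB */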
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, false, true);
}

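/* kref release callback: put the HW SOB reference but keep the context */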
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, false);
}

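/* kref release callback: put both the HW SOB and the context references */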
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, true);
}

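/* Initialize the encapsulated signals manager of a context */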
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

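/*
 * hl_encaps_sig_mgr_fini - release the encapsulated signals manager of a context
 *
 * @hdev: pointer to the device structure
 * @mgr: pointer to the encapsulated signals manager
 *
 * Releases any handles that are still allocated and destroys the IDR.
 */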
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	/* The IDR is expected to be empty at this stage, because any leftover signal should have
	 * been released as part of CS roll-back.
	 */
	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev,
			"device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
	}

	idr_destroy(&mgr->handles);
}

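/*
 * hl_ctx_fini - release the context resources
 *
 * @ctx: pointer to the context structure
 *
 * Puts all pending CS fences and releases the memory, MMU and ASIC-specific resources
 * that were acquired for the context. A user context also frees its ASID, CB VA pool
 * and encapsulated signals manager.
 */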
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues, so we can safely remove it.
	 * This is because we increment the ref count for each CS and
	 * decrement it for every CS that has finished, and we won't reach
	 * this function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

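/*
 * hl_ctx_do_release - kref release callback of a context
 *
 * @ref: pointer to the context refcount
 *
 * Releases the context resources, detaches the context from its owning hpriv (if any)
 * and frees the context object.
 */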
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

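/*
 * hl_ctx_create - create a user context for a process
 *
 * @hdev: pointer to the device structure
 * @hpriv: pointer to the private data of the open file
 *
 * Allocates a context, adds it to the context manager of the process, initializes it
 * and binds it to the hpriv. Currently a single context per process is supported.
 *
 * @return 0 on success, negative error code otherwise
 */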
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

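/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to the device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true if this is the kernel (driver) context
 *
 * Initializes the refcount, the CS tracking structures and the HW block memory list,
 * and performs the memory and ASIC-specific initializations. A user context also gets
 * an ASID, a VA pool for mapped CBs and an encapsulated signals manager, while the
 * kernel context always uses ASID 0.
 *
 * @return 0 on success, negative error code otherwise
 */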
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
			 &ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

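/* Get a context reference unless its refcount has already dropped to zero */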
static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

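/* Put a context reference; hl_ctx_do_release() is called when the refcount drops to zero */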
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

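/*
 * hl_get_compute_ctx - get the single compute (user) context of the device
 *
 * @hdev: pointer to the device structure
 *
 * @return pointer to the context with an incremented refcount, or NULL if no user
 *         context exists or if it is already being released
 */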
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* There can only be a single user that has opened the compute device, so exit
		 * immediately once we find its context or if we see that it has been released
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 *         error pointer.
 *
 * NOTE: this function shall be called with cs_lock locked
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

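/*
 * hl_ctx_get_fence - get CS fence by sequence number
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 *         error pointer.
 */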
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of the seq_arr and fence arrays
 *
 * @return 0 on success, otherwise non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
					seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}