// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

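/*
 * Update the sysprof mode requested for this drm_file.  Mode 1 holds a
 * reference on gpu->sysprof_active; mode 2 additionally holds a
 * pm_runtime reference so the GPU stays resumed while profiling.
 */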
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value.
	 */

	switch (sysprof) {
	default:
		return -EINVAL;
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}

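/*
 * kref release for a msm_file_private: destroys the lazily created
 * scheduler entities and frees the remaining context resources.
 */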
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

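/*
 * kref release for a submitqueue: destroys the fence idr and drops the
 * reference the queue holds on its owning context.
 */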
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

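/*
 * Look up a submitqueue by id, returning a new reference on success or
 * NULL if not found.  The caller must drop the reference with
 * msm_submitqueue_put().
 */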
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

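/*
 * Drop all of the context's submitqueues; called when the file is
 * being closed.
 */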
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

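/*
 * Scheduler entities are created lazily, one per (ring, priority)
 * pair, and shared by all of the context's submitqueues that map to
 * the same pair.
 */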
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

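/*
 * Create a new submitqueue for the requested userspace priority,
 * mapping it onto a ring and scheduler priority, and add it to the
 * context's list.  The new queue id is returned via *id when non-NULL.
 */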
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

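/*
 * MSM_SUBMITQUEUE_PARAM_FAULTS: copy the queue's fault count out to
 * userspace, or report the expected size when a zero length is passed.
 */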
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

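/*
 * Backend for the submitqueue query ioctl: look up the queue and
 * dispatch on the requested param.
 */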
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

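/*
 * Remove a submitqueue by id and drop the list's reference to it.  The
 * default queue (id 0) cannot be removed by userspace.
 */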
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}