xref: /openbmc/linux/drivers/gpu/drm/tegra/uapi.c (revision 2cf1c348)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "drm.h"
#include "uapi.h"

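/*
 * kref release callback for a channel mapping: unpin the host1x buffer
 * from the client's address space, drop the buffer reference and free
 * the mapping itself.
 */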
static void tegra_drm_mapping_release(struct kref *ref)
{
	struct tegra_drm_mapping *mapping =
		container_of(ref, struct tegra_drm_mapping, ref);

	host1x_bo_unpin(mapping->map);
	host1x_bo_put(mapping->bo);

	kfree(mapping);
}

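/* Drop a reference to a mapping; the final put releases it. */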
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
	kref_put(&mapping->ref, tegra_drm_mapping_release);
}

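/*
 * Tear down a channel context: drop every mapping still registered in the
 * context's XArray, return the host1x channel and free the context.
 */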
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
	struct tegra_drm_mapping *mapping;
	unsigned long id;

	xa_for_each(&context->mappings, id, mapping)
		tegra_drm_mapping_put(mapping);

	xa_destroy(&context->mappings);

	host1x_channel_put(context->channel);

	kfree(context);
}

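/*
 * Called when the DRM file is closed: close all channel contexts and
 * release all syncpoints still held through this file.
 */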
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
	struct tegra_drm_context *context;
	struct host1x_syncpt *sp;
	unsigned long id;

	xa_for_each(&file->contexts, id, context)
		tegra_drm_channel_context_close(context);

	xa_for_each(&file->syncpoints, id, sp)
		host1x_syncpt_put(sp);

	xa_destroy(&file->contexts);
	xa_destroy(&file->syncpoints);
}

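/* Look up the tegra_drm_client registered for the given host1x class. */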
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
	struct tegra_drm_client *client;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == class)
			return client;

	return NULL;
}

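/*
 * CHANNEL_OPEN ioctl: create a context for the engine identified by
 * args->host1x_class. Engines with a shared channel reuse it; otherwise an
 * exclusive channel is requested. On success, args->context holds the new
 * context ID and args->version/args->capabilities describe the engine.
 */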
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}

	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_channel;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

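	/* Report whether the engine's DMA accesses are coherent with the CPU caches. */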
	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}

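/*
 * CHANNEL_CLOSE ioctl: remove the context from the per-file XArray under
 * fpriv->lock, then tear it down outside the lock.
 */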
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_close *args = data;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, args->context);

	mutex_unlock(&fpriv->lock);

	tegra_drm_channel_context_close(context);

	return 0;
}

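/*
 * CHANNEL_MAP ioctl: pin a GEM object into the engine's address space so
 * that jobs submitted on this context can access it. The resulting mapping
 * ID is returned in args->mapping.
 */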
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	enum dma_data_direction direction;
	int err = 0;

	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

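	/*
	 * Translate the access flags into a DMA direction: READ means the
	 * engine reads the buffer (DMA_TO_DEVICE), WRITE means the engine
	 * writes it (DMA_FROM_DEVICE).
	 */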
	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
		direction = DMA_BIDIRECTIONAL;
		break;

	case DRM_TEGRA_CHANNEL_MAP_WRITE:
		direction = DMA_FROM_DEVICE;
		break;

	case DRM_TEGRA_CHANNEL_MAP_READ:
		direction = DMA_TO_DEVICE;
		break;

	default:
		err = -EINVAL;
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
	}

	mapping->iova = mapping->map->phys;
	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unpin;

	mutex_unlock(&fpriv->lock);

	return 0;

unpin:
	host1x_bo_unpin(mapping->map);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

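/*
 * CHANNEL_UNMAP ioctl: remove a mapping from the context and drop its
 * reference. The underlying pin is released only when the final reference
 * to the mapping is dropped.
 */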
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_unmap *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = xa_erase(&context->mappings, args->mapping);

	mutex_unlock(&fpriv->lock);

	if (!mapping)
		return -EINVAL;

	tegra_drm_mapping_put(mapping);
	return 0;
}

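/*
 * SYNCPOINT_ALLOCATE ioctl: allocate a client-managed host1x syncpoint,
 * named after the calling process, and return its ID in args->id. The ID
 * field must be zero on input.
 */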
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;
	int err;

	if (args->id)
		return -EINVAL;

	sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
	if (!sp)
		return -EBUSY;

	args->id = host1x_syncpt_id(sp);

	err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
	if (err) {
		host1x_syncpt_put(sp);
		return err;
	}

	return 0;
}

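/*
 * SYNCPOINT_FREE ioctl: release a syncpoint previously allocated through
 * this file. Fails with -EINVAL if the ID is not owned by this file.
 */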
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;

	mutex_lock(&fpriv->lock);
	sp = xa_erase(&fpriv->syncpoints, args->id);
	mutex_unlock(&fpriv->lock);

	if (!sp)
		return -EINVAL;

	host1x_syncpt_put(sp);

	return 0;
}

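/*
 * SYNCPOINT_WAIT ioctl: block until the syncpoint reaches args->threshold
 * or the absolute timeout expires; the current syncpoint value is returned
 * in args->value.
 */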
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct drm_tegra_syncpoint_wait *args = data;
	signed long timeout_jiffies;
	struct host1x_syncpt *sp;

	if (args->padding != 0)
		return -EINVAL;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}