// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);

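/*
 * Each open file descriptor owns one uacce_queue. A queue moves from
 * UACCE_Q_INIT to UACCE_Q_STARTED to UACCE_Q_ZOMBIE; all state transitions
 * are serialized by the global uacce_mutex.
 */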
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret = 0;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			goto out_with_lock;
	}

	q->state = UACCE_Q_STARTED;

out_with_lock:
	mutex_unlock(&uacce_mutex);

	return ret;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce_mutex);

	if (q->state == UACCE_Q_ZOMBIE)
		goto out;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;
out:
	mutex_unlock(&uacce_mutex);

	return 0;
}

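/*
 * UACCE_CMD_START_Q and UACCE_CMD_PUT_Q are handled in the core; every other
 * command is forwarded to the provider driver's ioctl hook.
 */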
static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		return uacce_start_queue(q);

	case UACCE_CMD_PUT_Q:
		return uacce_put_queue(q);

	default:
		if (!uacce->ops->ioctl)
			return -EINVAL;

		return uacce->ops->ioctl(q, cmd, arg);
	}
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

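/*
 * With UACCE_DEV_SVA the queue shares the address space of the calling
 * process: current->mm is bound to the parent device and the PASID returned
 * by the IOMMU is recorded in the queue.
 */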
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	int pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

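/*
 * open() creates one queue per file descriptor: bind it for SVA if the
 * device supports it, let the provider allocate its per-queue resources,
 * then add it to the device's queue list.
 */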
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret = 0;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;

	mutex_lock(&uacce->queues_lock);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->queues_lock);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;

	mutex_lock(&q->uacce->queues_lock);
	list_del(&q->list);
	mutex_unlock(&q->uacce->queues_lock);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	kfree(q);

	return 0;
}

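/*
 * Queue file regions are expected to be populated by the provider's mmap
 * hook; write faults on the mapping are refused with SIGBUS.
 */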
static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
{
	if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
		return VM_FAULT_SIGBUS;

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION) {
		qfr = q->qfrs[vma->vm_pgoff];
		/* clear the stale slot so the region can be mapped again */
		q->qfrs[vma->vm_pgoff] = NULL;
	}

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.fault = uacce_vma_fault,
	.close = uacce_vma_close,
};

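/*
 * The mmap() page offset selects the queue file region type (MMIO or DUS);
 * the provider driver's mmap hook performs the actual mapping.
 */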
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&uacce_mutex);

	return ret;

out_with_lock:
	mutex_unlock(&uacce_mutex);
	kfree(qfr);
	return ret;
}

static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;

	poll_wait(file, &q->wait, wait);
	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

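/*
 * Character-device entry points. A minimal user-space sequence might look
 * like the sketch below (illustrative only; the device name and the region
 * sizes, read from the sysfs attributes, depend on the provider driver):
 *
 *	int fd = open("/dev/my_accel-0", O_RDWR);
 *	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, UACCE_QFRT_MMIO * sysconf(_SC_PAGESIZE));
 *	void *dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, UACCE_QFRT_DUS * sysconf(_SC_PAGESIZE));
 *	ioctl(fd, UACCE_CMD_START_Q);
 *	... submit work through the shared regions, poll() for updates ...
 *	ioctl(fd, UACCE_CMD_PUT_Q);
 *	close(fd);
 */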
static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

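/* Read-only attributes exported under /sys/class/uacce/<device>/ */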
static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sprintf(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

/**
 * uacce_alloc() - alloc an accelerator
 * @parent: pointer of uacce parent device
 * @interface: pointer of uacce_interface for register
 *
 * Returns a uacce pointer on success and ERR_PTR on failure. The caller
 * must check the returned, negotiated uacce->flags.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	if (flags & UACCE_DEV_SVA) {
		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
		if (ret)
			flags &= ~UACCE_DEV_SVA;
	}

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->queues_lock);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	if (flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: The initialized uacce device
 *
 * Return 0 if register succeeded, or an error.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

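/*
 * Typical provider usage, as a sketch (names and error handling are
 * illustrative, not taken from a specific driver):
 *
 *	struct uacce_interface interface = {
 *		.name	= "my_accel",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &my_uacce_ops,
 *	};
 *
 *	uacce = uacce_alloc(parent_dev, &interface);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *	ret = uacce_register(uacce);
 *	if (ret) {
 *		uacce_remove(uacce);
 *		return ret;
 *	}
 *	...
 *	uacce_remove(uacce);	on teardown
 */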
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;
	/*
	 * Unmap any remaining user-space mappings so that nobody can keep
	 * accessing the mmapped area once the parent device is removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/* ensure no open queue remains */
	mutex_lock(&uacce->queues_lock);
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		uacce_put_queue(q);
		uacce_unbind_queue(q);
	}
	mutex_unlock(&uacce->queues_lock);

	/* disable sva now that no queue is open */
	if (uacce->flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

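/* Module init: create the uacce class and reserve the char dev number range. */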
static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");