xref: /openbmc/linux/drivers/misc/uacce/uacce.c (revision e3e289fb)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);

/*
 * If the parent driver or the device disappears, the queue state is invalid and
 * ops are not usable anymore.
 */
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}

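/*
 * Queue lifecycle, as implemented below: uacce_fops_open() creates a queue in
 * UACCE_Q_INIT, UACCE_CMD_START_Q moves it to UACCE_Q_STARTED, and
 * uacce_put_queue() (UACCE_CMD_PUT_Q, release, or uacce_remove()) parks it in
 * UACCE_Q_ZOMBIE, after which ioctl and mmap fail with -ENXIO and only
 * release() remains useful.
 */
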
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret;

	if (q->state != UACCE_Q_INIT)
		return -EINVAL;

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			return ret;
	}

	q->state = UACCE_Q_STARTED;
	return 0;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	long ret = -ENXIO;

	/*
	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
	 * gets called with mmap_lock held, by taking uacce->mutex instead of
	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
	 * mmap_lock, while holding uacce->mutex.
	 */
	mutex_lock(&uacce->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		ret = uacce_start_queue(q);
		break;
	case UACCE_CMD_PUT_Q:
		ret = uacce_put_queue(q);
		break;
	default:
		if (uacce->ops->ioctl)
			ret = uacce->ops->ioctl(q, cmd, arg);
		else
			ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&uacce->mutex);
	return ret;
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		qfr = q->qfrs[vma->vm_pgoff];

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}

static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;
	__poll_t ret = 0;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	poll_wait(file, &q->wait, wait);

	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		ret = EPOLLIN | EPOLLRDNORM;

out_unlock:
	mutex_unlock(&q->mutex);
	return ret;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

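/*
 * Illustrative user-space sketch (not part of this driver): how a client might
 * drive the character device exported above. The device node name
 * ("/dev/hisi_zip-0"), the header include path and the mapping sizes are
 * hypothetical placeholders; the real node name comes from dev_set_name() in
 * uacce_alloc() and the region sizes from the region_{mmio,dus}_size sysfs
 * attributes.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <misc/uacce/uacce.h>	// installed uapi header, path may vary
 *
 *	int use_queue(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		int fd = open("/dev/hisi_zip-0", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// vm_pgoff selects the region: UACCE_QFRT_MMIO = 0, UACCE_QFRT_DUS = 1
 *		void *mmio = mmap(NULL, psz, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, UACCE_QFRT_MMIO * psz);
 *		void *dus = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, UACCE_QFRT_DUS * psz);
 *		if (mmio == MAP_FAILED || dus == MAP_FAILED)
 *			goto out_close;
 *
 *		ioctl(fd, UACCE_CMD_START_Q);	// UACCE_Q_INIT -> UACCE_Q_STARTED
 *		// ... submit work through mmio/dus, poll(fd) for completion ...
 *		ioctl(fd, UACCE_CMD_PUT_Q);	// queue becomes UACCE_Q_ZOMBIE
 *	out_close:
 *		close(fd);
 *		return 0;
 *	}
 */
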
#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static ssize_t isolate_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
}

static ssize_t isolate_strategy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	u32 val;

	val = uacce->ops->isolate_err_threshold_read(uacce);

	return sysfs_emit(buf, "%u\n", val);
}

static ssize_t isolate_strategy_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val > UACCE_MAX_ERR_THRESHOLD)
		return -EINVAL;

	ret = uacce->ops->isolate_err_threshold_write(uacce, val);
	if (ret)
		return ret;

	return count;
}

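/*
 * The attributes below are exported per registered device, e.g. under
 * /sys/class/uacce/<name>-<dev_id>/{api,flags,available_instances,algorithms,
 * region_mmio_size,region_dus_size,isolate,isolate_strategy} (path inferred
 * from uacce_class and dev_set_name()); uacce_dev_is_visible() hides the ones
 * the parent driver does not implement.
 */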
static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	&dev_attr_isolate.attr,
	&dev_attr_isolate_strategy.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	if (attr == &dev_attr_isolate_strategy.attr &&
	    (!uacce->ops->isolate_err_threshold_read &&
	     !uacce->ops->isolate_err_threshold_write))
		return 0;

	if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
	int ret;

	if (!(flags & UACCE_DEV_SVA))
		return flags;

	flags &= ~UACCE_DEV_SVA;

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
	if (ret) {
		dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
		return flags;
	}

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
	if (ret) {
		dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
		iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
		return flags;
	}

	return flags | UACCE_DEV_SVA;
}

static void uacce_disable_sva(struct uacce_device *uacce)
{
	if (!(uacce->flags & UACCE_DEV_SVA))
		return;

	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
}

/**
 * uacce_alloc() - allocate an accelerator
 * @parent: pointer to the parent device of the uacce device
 * @interface: pointer to the uacce_interface describing the device to register
 *
 * Return: the new uacce device on success, or an ERR_PTR() on failure. The
 * caller must check the negotiated uacce->flags of the returned device.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	flags = uacce_enable_sva(parent, flags);

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	uacce_disable_sva(uacce);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export it to user space
 * @uacce: the initialized uacce device
 *
 * Return: 0 if registration succeeded, or an error code.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

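/*
 * Illustrative sketch (not part of this file): how a parent accelerator driver
 * might allocate and register a uacce device. "my_accel", its ops table, the
 * API/algorithm strings and the region page counts are hypothetical
 * placeholders; uacce_remove() is the teardown path on any later failure.
 *
 *	static int my_accel_uacce_init(struct device *parent,
 *				       const struct uacce_ops *ops)
 *	{
 *		struct uacce_interface interface = {
 *			.name	= "my_accel",
 *			.flags	= UACCE_DEV_SVA,
 *			.ops	= ops,
 *		};
 *		struct uacce_device *uacce;
 *		int ret;
 *
 *		uacce = uacce_alloc(parent, &interface);
 *		if (IS_ERR(uacce))
 *			return PTR_ERR(uacce);
 *
 *		// uacce_alloc() clears UACCE_DEV_SVA if SVA could not be enabled
 *		if (!(uacce->flags & UACCE_DEV_SVA))
 *			dev_info(parent, "running without SVA\n");
 *
 *		uacce->api_ver = "my_accel_api_v1";
 *		uacce->algs = "zlib\ngzip\n";
 *		uacce->qf_pg_num[UACCE_QFRT_MMIO] = 1;
 *		uacce->qf_pg_num[UACCE_QFRT_DUS] = 4;
 *
 *		ret = uacce_register(uacce);
 *		if (ret)
 *			uacce_remove(uacce);
 *		return ret;
 *	}
 */
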
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;
	/*
	 * Unmap any remaining mappings from user space, preventing user space
	 * from accessing the mmapped area after the parent device has been
	 * removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/*
	 * uacce_fops_open() may be running concurrently, even after we remove
	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
	 * removed uacce device.
	 */
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);
	}

	/* disable SVA now that no queues remain open */
	uacce_disable_sva(uacce);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause NULL deref rather than use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");