// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

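/*
 * Reset the subchannel backing this mdev: quiesce any running I/O and
 * pending interrupts, then re-enable the subchannel. The device is
 * moved back to the IDLE state on success.
 */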
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * At the current stage, things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other
	 * state we need to care about.
	 * There are still a lot more instructions that need to be
	 * handled. We should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}

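/*
 * IOMMU notifier callback: on a DMA unmap that covers an IOVA pinned
 * by the current channel program, reset the device and free the
 * channel program so that all of its pinned pages are released.
 */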
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

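/* sysfs attributes describing the single supported mdev type, "io" */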
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

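/*
 * Claim the available mdev instance for this subchannel and move the
 * device to the IDLE state; fails if the subchannel is not operational
 * or the instance has already been taken.
 */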
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	return 0;
}

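/*
 * Tear down the mdev instance: quiesce the subchannel if it is still
 * active, free the channel program, and return the instance to the
 * available pool.
 */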
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}

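/*
 * Userspace opened the mdev: register for DMA unmap notifications and
 * set up the async device regions. The notifier is unregistered again
 * if region setup fails.
 */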
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					 &private->nb);
	return ret;
}

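/*
 * Userspace closed the mdev: reset the device if necessary, free the
 * channel program, unregister the IOMMU notifier, and release all
 * dynamically registered device regions.
 */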
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	int i;

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);

	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}

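/*
 * Copy (part of) the ccw_io_region out to userspace, serialized
 * against in-flight I/O requests by io_mutex.
 */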
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}

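/*
 * Dispatch a read to the region selected by the offset: either the
 * static config (I/O) region or one of the dynamically registered
 * regions.
 */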
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}

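/*
 * Copy an I/O request from userspace into the ccw_io_region and kick
 * the FSM to translate and start the channel program. Returns -EAGAIN
 * instead of blocking if another request currently holds io_mutex.
 */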
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}

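/*
 * Dispatch a write to the region selected by the offset, mirroring
 * vfio_ccw_mdev_read().
 */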
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}

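/*
 * Fill in the VFIO_DEVICE_GET_INFO response: device flags plus the
 * number of regions and IRQs.
 */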
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

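/*
 * Fill in the VFIO_DEVICE_GET_REGION_INFO response. The config region
 * is described directly; all other regions are described via a
 * capability chain carrying their type/subtype, with info->index
 * sanitized against speculative out-of-bounds access first.
 */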
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}
	return 0;
}

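/* The only IRQ vfio-ccw exposes is the I/O interrupt, backed by an eventfd. */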
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}

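/*
 * Wire up or tear down the eventfd used to signal I/O interrupts to
 * userspace: DATA_NONE and DATA_BOOL trigger the currently installed
 * eventfd, while DATA_EVENTFD installs a new context (fd == -1 removes
 * the existing one).
 */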
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else {
			return -EINVAL;
		}

		return 0;
	}
	default:
		return -EINVAL;
	}
}

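/**
 * vfio_ccw_register_dev_region - register an additional device region
 * @private: the private struct for the device
 * @subtype: region subtype (the type is always VFIO_REGION_TYPE_CCW)
 * @ops: read/write/release callbacks for the region
 * @size: size of the region
 * @flags: VFIO_REGION_INFO_FLAG_* bits advertised for the region
 * @data: private data associated with the region
 *
 * Grows the region array by one; the new region is exposed to
 * userspace after the VFIO_CCW_NUM_REGIONS static indexes.
 *
 * Returns 0 on success or -ENOMEM.
 */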
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}

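/*
 * Top-level ioctl handler: validate the argument size against the
 * fixed part of each structure, then delegate to the helpers above.
 */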
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups  = mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};

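/*
 * Register/unregister this subchannel with the mediated device
 * framework, making the "io" mdev type (dis)appear in sysfs.
 */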
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}