xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_ops.c (revision 4302b3fb)
// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

static const struct vfio_device_ops vfio_ccw_dev_ops;

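/*
 * Reset a device: cycle the FSM through CLOSE and OPEN so the
 * subchannel is quiesced and reinitialized. If the device ends up
 * Not Operational, report the reset as failed.
 */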
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
	/*
	 * If the FSM state is seen as Not Operational after closing
	 * and re-opening the mdev, return an error.
	 */
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	return 0;
}

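/*
 * vfio dma_unmap callback: if the invalidated IOVA range overlaps
 * pages pinned by the current channel program, reset the device so
 * that those pages get unpinned.
 */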
static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	/* Drivers MUST unpin pages in response to an invalidation. */
	if (!cp_iova_pinned(&private->cp, iova, length))
		return;

	vfio_ccw_mdev_reset(private);
}

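/* sysfs attributes describing the single supported mdev type ("io") */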
static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mtype_get_parent_dev(mtype));

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

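/*
 * Probe an mdev: claim an available instance on the parent subchannel
 * and register the emulated (mdev) vfio device. Fails if the parent is
 * Not Operational or no instance is left.
 */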
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
	int ret;

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	memset(&private->vdev, 0, sizeof(private->vdev));
	vfio_init_group_dev(&private->vdev, &mdev->dev,
			    &vfio_ccw_dev_ops);

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
			   private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	ret = vfio_register_emulated_iommu_dev(&private->vdev);
	if (ret)
		goto err_atomic;
	dev_set_drvdata(&mdev->dev, private);
	return 0;

err_atomic:
	vfio_uninit_group_dev(&private->vdev);
	atomic_inc(&private->avail);
	return ret;
}

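/* Remove an mdev: unregister the vfio device and release the instance. */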
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
			   private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	vfio_unregister_group_dev(&private->vdev);

	vfio_uninit_group_dev(&private->vdev);
	atomic_inc(&private->avail);
}

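/*
 * Open callback: register the optional async, schib and crw regions,
 * then move the FSM to the open state. Failures after the first
 * registration unwind all registered regions.
 */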
static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	int ret;

	/* Device cannot simply be opened again from this state */
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		return ret;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		ret = -EINVAL;
		goto out_unregister;
	}

	return ret;

out_unregister:
	vfio_ccw_unregister_dev_regions(private);
	return ret;
}

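/* Close callback: quiesce the device and drop the extra regions. */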
static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_unregister_dev_regions(private);
}

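/*
 * Read from the I/O region, serialized against in-flight I/O by
 * io_mutex. Reads beyond the end of the region are rejected.
 */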
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}

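/* Dispatch a read to the config region or to an added device region. */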
static ssize_t vfio_ccw_mdev_read(struct vfio_device *vdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}

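/*
 * Write to the I/O region and trigger an I/O request. The mutex is
 * only trylocked: if an I/O operation is already in flight, return
 * -EAGAIN rather than blocking the caller.
 */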
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}

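/* Dispatch a write to the config region or to an added device region. */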
static ssize_t vfio_ccw_mdev_write(struct vfio_device *vdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}

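/* Fill in VFIO_DEVICE_GET_INFO data: flags, region and irq counts. */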
static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
					 struct vfio_device_info *info)
{
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

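/*
 * Fill in VFIO_DEVICE_GET_REGION_INFO data. The config region is
 * described directly; all other regions are described through a
 * capability chain copied out after the fixed-size info structure.
 */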
static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private,
					 struct vfio_region_info *info,
					 unsigned long arg)
{
	int i;

	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}
	return 0;
}

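/* Fill in VFIO_DEVICE_GET_IRQ_INFO data: each irq is a single eventfd. */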
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	switch (info->index) {
	case VFIO_CCW_IO_IRQ_INDEX:
	case VFIO_CCW_CRW_IRQ_INDEX:
	case VFIO_CCW_REQ_IRQ_INDEX:
		info->count = 1;
		info->flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

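/*
 * Wire up or tear down the eventfd for one of the irq indexes.
 * DATA_NONE and DATA_BOOL act as a manual trigger; DATA_EVENTFD
 * installs a new context (fd >= 0) or removes the current one (-1).
 */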
static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	case VFIO_CCW_REQ_IRQ_INDEX:
		ctx = &private->req_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

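/*
 * Append a device region to the private region array. The array is
 * grown with krealloc; on allocation failure the existing regions are
 * left untouched.
 */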
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}

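/* Release all added regions and free the region array. */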
void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
	int i;

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);
	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}

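/*
 * Device ioctl handler: implements the GET_INFO, GET_REGION_INFO,
 * GET_IRQ_INFO, SET_IRQS and RESET ioctls on top of the helpers above.
 */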
static ssize_t vfio_ccw_mdev_ioctl(struct vfio_device *vdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(private, &info);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(private, &info, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(private, hdr.flags, hdr.index,
					      data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(private);
	default:
		return -ENOTTY;
	}
}

/* Request removal of the device */
static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	struct device *dev = vdev->dev;

	if (private->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(dev,
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(private->req_trigger, 1);
	} else if (count == 0) {
		dev_notice(dev,
			   "No device request channel registered, blocked until released by user\n");
	}
}

static const struct vfio_device_ops vfio_ccw_dev_ops = {
	.open_device = vfio_ccw_mdev_open_device,
	.close_device = vfio_ccw_mdev_close_device,
	.read = vfio_ccw_mdev_read,
	.write = vfio_ccw_mdev_write,
	.ioctl = vfio_ccw_mdev_ioctl,
	.request = vfio_ccw_mdev_request,
	.dma_unmap = vfio_ccw_dma_unmap,
};

struct mdev_driver vfio_ccw_mdev_driver = {
	.driver = {
		.name = "vfio_ccw_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	.probe = vfio_ccw_mdev_probe,
	.remove = vfio_ccw_mdev_remove,
	.supported_type_groups  = mdev_type_groups,
};