xref: /openbmc/linux/drivers/s390/cio/vfio_ccw_chp.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Channel path related status regions for vfio_ccw
4  *
5  * Copyright IBM Corp. 2020
6  *
7  * Author(s): Farhan Ali <alifm@linux.ibm.com>
8  *            Eric Farman <farman@linux.ibm.com>
9  */
10 
11 #include <linux/vfio.h>
12 #include "vfio_ccw_private.h"
13 
14 static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
15 					  char __user *buf, size_t count,
16 					  loff_t *ppos)
17 {
18 	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
19 	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
20 	struct ccw_schib_region *region;
21 	int ret;
22 
23 	if (pos + count > sizeof(*region))
24 		return -EINVAL;
25 
26 	mutex_lock(&private->io_mutex);
27 	region = private->region[i].data;
28 
29 	if (cio_update_schib(private->sch)) {
30 		ret = -ENODEV;
31 		goto out;
32 	}
33 
34 	memcpy(region, &private->sch->schib, sizeof(*region));
35 
36 	if (copy_to_user(buf, (void *)region + pos, count)) {
37 		ret = -EFAULT;
38 		goto out;
39 	}
40 
41 	ret = count;
42 
43 out:
44 	mutex_unlock(&private->io_mutex);
45 	return ret;
46 }
47 
/* The SCHIB region is read-only; any write attempt is rejected. */
static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
					   const char __user *buf, size_t count,
					   loff_t *ppos)
{
	return -EINVAL;
}

54 
55 
/* Nothing to tear down: the region buffer is owned and freed elsewhere. */
static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
					  struct vfio_ccw_region *region)
{

}
61 
/* Region callbacks for the (read-only) SCHIB device region. */
static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
	.read = vfio_ccw_schib_region_read,
	.write = vfio_ccw_schib_region_write,
	.release = vfio_ccw_schib_region_release,
};
67 
/*
 * Register the SCHIB capability region for this device.
 * The region is advertised read-only (VFIO_REGION_INFO_FLAG_READ).
 * Returns 0 on success or a negative errno from
 * vfio_ccw_register_dev_region().
 */
int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
{
	return vfio_ccw_register_dev_region(private,
					    VFIO_REGION_SUBTYPE_CCW_SCHIB,
					    &vfio_ccw_schib_region_ops,
					    sizeof(struct ccw_schib_region),
					    VFIO_REGION_INFO_FLAG_READ,
					    private->schib_region);
}
77 
/*
 * Read one queued Channel Report Word (CRW) via the CRW region.
 *
 * Dequeues the oldest CRW (if any) from private->crw, stages it in the
 * region buffer, and copies the requested byte range to userspace.  A
 * read with an empty queue returns a zeroed region.  Returns the number
 * of bytes copied, -EINVAL if the request runs past the region, or
 * -EFAULT on a bad user buffer.
 */
static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
					char __user *buf, size_t count,
					loff_t *ppos)
{
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_crw_region *region;
	struct vfio_ccw_crw *crw;
	int ret;

	/* Reject reads that would run past the end of the region. */
	if (pos + count > sizeof(*region))
		return -EINVAL;

	/*
	 * NOTE(review): the list is walked and modified without holding
	 * io_mutex — presumably the producer side serializes against this
	 * path elsewhere; confirm against the enqueue code.
	 */
	crw = list_first_entry_or_null(&private->crw,
				       struct vfio_ccw_crw, next);

	/* The CRW is consumed by this read, even if copy_to_user fails. */
	if (crw)
		list_del(&crw->next);

	/* io_mutex serializes access to the region buffer. */
	mutex_lock(&private->io_mutex);
	region = private->region[i].data;

	if (crw)
		memcpy(&region->crw, &crw->crw, sizeof(region->crw));

	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;

	/* Clear the staged CRW so a later read can't see stale data. */
	region->crw = 0;

	mutex_unlock(&private->io_mutex);

	/* kfree(NULL) is a no-op, so no need to test crw here. */
	kfree(crw);

	/* Notify the guest if more CRWs are on our queue */
	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);

	return ret;
}
120 
/* The CRW region is read-only; any write attempt is rejected. */
static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
					 const char __user *buf, size_t count,
					 loff_t *ppos)
{
	return -EINVAL;
}
127 
/* Nothing to tear down: the region buffer is owned and freed elsewhere. */
static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
					struct vfio_ccw_region *region)
{

}
133 
/* Region callbacks for the (read-only) CRW device region. */
static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
	.read = vfio_ccw_crw_region_read,
	.write = vfio_ccw_crw_region_write,
	.release = vfio_ccw_crw_region_release,
};
139 
/*
 * Register the CRW capability region for this device.
 * The region is advertised read-only (VFIO_REGION_INFO_FLAG_READ).
 * Returns 0 on success or a negative errno from
 * vfio_ccw_register_dev_region().
 */
int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
{
	return vfio_ccw_register_dev_region(private,
					    VFIO_REGION_SUBTYPE_CCW_CRW,
					    &vfio_ccw_crw_region_ops,
					    sizeof(struct ccw_crw_region),
					    VFIO_REGION_INFO_FLAG_READ,
					    private->crw_region);
}
149