// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA simulator for block device.
 *
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2021, Red Hat Inc. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_blk.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Max Gurtovoy <mgurtovoy@nvidia.com>"
#define DRV_DESC     "vDPA Device Simulator for block device"
#define DRV_LICENSE  "GPL v2"

#define VDPASIM_BLK_FEATURES	(VDPASIM_FEATURES | \
				 (1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
				 (1ULL << VIRTIO_BLK_F_SEG_MAX)  | \
				 (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
				 (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
				 (1ULL << VIRTIO_BLK_F_MQ))

#define VDPASIM_BLK_CAPACITY	0x40000
#define VDPASIM_BLK_SIZE_MAX	0x1000
#define VDPASIM_BLK_SEG_MAX	32
#define VDPASIM_BLK_VQ_NUM	1
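
/*
 * VDPASIM_BLK_CAPACITY is in 512-byte sectors, so the backing buffer
 * allocated at device creation is 0x40000 << SECTOR_SHIFT = 128 MiB,
 * and the largest request accepted by vdpasim_blk_check_range() is
 * VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX = 128 KiB.
 */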
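/* ID string returned to the driver for VIRTIO_BLK_T_GET_ID requests */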
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";

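/**
 * vdpasim_blk_check_range() - check that a request fits in the simulated disk
 * @start_sector: first sector of the request
 * @range_size: size of the request in bytes
 *
 * Return: true if the request does not exceed the maximum transfer size
 * (VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX) and lies entirely within
 * VDPASIM_BLK_CAPACITY, false otherwise.
 */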
static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
{
	u64 range_sectors = range_size >> SECTOR_SHIFT;

	if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
		return false;

	if (start_sector > VDPASIM_BLK_CAPACITY)
		return false;

	if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
		return false;

	return true;
}

/* Returns 'true' if the request is handled (with or without an I/O error)
 * and the status is correctly written in the last byte of the 'in iov',
 * 'false' otherwise.
 */
static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
				   struct vdpasim_virtqueue *vq)
{
	size_t pushed = 0, to_pull, to_push;
	struct virtio_blk_outhdr hdr;
	ssize_t bytes;
	loff_t offset;
	u64 sector;
	u8 status;
	u32 type;
	int ret;

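	/*
	 * Fetch the next available descriptor chain: vringh_getdesc_iotlb()
	 * returns 1 if a chain was found, 0 if the ring is empty and a
	 * negative errno on error.
	 */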
	ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,
				   &vq->head, GFP_ATOMIC);
	if (ret != 1)
		return false;

	if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
		dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov: %u\n",
			vq->out_iov.used, vq->in_iov.used);
		return false;
	}

	if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
		dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
		return false;
	}

	/* The last byte is the status; we already checked that the last
	 * in iov has enough room for it.
	 */
	to_push = vringh_kiov_length(&vq->in_iov) - 1;

	to_pull = vringh_kiov_length(&vq->out_iov);

	bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
				      sizeof(hdr));
	if (bytes != sizeof(hdr)) {
		dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
		return false;
	}

	to_pull -= bytes;

	type = vdpasim32_to_cpu(vdpasim, hdr.type);
	sector = vdpasim64_to_cpu(vdpasim, hdr.sector);
	offset = sector << SECTOR_SHIFT;
	status = VIRTIO_BLK_S_OK;

	switch (type) {
	case VIRTIO_BLK_T_IN:
		if (!vdpasim_blk_check_range(sector, to_push)) {
			dev_err(&vdpasim->vdpa.dev,
				"reading over the capacity - offset: 0x%llx len: 0x%zx\n",
				offset, to_push);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
					      vdpasim->buffer + offset,
					      to_push);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
				bytes, offset, to_push);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		pushed += bytes;
		break;

	case VIRTIO_BLK_T_OUT:
		if (!vdpasim_blk_check_range(sector, to_pull)) {
			dev_err(&vdpasim->vdpa.dev,
				"writing over the capacity - offset: 0x%llx len: 0x%zx\n",
				offset, to_pull);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
					      vdpasim->buffer + offset,
					      to_pull);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
				bytes, offset, to_pull);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}
		break;

	case VIRTIO_BLK_T_GET_ID:
		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
					      vdpasim_blk_id,
					      VIRTIO_BLK_ID_BYTES);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_push_iotlb() error: %zd\n", bytes);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		pushed += bytes;
		break;

	default:
		dev_warn(&vdpasim->vdpa.dev,
			 "Unsupported request type %u\n", type);
		status = VIRTIO_BLK_S_IOERR;
		break;
	}

	/* If some operation failed, skip the remaining bytes so that the
	 * status is written in the last byte of the 'in iov'.
	 */
	if (to_push - pushed > 0)
		vringh_kiov_advance(&vq->in_iov, to_push - pushed);

	/* Last byte is the status */
	bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
	if (bytes != 1)
		return false;

	pushed += bytes;

	/* Make sure data is written before advancing the used index */
	smp_wmb();

	vringh_complete_iotlb(&vq->vring, vq->head, pushed);

	return true;
}
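/*
 * Work handler scheduled by the vdpa_sim core when a virtqueue is kicked:
 * drain every ready virtqueue and notify the driver when required.
 */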
static void vdpasim_blk_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	int i;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
		struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];

		if (!vq->ready)
			continue;

		while (vdpasim_blk_handle_req(vdpasim, vq)) {
			/* Make sure used is visible before raising the interrupt. */
			smp_wmb();

			local_bh_disable();
			if (vringh_need_notify_iotlb(&vq->vring) > 0)
				vringh_notify(&vq->vring);
			local_bh_enable();
		}
	}
out:
	spin_unlock(&vdpasim->lock);
}
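/*
 * Fill the virtio-blk config space read by the driver; multi-byte fields
 * are converted to the negotiated endianness via cpu_to_vdpasim*().
 */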
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_blk_config *blk_config = config;

	memset(config, 0, sizeof(struct virtio_blk_config));

	blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY);
	blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX);
	blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX);
	blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);
	blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
	blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
	blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
}
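/* The management device is statically allocated, so there is nothing to
 * free, but the driver core warns if no release() callback is provided.
 */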
static void vdpasim_blk_mgmtdev_release(struct device *dev)
{
}

static struct device vdpasim_blk_mgmtdev = {
	.init_name = "vdpasim_blk",
	.release = vdpasim_blk_mgmtdev_release,
};

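/*
 * .dev_add callback: create a simulated block device. Triggered from
 * userspace, for example with the iproute2 vdpa tool ('blk0' is just an
 * example name):
 *   vdpa dev add mgmtdev vdpasim_blk name blk0
 */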
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			       const struct vdpa_dev_set_config *config)
{
	struct vdpasim_dev_attr dev_attr = {};
	struct vdpasim *simdev;
	int ret;

	dev_attr.mgmt_dev = mdev;
	dev_attr.name = name;
	dev_attr.id = VIRTIO_ID_BLOCK;
	dev_attr.supported_features = VDPASIM_BLK_FEATURES;
	dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
	dev_attr.config_size = sizeof(struct virtio_blk_config);
	dev_attr.get_config = vdpasim_blk_get_config;
	dev_attr.work_fn = vdpasim_blk_work;
	dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;

	simdev = vdpasim_create(&dev_attr);
	if (IS_ERR(simdev))
		return PTR_ERR(simdev);

	ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
	if (ret)
		goto put_dev;

	return 0;

put_dev:
	put_device(&simdev->vdpa.dev);
	return ret;
}
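/* .dev_del callback: destroy a device previously created through this
 * management device (e.g. 'vdpa dev del blk0').
 */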
static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev,
				struct vdpa_device *dev)
{
	struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);

	_vdpa_unregister_device(&simdev->vdpa);
}

static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = {
	.dev_add = vdpasim_blk_dev_add,
	.dev_del = vdpasim_blk_dev_del
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct vdpa_mgmt_dev mgmt_dev = {
	.device = &vdpasim_blk_mgmtdev,
	.id_table = id_table,
	.ops = &vdpasim_blk_mgmtdev_ops,
};
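/*
 * Register the parent device with the driver core, then expose it as a
 * vdpa management device so that instances can be created from userspace.
 */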
static int __init vdpasim_blk_init(void)
{
	int ret;

	ret = device_register(&vdpasim_blk_mgmtdev);
	if (ret) {
		/* Drop the reference taken by device_register() even on failure */
		put_device(&vdpasim_blk_mgmtdev);
		return ret;
	}

	ret = vdpa_mgmtdev_register(&mgmt_dev);
	if (ret)
		goto parent_err;

	return 0;

parent_err:
	device_unregister(&vdpasim_blk_mgmtdev);
	return ret;
}

static void __exit vdpasim_blk_exit(void)
{
	vdpa_mgmtdev_unregister(&mgmt_dev);
	device_unregister(&vdpasim_blk_mgmtdev);
}

module_init(vdpasim_blk_init)
module_exit(vdpasim_blk_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);