// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);

void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	mutex_lock(&vdev->cf_mutex);
	vdev->config->set_status(vdev, status);
	mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL(vdpa_set_status);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	const char *driver_override, *old;
	char *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = vdev->driver_override;
	if (strlen(driver_override)) {
		vdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		vdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
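
/*
 * Illustrative use of the override from userspace (a sketch; "vdpa0" is a
 * hypothetical device name, "vhost_vdpa" one of the in-tree bus drivers):
 *
 *   # echo vhost_vdpa > /sys/bus/vdpa/devices/vdpa0/driver_override
 *   # echo vdpa0 > /sys/bus/vdpa/drivers_probe
 *
 * With driver_override set, vdpa_dev_match() above only accepts the named
 * driver; clearing the attribute restores the match-any behavior.
 */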

static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs  = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	mutex_destroy(&vdev->cf_mutex);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or the
 *	   ida allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for devices that use an on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	mutex_init(&vdev->cf_mutex);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
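
/*
 * A minimal allocation sketch using the vdpa_alloc_device() wrapper from
 * <linux/vdpa.h> (the "my_*" names are hypothetical; error handling is
 * elided). The embedded vdpa_device must be the first member so that
 * container_of() in the wrapper resolves at offset zero:
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;
 *		void __iomem *regs;
 *	};
 *
 *	struct my_vdpa *my;
 *
 *	my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *			       &my_config_ops, NULL, false);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */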

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * Caller must have a successful call to vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	int err;

	mutex_lock(&vdpa_dev_mutex);
	err = __vdpa_register_device(vdev, nvqs);
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
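
/*
 * A minimal registration sketch, continuing the hypothetical "my_vdpa"
 * example above (MY_NUM_VQS is illustrative). On failure, the reference
 * taken at allocation time must be dropped, which ends up in
 * vdpa_release_dev():
 *
 *	err = vdpa_register_device(&my->vdpa, MY_NUM_VQS);
 *	if (err) {
 *		put_device(&my->vdpa.dev);
 *		return err;
 *	}
 */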

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of the management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_mutex);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
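
/*
 * A minimal bus-driver sketch (hypothetical "my_*" names). The
 * vdpa_register_driver() and module_vdpa_driver() helpers in
 * <linux/vdpa.h> wrap __vdpa_register_driver() with THIS_MODULE:
 *
 *	static struct vdpa_driver my_vdpa_driver = {
 *		.driver = {
 *			.name = "my_vdpa",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_vdpa_driver(my_vdpa_driver);
 */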

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 *         are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	mutex_lock(&vdpa_dev_mutex);
	list_add_tail(&mdev->list, &mdev_head);
	mutex_unlock(&vdpa_dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
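
/*
 * A minimal management-device sketch (hypothetical "my_*" names; the
 * vdpa_mgmtdev_ops and vdpa_mgmt_dev definitions are in <linux/vdpa.h>):
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmt_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	my_mdev.device = parent_dev;
 *	my_mdev.ops = &my_mgmt_ops;
 *	my_mdev.id_table = my_id_table;
 *	err = vdpa_mgmtdev_register(&my_mdev);
 */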

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If they do happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features(vdev, 0, true);
	ops->get_config(vdev, offset, buf, len);
}

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	mutex_lock(&vdev->cf_mutex);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
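
/*
 * Example (a sketch): reading the MAC field of a virtio-net vdpa device's
 * config space. Assumes the caller already knows the device is a net
 * device; the struct layout comes from <uapi/linux/virtio_net.h>:
 *
 *	struct virtio_net_config cfg;
 *
 *	vdpa_get_config(vdev, offsetof(struct virtio_net_config, mac),
 *			cfg.mac, sizeof(cfg.mac));
 */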

/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	mutex_lock(&vdev->cf_mutex);
	vdev->config->set_config(vdev, offset, buf, length);
	mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for a simulated management device, so only
	 * match a device that has a bus when a bus attribute is provided,
	 * and a bus-less device when it is omitted.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = idx;
	return msg->len;
}

#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
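
/*
 * These are the config attributes userspace may pass with VDPA_CMD_DEV_NEW.
 * Illustrative invocation via the iproute2 "vdpa" tool (the mgmtdev and
 * device names here are hypothetical):
 *
 *   # vdpa dev add name vdpa0 mgmtdev vdpasim_net mac 00:11:22:33:44:55 mtu 1500
 */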

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}

	/* Skip the capability check if the user didn't ask to configure any
	 * device networking attributes. It is likely that the user configured
	 * such attributes with a device-specific method, or relies on the
	 * device default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "All provided attributes are not supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);
mdev_err:
	put_device(dev);
err:
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	mutex_lock(&vdpa_dev_mutex);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = info.idx;
	return msg->len;
}

static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
				       struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
		return 0;

	val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}

static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 val_u16;

	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
		    config.mac))
		return -EMSGSIZE;

	val_u16 = le16_to_cpu(config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	val_u16 = le16_to_cpu(config.mtu);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}

static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u32 device_id;
	void *hdr;
	u8 status;
	int err;

	mutex_lock(&vdev->cf_mutex);
	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
		err = -EAGAIN;
		goto out;
	}

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	mutex_unlock(&vdev->cf_mutex);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	mutex_unlock(&vdev->cf_mutex);
	return err;
}

static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);

mdev_err:
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		nlmsg_free(msg);
	return err;
}

static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	mutex_lock(&vdpa_dev_mutex);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = info.idx;
	return msg->len;
}

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");