xref: /openbmc/linux/drivers/vdpa/vdpa.c (revision 275487b4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vDPA bus.
4  *
5  * Copyright (c) 2020, Red Hat. All rights reserved.
6  *     Author: Jason Wang <jasowang@redhat.com>
7  *
8  */
9 
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/virtio_ids.h>
18 
/* List of registered vdpa management devices; protected by vdpa_dev_lock. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator of unique per-device indexes, used to name unnamed vdpa devices. */
static DEFINE_IDA(vdpa_index_ida);
23 
24 void vdpa_set_status(struct vdpa_device *vdev, u8 status)
25 {
26 	down_write(&vdev->cf_lock);
27 	vdev->config->set_status(vdev, status);
28 	up_write(&vdev->cf_lock);
29 }
30 EXPORT_SYMBOL(vdpa_set_status);
31 
/* Forward declaration: generic netlink family used by the fill/doit helpers below. */
static struct genl_family vdpa_nl_family;
33 
34 static int vdpa_dev_probe(struct device *d)
35 {
36 	struct vdpa_device *vdev = dev_to_vdpa(d);
37 	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
38 	const struct vdpa_config_ops *ops = vdev->config;
39 	u32 max_num, min_num = 1;
40 	int ret = 0;
41 
42 	d->dma_mask = &d->coherent_dma_mask;
43 	ret = dma_set_mask_and_coherent(d, DMA_BIT_MASK(64));
44 	if (ret)
45 		return ret;
46 
47 	max_num = ops->get_vq_num_max(vdev);
48 	if (ops->get_vq_num_min)
49 		min_num = ops->get_vq_num_min(vdev);
50 	if (max_num < min_num)
51 		return -EINVAL;
52 
53 	if (drv && drv->probe)
54 		ret = drv->probe(vdev);
55 
56 	return ret;
57 }
58 
59 static void vdpa_dev_remove(struct device *d)
60 {
61 	struct vdpa_device *vdev = dev_to_vdpa(d);
62 	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
63 
64 	if (drv && drv->remove)
65 		drv->remove(vdev);
66 }
67 
68 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
69 {
70 	struct vdpa_device *vdev = dev_to_vdpa(dev);
71 
72 	/* Check override first, and if set, only use the named driver */
73 	if (vdev->driver_override)
74 		return strcmp(vdev->driver_override, drv->name) == 0;
75 
76 	/* Currently devices must be supported by all vDPA bus drivers */
77 	return 1;
78 }
79 
80 static ssize_t driver_override_store(struct device *dev,
81 				     struct device_attribute *attr,
82 				     const char *buf, size_t count)
83 {
84 	struct vdpa_device *vdev = dev_to_vdpa(dev);
85 	int ret;
86 
87 	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
88 	if (ret)
89 		return ret;
90 
91 	return count;
92 }
93 
94 static ssize_t driver_override_show(struct device *dev,
95 				    struct device_attribute *attr, char *buf)
96 {
97 	struct vdpa_device *vdev = dev_to_vdpa(dev);
98 	ssize_t len;
99 
100 	device_lock(dev);
101 	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
102 	device_unlock(dev);
103 
104 	return len;
105 }
106 static DEVICE_ATTR_RW(driver_override);
107 
/* sysfs attributes exposed by every vdpa device. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs  = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

/* The vDPA bus: wires up matching, probing and removal of devices. */
static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
125 
126 static void vdpa_release_dev(struct device *d)
127 {
128 	struct vdpa_device *vdev = dev_to_vdpa(d);
129 	const struct vdpa_config_ops *ops = vdev->config;
130 
131 	if (ops->free)
132 		ops->free(vdev);
133 
134 	ida_simple_remove(&vdpa_index_ida, vdev->index);
135 	kfree(vdev->driver_override);
136 	kfree(vdev);
137 }
138 
139 /**
140  * __vdpa_alloc_device - allocate and initilaize a vDPA device
141  * This allows driver to some prepartion after device is
142  * initialized but before registered.
143  * @parent: the parent device
144  * @config: the bus operations that is supported by this device
145  * @ngroups: number of groups supported by this device
146  * @nas: number of address spaces supported by this device
147  * @size: size of the parent structure that contains private data
148  * @name: name of the vdpa device; optional.
149  * @use_va: indicate whether virtual address must be used by this device
150  *
151  * Driver should use vdpa_alloc_device() wrapper macro instead of
152  * using this directly.
153  *
154  * Return: Returns an error when parent/config/dma_dev is not set or fail to get
155  *	   ida.
156  */
157 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
158 					const struct vdpa_config_ops *config,
159 					unsigned int ngroups, unsigned int nas,
160 					size_t size, const char *name,
161 					bool use_va)
162 {
163 	struct vdpa_device *vdev;
164 	int err = -EINVAL;
165 
166 	if (!config)
167 		goto err;
168 
169 	if (!!config->dma_map != !!config->dma_unmap)
170 		goto err;
171 
172 	/* It should only work for the device that use on-chip IOMMU */
173 	if (use_va && !(config->dma_map || config->set_map))
174 		goto err;
175 
176 	err = -ENOMEM;
177 	vdev = kzalloc(size, GFP_KERNEL);
178 	if (!vdev)
179 		goto err;
180 
181 	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
182 	if (err < 0)
183 		goto err_ida;
184 
185 	vdev->dev.bus = &vdpa_bus;
186 	vdev->dev.parent = parent;
187 	vdev->dev.release = vdpa_release_dev;
188 	vdev->index = err;
189 	vdev->config = config;
190 	vdev->features_valid = false;
191 	vdev->use_va = use_va;
192 	vdev->ngroups = ngroups;
193 	vdev->nas = nas;
194 
195 	if (name)
196 		err = dev_set_name(&vdev->dev, "%s", name);
197 	else
198 		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
199 	if (err)
200 		goto err_name;
201 
202 	init_rwsem(&vdev->cf_lock);
203 	device_initialize(&vdev->dev);
204 
205 	return vdev;
206 
207 err_name:
208 	ida_simple_remove(&vdpa_index_ida, vdev->index);
209 err_ida:
210 	kfree(vdev);
211 err:
212 	return ERR_PTR(err);
213 }
214 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
215 
216 static int vdpa_name_match(struct device *dev, const void *data)
217 {
218 	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
219 
220 	return (strcmp(dev_name(&vdev->dev), data) == 0);
221 }
222 
223 static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
224 {
225 	struct device *dev;
226 
227 	vdev->nvqs = nvqs;
228 
229 	lockdep_assert_held(&vdpa_dev_lock);
230 	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
231 	if (dev) {
232 		put_device(dev);
233 		return -EEXIST;
234 	}
235 	return device_add(&vdev->dev);
236 }
237 
/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a successful call of vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	/* This entry point is only for management-device created devices. */
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
256 
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call of vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	/* Serialize against management-device and other device operations. */
	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
275 
/**
 * _vdpa_unregister_device - unregister a vDPA device with vdpa lock held
 * Caller must invoke this routine as part of management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	/* Only management-device owned devices should take this path. */
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
289 
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
301 
302 /**
303  * __vdpa_register_driver - register a vDPA device driver
304  * @drv: the vdpa device driver to be registered
305  * @owner: module owner of the driver
306  *
307  * Return: Returns an err when fail to do the registration
308  */
309 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
310 {
311 	drv->driver.bus = &vdpa_bus;
312 	drv->driver.owner = owner;
313 
314 	return driver_register(&drv->driver);
315 }
316 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
317 
/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
327 
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() register a vdpa management device which supports
 * vdpa device management.
 * Return: 0 on success, or -EINVAL when the required callback ops are not
 *         initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	/* A usable management device needs a parent device plus add/del ops. */
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
349 
350 static int vdpa_match_remove(struct device *dev, void *data)
351 {
352 	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
353 	struct vdpa_mgmt_dev *mdev = vdev->mdev;
354 
355 	if (mdev == data)
356 		mdev->ops->dev_del(mdev, vdev);
357 	return 0;
358 }
359 
/**
 * vdpa_mgmtdev_unregister - unregister a vdpa management device
 * @mdev: Pointer to a previously registered vdpa management device
 *
 * Also deletes every vdpa device that was created through @mdev.
 */
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries belong to this management device and delete it. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
372 
/*
 * Read config space without taking cf_lock; the caller is responsible for
 * serialization (see vdpa_get_config()).
 */
static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}
387 
/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	/* Readers may run concurrently; writers (set_config/status) exclude us. */
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
403 
404 /**
405  * vdpa_set_config - Set one or more device configuration fields.
406  * @vdev: vdpa device to operate on
407  * @offset: starting byte offset of the field
408  * @buf: buffer pointer to read from
409  * @length: length of the configuration fields in bytes
410  */
411 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
412 		     const void *buf, unsigned int length)
413 {
414 	down_write(&vdev->cf_lock);
415 	vdev->config->set_config(vdev, offset, buf, length);
416 	up_write(&vdev->cf_lock);
417 }
418 EXPORT_SYMBOL_GPL(vdpa_set_config);
419 
420 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
421 				 const char *busname, const char *devname)
422 {
423 	/* Bus name is optional for simulated management device, so ignore the
424 	 * device with bus if bus attribute is provided.
425 	 */
426 	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
427 		return false;
428 
429 	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
430 		return true;
431 
432 	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
433 	    (strcmp(dev_name(mdev->device), devname) == 0))
434 		return true;
435 
436 	return false;
437 }
438 
439 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
440 {
441 	struct vdpa_mgmt_dev *mdev;
442 	const char *busname = NULL;
443 	const char *devname;
444 
445 	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
446 		return ERR_PTR(-EINVAL);
447 	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
448 	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
449 		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
450 
451 	list_for_each_entry(mdev, &mdev_head, list) {
452 		if (mgmtdev_handle_match(mdev, busname, devname))
453 			return mdev;
454 	}
455 	return ERR_PTR(-ENODEV);
456 }
457 
458 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
459 {
460 	if (mdev->device->bus &&
461 	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
462 		return -EMSGSIZE;
463 	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
464 		return -EMSGSIZE;
465 	return 0;
466 }
467 
/* Emit one VDPA_CMD_MGMTDEV_NEW message describing @mdev into @msg. */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	/*
	 * Collapse the supported virtio device IDs into a u64 bitmask;
	 * IDs above 63 don't fit the attribute and are silently skipped.
	 */
	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	/* Roll back the partially built message header and attributes. */
	genlmsg_cancel(msg, hdr);
	return err;
}
512 
/* VDPA_CMD_MGMTDEV_GET (doit): reply with one management device description. */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Hold the lock while dereferencing the mgmtdev found on the list. */
	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	/* genlmsg_reply() consumes @msg, so no nlmsg_free() on this path. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}
543 
/* VDPA_CMD_MGMTDEV_GET (dumpit): emit all management devices, resumable. */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];	/* resume point from the previous round */
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		/* Skip entries already emitted in an earlier dump round. */
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	/* Record where to resume; netlink retries until the list is drained. */
	cb->args[0] = idx;
	return msg->len;
}
569 
/* Net config attributes whose provisioning requires CAP_NET_ADMIN. */
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
573 
/* VDPA_CMD_DEV_NEW: create a vdpa device through its management device. */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	/* Collect optional provisioning attributes; config.mask records which
	 * ones the user actually supplied.
	 */
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip checking capability if user didn't prefer to configure any
	 * device networking attributes. It is likely that user might have used
	 * a device specific method to configure such attributes or using device
	 * default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* Write lock: dev_add() may register a new device on the bus. */
	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	/* Reject attributes the management device cannot honor. */
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_FMT_MOD(info->extack,
				       "Some provided attributes are not supported: 0x%llx",
				       config.mask & ~mdev->config_attr_mask);
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}
644 
/* VDPA_CMD_DEV_DEL: delete a user-created vdpa device by name. */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	/* bus_find_device() takes a reference; dropped via put_device() below. */
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Devices without a mgmtdev were not created via netlink. */
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}
678 
/* Emit one VDPA_CMD_DEV_NEW message describing @vdev into @msg. */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;	/* default when ->get_vq_num_min is absent */
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	/* Undo the partially constructed message. */
	genlmsg_cancel(msg, hdr);
	return err;
}
725 
/* VDPA_CMD_DEV_GET (doit): reply with the description of one vdpa device. */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	/* bus_find_device() takes a reference; dropped via put_device(). */
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Only management-device created devices are visible over netlink. */
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes @msg on this path. */
	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}
769 
/* Cursor state shared by the bus_for_each_dev() callbacks during a dump. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* message being filled */
	struct netlink_callback *cb;	/* netlink dump context */
	int start_idx;			/* first index to emit (resume point) */
	int idx;			/* index of the current device */
};
776 
777 static int vdpa_dev_dump(struct device *dev, void *data)
778 {
779 	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
780 	struct vdpa_dev_dump_info *info = data;
781 	int err;
782 
783 	if (!vdev->mdev)
784 		return 0;
785 	if (info->idx < info->start_idx) {
786 		info->idx++;
787 		return 0;
788 	}
789 	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
790 			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
791 	if (err)
792 		return err;
793 
794 	info->idx++;
795 	return 0;
796 }
797 
798 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
799 {
800 	struct vdpa_dev_dump_info info;
801 
802 	info.msg = msg;
803 	info.cb = cb;
804 	info.start_idx = cb->args[0];
805 	info.idx = 0;
806 
807 	down_read(&vdpa_dev_lock);
808 	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
809 	up_read(&vdpa_dev_lock);
810 	cb->args[0] = info.idx;
811 	return msg->len;
812 }
813 
814 static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
815 				       const struct virtio_net_config *config)
816 {
817 	u16 val_u16;
818 
819 	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
820 	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
821 		return 0;
822 
823 	val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
824 
825 	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
826 }
827 
828 static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
829 					const struct virtio_net_config *config)
830 {
831 	u16 val_u16;
832 
833 	if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
834 		return 0;
835 
836 	val_u16 = __virtio16_to_cpu(true, config->mtu);
837 
838 	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
839 }
840 
841 static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
842 					const struct virtio_net_config *config)
843 {
844 	if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
845 		return 0;
846 
847 	return  nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
848 			sizeof(config->mac), config->mac);
849 }
850 
/* Fill the virtio-net specific config attributes of @vdev into @msg. */
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features_device;
	u16 val_u16;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	/*
	 * NOTE(review): __virtio16_to_cpu(true, ...) hard-codes a
	 * little-endian config layout — confirm this is valid for legacy
	 * big-endian configurations.
	 */
	val_u16 = __virtio16_to_cpu(true, config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	/* Feature-gated attributes: each helper no-ops when not negotiated. */
	if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}
877 
/* Emit one VDPA_CMD_DEV_CONFIG_GET message for @vdev, under cf_lock. */
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	/* Hold cf_lock so config and status reads are mutually consistent. */
	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	/* Device-class specific config; only virtio-net is supported here. */
	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}
939 
/* Fill one vendor-stats record; the caller holds cf_lock for reading. */
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u8 status;
	int err;

	/* Stats are only meaningful once features have been negotiated. */
	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	/* cf_lock is already held, so use the unlocked config reader. */
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	err = vdpa_dev_net_mq_config_fill(msg, features, &config);
	if (err)
		return err;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	/* Delegate the per-queue counters to the parent driver. */
	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}
973 
974 static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
975 			     struct genl_info *info, u32 index)
976 {
977 	int err;
978 
979 	down_read(&vdev->cf_lock);
980 	if (!vdev->config->get_vendor_vq_stats) {
981 		err = -EOPNOTSUPP;
982 		goto out;
983 	}
984 
985 	err = vdpa_fill_stats_rec(vdev, msg, info, index);
986 out:
987 	up_read(&vdev->cf_lock);
988 	return err;
989 }
990 
/* Emit one VDPA_CMD_DEV_VSTATS_GET message for queue @index of @vdev. */
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	/*
	 * NOTE(review): on a switch-case error the header is still closed via
	 * genlmsg_end() rather than cancelled, and err is propagated — confirm
	 * callers free @msg on error so the partial message isn't sent.
	 */
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}
1040 
/* VDPA_CMD_DEV_CONFIG_GET (doit): reply with one device's config attributes. */
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	/* bus_find_device() takes a reference; dropped via put_device(). */
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);	/* consumes @msg on success */

mdev_err:
	put_device(dev);
dev_err:
	up_read(&vdpa_dev_lock);
	if (err)
		nlmsg_free(msg);
	return err;
}
1082 
1083 static int vdpa_dev_config_dump(struct device *dev, void *data)
1084 {
1085 	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
1086 	struct vdpa_dev_dump_info *info = data;
1087 	int err;
1088 
1089 	if (!vdev->mdev)
1090 		return 0;
1091 	if (info->idx < info->start_idx) {
1092 		info->idx++;
1093 		return 0;
1094 	}
1095 	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
1096 				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1097 				   info->cb->extack);
1098 	if (err)
1099 		return err;
1100 
1101 	info->idx++;
1102 	return 0;
1103 }
1104 
1105 static int
1106 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
1107 {
1108 	struct vdpa_dev_dump_info info;
1109 
1110 	info.msg = msg;
1111 	info.cb = cb;
1112 	info.start_idx = cb->args[0];
1113 	info.idx = 0;
1114 
1115 	down_read(&vdpa_dev_lock);
1116 	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
1117 	up_read(&vdpa_dev_lock);
1118 	cb->args[0] = info.idx;
1119 	return msg->len;
1120 }
1121 
/* VDPA_CMD_DEV_VSTATS_GET doit: reply with vendor statistics for one
 * virtqueue of a named, management-device-owned vdpa device.
 */
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	/* Both the device name and the queue index are mandatory. */
	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
	/* Hold the global lock so the device cannot be deleted underneath us. */
	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	/* bus_find_device() took a reference; drop it on every exit path. */
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		/* Vendor stats are only exposed for managed devices. */
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes @msg even on failure, so @msg must not be
	 * freed once it is handed over; only the goto paths below free it.
	 */
	err = genlmsg_reply(msg, info);

	put_device(dev);
	up_read(&vdpa_dev_lock);

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
	up_read(&vdpa_dev_lock);
	return err;
}
1175 
1176 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
1177 	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
1178 	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
1179 	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
1180 	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1181 	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
1182 	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
1183 };
1184 
/* Generic netlink operations for the vdpa family. Read-only queries
 * (mgmtdev/dev/config get) are unprivileged; device creation, deletion
 * and vendor statistics require CAP_NET_ADMIN (GENL_ADMIN_PERM).
 */
static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
};
1223 
/* Definition of the "vdpa" generic netlink family. Registered in
 * vdpa_init(); netnsok is false, so the family is init-netns only.
 */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
	/* Commands past VSTATS_GET are reserved for future use. */
	.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};
1235 
1236 static int vdpa_init(void)
1237 {
1238 	int err;
1239 
1240 	err = bus_register(&vdpa_bus);
1241 	if (err)
1242 		return err;
1243 	err = genl_register_family(&vdpa_nl_family);
1244 	if (err)
1245 		goto err;
1246 	return 0;
1247 
1248 err:
1249 	bus_unregister(&vdpa_bus);
1250 	return err;
1251 }
1252 
/* Module exit: tear down in reverse registration order — netlink family
 * first (stops new requests), then the bus, then the device-index IDA.
 */
static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
/* core_initcall (rather than module_init) — presumably so the vdpa bus
 * exists before dependent drivers initialize; common for bus code, but
 * confirm against in-tree users before changing.
 */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");
1264