xref: /openbmc/linux/drivers/vdpa/vdpa.c (revision 8938c48f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>

static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	int ret = 0;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static int vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);

	return 0;
}

static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @nvqs: number of virtqueues supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Returns an error when parent/config/dma_dev is not set or when the
 * ida allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					int nvqs, size_t size, const char *name)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->nvqs = nvqs;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
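
/*
 * Example (illustrative sketch, not part of this driver): a parent driver
 * normally embeds struct vdpa_device inside its own device structure and
 * allocates it through the vdpa_alloc_device() wrapper macro from
 * <linux/vdpa.h> rather than calling __vdpa_alloc_device() directly.
 * "struct my_vdpa", "my_config_ops" and the my_get_*() callbacks are
 * hypothetical names used only for illustration:
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;
 *		void *priv;
 *	};
 *
 *	static const struct vdpa_config_ops my_config_ops = {
 *		.get_device_id = my_get_device_id,
 *		.get_vendor_id = my_get_vendor_id,
 *		.get_vq_num_max = my_get_vq_num_max,
 *		...
 *	};
 *
 *	struct my_vdpa *my;
 *
 *	my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *			       &my_config_ops, nvqs, NULL);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */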

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev)
{
	struct device *dev;

	lockdep_assert_held(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa device mutex held
 * The caller must have made a successful call to vdpa_alloc_device() before.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 *
 * Returns an error when the device cannot be added to the vDPA bus.
 */
int _vdpa_register_device(struct vdpa_device *vdev)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
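
/*
 * Example (hedged sketch): _vdpa_register_device() is meant to be called
 * from a management device's dev_add() callback, after the vdpa device's
 * mdev pointer has been set so the device can later be deleted through the
 * same management interface. my_vdpa_create() and struct my_vdpa are
 * hypothetical:
 *
 *	static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
 *	{
 *		struct my_vdpa *my = my_vdpa_create(name);
 *
 *		if (IS_ERR(my))
 *			return PTR_ERR(my);
 *
 *		my->vdpa.mdev = mdev;
 *		return _vdpa_register_device(&my->vdpa);
 *	}
 */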

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 *
 * Returns an error when the device cannot be added to the vDPA bus.
 */
int vdpa_register_device(struct vdpa_device *vdev)
{
	int err;

	mutex_lock(&vdpa_dev_mutex);
	err = __vdpa_register_device(vdev);
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
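
/*
 * Example (illustrative sketch): a parent driver that does not expose a
 * management device registers the device it allocated directly. On failure,
 * dropping the reference with put_device() releases the device through
 * vdpa_release_dev(). "my" is the hypothetical structure from the
 * allocation example above:
 *
 *	err = vdpa_register_device(&my->vdpa);
 *	if (err) {
 *		put_device(&my->vdpa.dev);
 *		return err;
 *	}
 */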

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * The caller must invoke this routine as part of the management device
 * dev_del() callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_mutex);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Returns an error when the registration fails.
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);
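
/*
 * Example (hedged sketch): a vDPA bus driver (e.g. a virtio or vhost
 * backend) fills in a struct vdpa_driver and registers it with the
 * vdpa_register_driver() wrapper from <linux/vdpa.h>, which supplies
 * THIS_MODULE as the owner. The probe/remove callbacks are hypothetical:
 *
 *	static struct vdpa_driver my_bus_driver = {
 *		.driver = {
 *			.name = "my_vdpa_bus_driver",
 *		},
 *		.probe = my_bus_probe,
 *		.remove = my_bus_remove,
 *	};
 *
 *	err = vdpa_register_driver(&my_bus_driver);
 */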

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to the vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which
 * supports vdpa device management.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	mutex_lock(&vdpa_dev_mutex);
	list_add_tail(&mdev->list, &mdev_head);
	mutex_unlock(&vdpa_dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
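
/*
 * Example (illustrative sketch): a management device ties together a parent
 * struct device, the dev_add()/dev_del() operations and an id_table of the
 * virtio device classes it can create, then registers itself. The my_* names
 * are hypothetical and "pdev" stands in for the parent's underlying device;
 * VIRTIO_ID_NET and VIRTIO_DEV_ANY_ID come from the virtio headers:
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static struct vdpa_mgmt_dev my_mgmtdev = {
 *		.ops = &my_mgmtdev_ops,
 *		.id_table = my_id_table,
 *	};
 *
 *	my_mgmtdev.device = &pdev->dev;
 *	err = vdpa_mgmtdev_register(&my_mgmtdev);
 */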

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for the simulated management device, so a
	 * management device without a bus only matches when no bus attribute
	 * is provided, and one with a bus only matches when it is.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		supported_classes |= BIT(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = idx;
	return msg->len;
}

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name);
err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);
mdev_err:
	put_device(dev);
err:
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	mutex_lock(&vdpa_dev_mutex);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = info.idx;
	return msg->len;
}

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};
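
/*
 * The generic netlink family registered above is the kernel side of the
 * vdpa management API; the iproute2 "vdpa" tool is the usual userspace
 * consumer. As a rough, hedged illustration (names such as vdpa0 and
 * vdpasim_net are examples only), its commands map onto the ops table
 * roughly as follows:
 *
 *	vdpa mgmtdev show				(VDPA_CMD_MGMTDEV_GET)
 *	vdpa dev add name vdpa0 mgmtdev vdpasim_net	(VDPA_CMD_DEV_NEW)
 *	vdpa dev show vdpa0				(VDPA_CMD_DEV_GET)
 *	vdpa dev del vdpa0				(VDPA_CMD_DEV_DEL)
 */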

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");