// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

/* List of registered vdpa management devices (struct vdpa_mgmt_dev.list). */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
/* Allocator for the per-device index used in the default "vdpa%u" name. */
static DEFINE_IDA(vdpa_index_ida);

static struct genl_family vdpa_nl_family;

/*
 * Bus probe callback: sanity-check the device's virtqueue size range
 * before handing the device to the matched vdpa driver.
 */
static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	/* get_vq_num_min is optional; default minimum is 1. */
	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	/* An empty [min, max] virtqueue size range is a broken device. */
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

/* Bus remove callback: forward to the driver's remove hook if present. */
static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

/*
 * Release callback invoked on the final put of the struct device:
 * free driver private state, return the index and free the vdev.
 */
static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	mutex_destroy(&vdev->cf_mutex);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows driver to do some preparation after device is
 * initialized but before registered.
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Driver should use vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
 * ida.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	/* dma_map and dma_unmap must be provided as a pair. */
	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for the device that uses an on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	/* ida_alloc() returns the new index on success, negative on error. */
	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	mutex_init(&vdev->cf_mutex);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);

/* bus_find_device() match helper: compare against a device name string. */
static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

/* Common device_add() path; rejects duplicate names. Caller holds vdpa_dev_mutex. */
static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a successful call of vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call of vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	int err;

	mutex_lock(&vdpa_dev_mutex);
	err = __vdpa_register_device(vdev, nvqs);
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	/* Only valid for management-device-created devices (mdev set). */
	lockdep_assert_held(&vdpa_dev_mutex);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an err when fail to do the registration
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() register a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success or failure when required callback ops are not
 * initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	/* dev_add and dev_del are mandatory for a management device. */
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	mutex_lock(&vdpa_dev_mutex);
	list_add_tail(&mdev->list, &mdev_head);
	mutex_unlock(&vdpa_dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

/*
 * bus_for_each_dev() helper: delete the vdpa device when it belongs to
 * the management device passed in @data.
 */
static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Filter out all the entries belonging to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	mutex_lock(&vdev->cf_mutex);
	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
	mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
329 * @vdev: vdpa device to operate on 330 * @offset: starting byte offset of the field 331 * @buf: buffer pointer to read from 332 * @length: length of the configuration fields in bytes 333 */ 334 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, 335 const void *buf, unsigned int length) 336 { 337 mutex_lock(&vdev->cf_mutex); 338 vdev->config->set_config(vdev, offset, buf, length); 339 mutex_unlock(&vdev->cf_mutex); 340 } 341 EXPORT_SYMBOL_GPL(vdpa_set_config); 342 343 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev, 344 const char *busname, const char *devname) 345 { 346 /* Bus name is optional for simulated management device, so ignore the 347 * device with bus if bus attribute is provided. 348 */ 349 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus)) 350 return false; 351 352 if (!busname && strcmp(dev_name(mdev->device), devname) == 0) 353 return true; 354 355 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) && 356 (strcmp(dev_name(mdev->device), devname) == 0)) 357 return true; 358 359 return false; 360 } 361 362 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs) 363 { 364 struct vdpa_mgmt_dev *mdev; 365 const char *busname = NULL; 366 const char *devname; 367 368 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]) 369 return ERR_PTR(-EINVAL); 370 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]); 371 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]) 372 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]); 373 374 list_for_each_entry(mdev, &mdev_head, list) { 375 if (mgmtdev_handle_match(mdev, busname, devname)) 376 return mdev; 377 } 378 return ERR_PTR(-ENODEV); 379 } 380 381 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev) 382 { 383 if (mdev->device->bus && 384 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name)) 385 return -EMSGSIZE; 386 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, 
dev_name(mdev->device))) 387 return -EMSGSIZE; 388 return 0; 389 } 390 391 static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg, 392 u32 portid, u32 seq, int flags) 393 { 394 u64 supported_classes = 0; 395 void *hdr; 396 int i = 0; 397 int err; 398 399 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW); 400 if (!hdr) 401 return -EMSGSIZE; 402 err = vdpa_nl_mgmtdev_handle_fill(msg, mdev); 403 if (err) 404 goto msg_err; 405 406 while (mdev->id_table[i].device) { 407 supported_classes |= BIT(mdev->id_table[i].device); 408 i++; 409 } 410 411 if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, 412 supported_classes, VDPA_ATTR_UNSPEC)) { 413 err = -EMSGSIZE; 414 goto msg_err; 415 } 416 417 genlmsg_end(msg, hdr); 418 return 0; 419 420 msg_err: 421 genlmsg_cancel(msg, hdr); 422 return err; 423 } 424 425 static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info) 426 { 427 struct vdpa_mgmt_dev *mdev; 428 struct sk_buff *msg; 429 int err; 430 431 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 432 if (!msg) 433 return -ENOMEM; 434 435 mutex_lock(&vdpa_dev_mutex); 436 mdev = vdpa_mgmtdev_get_from_attr(info->attrs); 437 if (IS_ERR(mdev)) { 438 mutex_unlock(&vdpa_dev_mutex); 439 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device"); 440 err = PTR_ERR(mdev); 441 goto out; 442 } 443 444 err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0); 445 mutex_unlock(&vdpa_dev_mutex); 446 if (err) 447 goto out; 448 err = genlmsg_reply(msg, info); 449 return err; 450 451 out: 452 nlmsg_free(msg); 453 return err; 454 } 455 456 static int 457 vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) 458 { 459 struct vdpa_mgmt_dev *mdev; 460 int start = cb->args[0]; 461 int idx = 0; 462 int err; 463 464 mutex_lock(&vdpa_dev_mutex); 465 list_for_each_entry(mdev, &mdev_head, list) { 466 if (idx < start) { 467 idx++; 468 continue; 469 } 
470 err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid, 471 cb->nlh->nlmsg_seq, NLM_F_MULTI); 472 if (err) 473 goto out; 474 idx++; 475 } 476 out: 477 mutex_unlock(&vdpa_dev_mutex); 478 cb->args[0] = idx; 479 return msg->len; 480 } 481 482 #define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \ 483 (1 << VDPA_ATTR_DEV_NET_CFG_MTU)) 484 485 static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info) 486 { 487 struct vdpa_dev_set_config config = {}; 488 struct nlattr **nl_attrs = info->attrs; 489 struct vdpa_mgmt_dev *mdev; 490 const u8 *macaddr; 491 const char *name; 492 int err = 0; 493 494 if (!info->attrs[VDPA_ATTR_DEV_NAME]) 495 return -EINVAL; 496 497 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 498 499 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) { 500 macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]); 501 memcpy(config.net.mac, macaddr, sizeof(config.net.mac)); 502 config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR); 503 } 504 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) { 505 config.net.mtu = 506 nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]); 507 config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU); 508 } 509 510 /* Skip checking capability if user didn't prefer to configure any 511 * device networking attributes. It is likely that user might have used 512 * a device specific method to configure such attributes or using device 513 * default attributes. 
514 */ 515 if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) && 516 !netlink_capable(skb, CAP_NET_ADMIN)) 517 return -EPERM; 518 519 mutex_lock(&vdpa_dev_mutex); 520 mdev = vdpa_mgmtdev_get_from_attr(info->attrs); 521 if (IS_ERR(mdev)) { 522 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device"); 523 err = PTR_ERR(mdev); 524 goto err; 525 } 526 if ((config.mask & mdev->config_attr_mask) != config.mask) { 527 NL_SET_ERR_MSG_MOD(info->extack, 528 "All provided attributes are not supported"); 529 err = -EOPNOTSUPP; 530 goto err; 531 } 532 533 err = mdev->ops->dev_add(mdev, name, &config); 534 err: 535 mutex_unlock(&vdpa_dev_mutex); 536 return err; 537 } 538 539 static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info) 540 { 541 struct vdpa_mgmt_dev *mdev; 542 struct vdpa_device *vdev; 543 struct device *dev; 544 const char *name; 545 int err = 0; 546 547 if (!info->attrs[VDPA_ATTR_DEV_NAME]) 548 return -EINVAL; 549 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 550 551 mutex_lock(&vdpa_dev_mutex); 552 dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match); 553 if (!dev) { 554 NL_SET_ERR_MSG_MOD(info->extack, "device not found"); 555 err = -ENODEV; 556 goto dev_err; 557 } 558 vdev = container_of(dev, struct vdpa_device, dev); 559 if (!vdev->mdev) { 560 NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user"); 561 err = -EINVAL; 562 goto mdev_err; 563 } 564 mdev = vdev->mdev; 565 mdev->ops->dev_del(mdev, vdev); 566 mdev_err: 567 put_device(dev); 568 dev_err: 569 mutex_unlock(&vdpa_dev_mutex); 570 return err; 571 } 572 573 static int 574 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, 575 int flags, struct netlink_ext_ack *extack) 576 { 577 u16 max_vq_size; 578 u16 min_vq_size = 1; 579 u32 device_id; 580 u32 vendor_id; 581 void *hdr; 582 int err; 583 584 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW); 585 if (!hdr) 586 
return -EMSGSIZE; 587 588 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); 589 if (err) 590 goto msg_err; 591 592 device_id = vdev->config->get_device_id(vdev); 593 vendor_id = vdev->config->get_vendor_id(vdev); 594 max_vq_size = vdev->config->get_vq_num_max(vdev); 595 if (vdev->config->get_vq_num_min) 596 min_vq_size = vdev->config->get_vq_num_min(vdev); 597 598 err = -EMSGSIZE; 599 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) 600 goto msg_err; 601 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) 602 goto msg_err; 603 if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id)) 604 goto msg_err; 605 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) 606 goto msg_err; 607 if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size)) 608 goto msg_err; 609 if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size)) 610 goto msg_err; 611 612 genlmsg_end(msg, hdr); 613 return 0; 614 615 msg_err: 616 genlmsg_cancel(msg, hdr); 617 return err; 618 } 619 620 static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info) 621 { 622 struct vdpa_device *vdev; 623 struct sk_buff *msg; 624 const char *devname; 625 struct device *dev; 626 int err; 627 628 if (!info->attrs[VDPA_ATTR_DEV_NAME]) 629 return -EINVAL; 630 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 631 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 632 if (!msg) 633 return -ENOMEM; 634 635 mutex_lock(&vdpa_dev_mutex); 636 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); 637 if (!dev) { 638 NL_SET_ERR_MSG_MOD(info->extack, "device not found"); 639 err = -ENODEV; 640 goto err; 641 } 642 vdev = container_of(dev, struct vdpa_device, dev); 643 if (!vdev->mdev) { 644 err = -EINVAL; 645 goto mdev_err; 646 } 647 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); 648 if (!err) 649 err = genlmsg_reply(msg, info); 650 mdev_err: 651 put_device(dev); 652 err: 653 mutex_unlock(&vdpa_dev_mutex); 654 if (err) 655 
nlmsg_free(msg); 656 return err; 657 } 658 659 struct vdpa_dev_dump_info { 660 struct sk_buff *msg; 661 struct netlink_callback *cb; 662 int start_idx; 663 int idx; 664 }; 665 666 static int vdpa_dev_dump(struct device *dev, void *data) 667 { 668 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); 669 struct vdpa_dev_dump_info *info = data; 670 int err; 671 672 if (!vdev->mdev) 673 return 0; 674 if (info->idx < info->start_idx) { 675 info->idx++; 676 return 0; 677 } 678 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, 679 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack); 680 if (err) 681 return err; 682 683 info->idx++; 684 return 0; 685 } 686 687 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) 688 { 689 struct vdpa_dev_dump_info info; 690 691 info.msg = msg; 692 info.cb = cb; 693 info.start_idx = cb->args[0]; 694 info.idx = 0; 695 696 mutex_lock(&vdpa_dev_mutex); 697 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump); 698 mutex_unlock(&vdpa_dev_mutex); 699 cb->args[0] = info.idx; 700 return msg->len; 701 } 702 703 static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev, 704 struct sk_buff *msg, u64 features, 705 const struct virtio_net_config *config) 706 { 707 u16 val_u16; 708 709 if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0) 710 return 0; 711 712 val_u16 = le16_to_cpu(config->max_virtqueue_pairs); 713 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16); 714 } 715 716 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg) 717 { 718 struct virtio_net_config config = {}; 719 u64 features; 720 u16 val_u16; 721 722 vdpa_get_config(vdev, 0, &config, sizeof(config)); 723 724 if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac), 725 config.mac)) 726 return -EMSGSIZE; 727 728 val_u16 = le16_to_cpu(config.status); 729 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16)) 730 return -EMSGSIZE; 731 732 val_u16 = 
le16_to_cpu(config.mtu); 733 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16)) 734 return -EMSGSIZE; 735 736 features = vdev->config->get_features(vdev); 737 738 return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config); 739 } 740 741 static int 742 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, 743 int flags, struct netlink_ext_ack *extack) 744 { 745 u32 device_id; 746 void *hdr; 747 int err; 748 749 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, 750 VDPA_CMD_DEV_CONFIG_GET); 751 if (!hdr) 752 return -EMSGSIZE; 753 754 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { 755 err = -EMSGSIZE; 756 goto msg_err; 757 } 758 759 device_id = vdev->config->get_device_id(vdev); 760 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { 761 err = -EMSGSIZE; 762 goto msg_err; 763 } 764 765 switch (device_id) { 766 case VIRTIO_ID_NET: 767 err = vdpa_dev_net_config_fill(vdev, msg); 768 break; 769 default: 770 err = -EOPNOTSUPP; 771 break; 772 } 773 if (err) 774 goto msg_err; 775 776 genlmsg_end(msg, hdr); 777 return 0; 778 779 msg_err: 780 genlmsg_cancel(msg, hdr); 781 return err; 782 } 783 784 static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info) 785 { 786 struct vdpa_device *vdev; 787 struct sk_buff *msg; 788 const char *devname; 789 struct device *dev; 790 int err; 791 792 if (!info->attrs[VDPA_ATTR_DEV_NAME]) 793 return -EINVAL; 794 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 795 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 796 if (!msg) 797 return -ENOMEM; 798 799 mutex_lock(&vdpa_dev_mutex); 800 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); 801 if (!dev) { 802 NL_SET_ERR_MSG_MOD(info->extack, "device not found"); 803 err = -ENODEV; 804 goto dev_err; 805 } 806 vdev = container_of(dev, struct vdpa_device, dev); 807 if (!vdev->mdev) { 808 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); 809 err = -EINVAL; 810 goto 
mdev_err; 811 } 812 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq, 813 0, info->extack); 814 if (!err) 815 err = genlmsg_reply(msg, info); 816 817 mdev_err: 818 put_device(dev); 819 dev_err: 820 mutex_unlock(&vdpa_dev_mutex); 821 if (err) 822 nlmsg_free(msg); 823 return err; 824 } 825 826 static int vdpa_dev_config_dump(struct device *dev, void *data) 827 { 828 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); 829 struct vdpa_dev_dump_info *info = data; 830 int err; 831 832 if (!vdev->mdev) 833 return 0; 834 if (info->idx < info->start_idx) { 835 info->idx++; 836 return 0; 837 } 838 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, 839 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, 840 info->cb->extack); 841 if (err) 842 return err; 843 844 info->idx++; 845 return 0; 846 } 847 848 static int 849 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) 850 { 851 struct vdpa_dev_dump_info info; 852 853 info.msg = msg; 854 info.cb = cb; 855 info.start_idx = cb->args[0]; 856 info.idx = 0; 857 858 mutex_lock(&vdpa_dev_mutex); 859 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump); 860 mutex_unlock(&vdpa_dev_mutex); 861 cb->args[0] = info.idx; 862 return msg->len; 863 } 864 865 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { 866 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING }, 867 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, 868 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, 869 [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR, 870 /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */ 871 [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68), 872 }; 873 874 static const struct genl_ops vdpa_nl_ops[] = { 875 { 876 .cmd = VDPA_CMD_MGMTDEV_GET, 877 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 878 .doit = vdpa_nl_cmd_mgmtdev_get_doit, 879 .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, 880 }, 881 { 882 
.cmd = VDPA_CMD_DEV_NEW, 883 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 884 .doit = vdpa_nl_cmd_dev_add_set_doit, 885 .flags = GENL_ADMIN_PERM, 886 }, 887 { 888 .cmd = VDPA_CMD_DEV_DEL, 889 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 890 .doit = vdpa_nl_cmd_dev_del_set_doit, 891 .flags = GENL_ADMIN_PERM, 892 }, 893 { 894 .cmd = VDPA_CMD_DEV_GET, 895 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 896 .doit = vdpa_nl_cmd_dev_get_doit, 897 .dumpit = vdpa_nl_cmd_dev_get_dumpit, 898 }, 899 { 900 .cmd = VDPA_CMD_DEV_CONFIG_GET, 901 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 902 .doit = vdpa_nl_cmd_dev_config_get_doit, 903 .dumpit = vdpa_nl_cmd_dev_config_get_dumpit, 904 }, 905 }; 906 907 static struct genl_family vdpa_nl_family __ro_after_init = { 908 .name = VDPA_GENL_NAME, 909 .version = VDPA_GENL_VERSION, 910 .maxattr = VDPA_ATTR_MAX, 911 .policy = vdpa_nl_policy, 912 .netnsok = false, 913 .module = THIS_MODULE, 914 .ops = vdpa_nl_ops, 915 .n_ops = ARRAY_SIZE(vdpa_nl_ops), 916 }; 917 918 static int vdpa_init(void) 919 { 920 int err; 921 922 err = bus_register(&vdpa_bus); 923 if (err) 924 return err; 925 err = genl_register_family(&vdpa_nl_family); 926 if (err) 927 goto err; 928 return 0; 929 930 err: 931 bus_unregister(&vdpa_bus); 932 return err; 933 } 934 935 static void __exit vdpa_exit(void) 936 { 937 genl_unregister_family(&vdpa_nl_family); 938 bus_unregister(&vdpa_bus); 939 ida_destroy(&vdpa_index_ida); 940 } 941 core_initcall(vdpa_init); 942 module_exit(vdpa_exit); 943 944 MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); 945 MODULE_LICENSE("GPL v2"); 946