// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA simulator for networking device.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator for networking device"
#define DRV_LICENSE  "GPL v2"

#define VDPASIM_NET_FEATURES	(VDPASIM_FEATURES | \
				 (1ULL << VIRTIO_NET_F_MAC) | \
				 (1ULL << VIRTIO_NET_F_MTU) | \
				 (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				 (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR))

/* 3 virtqueues, 2 address spaces, 2 virtqueue groups */
#define VDPASIM_NET_VQ_NUM	3
#define VDPASIM_NET_AS_NUM	2
#define VDPASIM_NET_GROUP_NUM	2

static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
{
	/* Make sure data is written before advancing the index */
	smp_wmb();

	vringh_complete_iotlb(&vq->vring, vq->head, len);

	/* Make sure used is visible before raising the interrupt. */
	smp_wmb();

	local_bh_disable();
	if (vringh_need_notify_iotlb(&vq->vring) > 0)
		vringh_notify(&vq->vring);
	local_bh_enable();
}

/* Accept broadcast/multicast frames and unicast frames sent to our MAC. */
static bool receive_filter(struct vdpasim *vdpasim, size_t len)
{
	bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
	size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
				  sizeof(struct virtio_net_hdr);
	struct virtio_net_config *vio_config = vdpasim->config;

	if (len < ETH_ALEN + hdr_len)
		return false;

	if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
	    is_multicast_ether_addr(vdpasim->buffer + hdr_len))
		return true;
	/* MAC bytes may be zero, so compare all ETH_ALEN bytes instead of strncmp(). */
	if (ether_addr_equal(vdpasim->buffer + hdr_len, vio_config->mac))
		return true;

	return false;
}

static virtio_net_ctrl_ack vdpasim_handle_ctrl_mac(struct vdpasim *vdpasim,
						   u8 cmd)
{
	struct virtio_net_config *vio_config = vdpasim->config;
	struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
	size_t read;

	switch (cmd) {
	case VIRTIO_NET_CTRL_MAC_ADDR_SET:
		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
					     vio_config->mac, ETH_ALEN);
		if (read == ETH_ALEN)
			status = VIRTIO_NET_OK;
		break;
	default:
		break;
	}

	return status;
}

/* Process pending commands on the control virtqueue (vq index 2). */
static void vdpasim_handle_cvq(struct vdpasim *vdpasim)
{
	struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
	struct virtio_net_ctrl_hdr ctrl;
	size_t read, write;
	int err;

	if (!(vdpasim->features & (1ULL << VIRTIO_NET_F_CTRL_VQ)))
		return;

	if (!cvq->ready)
		return;

	while (true) {
		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
					   &cvq->out_iov,
					   &cvq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,
					     sizeof(ctrl));
		if (read != sizeof(ctrl))
			break;

		switch (ctrl.class) {
		case VIRTIO_NET_CTRL_MAC:
			status = vdpasim_handle_ctrl_mac(vdpasim, ctrl.cmd);
			break;
		default:
			break;
		}

		/* Make sure data is written before advancing the index */
		smp_wmb();

		write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
					      &status, sizeof(status));
		vringh_complete_iotlb(&cvq->vring, cvq->head, write);
		vringh_kiov_cleanup(&cvq->in_iov);
		vringh_kiov_cleanup(&cvq->out_iov);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (cvq->cb)
			cvq->cb(cvq->private);
		local_bh_enable();
	}
}

/* Loop packets from the TX ring back into the RX ring, applying the MAC filter. */
static void vdpasim_net_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!vdpasim->running)
		goto out;

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	vdpasim_handle_cvq(vdpasim);

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
					     vdpasim->buffer,
					     PAGE_SIZE);

		if (!receive_filter(vdpasim, read)) {
			vdpasim_net_complete(txq, 0);
			continue;
		}

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vdpasim_net_complete(txq, 0);
			break;
		}

		write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
					      vdpasim->buffer, read);
		if (write <= 0)
			break;

		vdpasim_net_complete(txq, 0);
		vdpasim_net_complete(rxq, write);

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_net_config *net_config = config;

	net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
}

static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
				     const struct vdpa_dev_set_config *config)
{
	struct virtio_net_config *vio_config = vdpasim->config;

	if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR))
		memcpy(vio_config->mac, config->net.mac, ETH_ALEN);
	if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
		vio_config->mtu = cpu_to_vdpasim16(vdpasim, config->net.mtu);
	else
		/* Set the default MTU to 1500 */
		vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
}

static void vdpasim_net_mgmtdev_release(struct device *dev)
{
}

static struct device vdpasim_net_mgmtdev = {
	.init_name = "vdpasim_net",
	.release = vdpasim_net_mgmtdev_release,
};

static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			       const struct vdpa_dev_set_config *config)
{
	struct vdpasim_dev_attr dev_attr = {};
	struct vdpasim *simdev;
	int ret;

	dev_attr.mgmt_dev = mdev;
	dev_attr.name = name;
	dev_attr.id = VIRTIO_ID_NET;
	dev_attr.supported_features = VDPASIM_NET_FEATURES;
	dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
	dev_attr.ngroups = VDPASIM_NET_GROUP_NUM;
	dev_attr.nas = VDPASIM_NET_AS_NUM;
	dev_attr.config_size = sizeof(struct virtio_net_config);
	dev_attr.get_config = vdpasim_net_get_config;
	dev_attr.work_fn = vdpasim_net_work;
	dev_attr.buffer_size = PAGE_SIZE;

	simdev = vdpasim_create(&dev_attr, config);
	if (IS_ERR(simdev))
		return PTR_ERR(simdev);

	vdpasim_net_setup_config(simdev, config);

	ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
	if (ret)
		goto reg_err;

	return 0;

reg_err:
	put_device(&simdev->vdpa.dev);
	return ret;
}

static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
				struct vdpa_device *dev)
{
	struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);

	_vdpa_unregister_device(&simdev->vdpa);
}

static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
	.dev_add = vdpasim_net_dev_add,
	.dev_del = vdpasim_net_dev_del
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct vdpa_mgmt_dev mgmt_dev = {
	.device = &vdpasim_net_mgmtdev,
	.id_table = id_table,
	.ops = &vdpasim_net_mgmtdev_ops,
	.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
			     1 << VDPA_ATTR_DEV_NET_CFG_MTU |
			     1 << VDPA_ATTR_DEV_FEATURES),
	.max_supported_vqs = VDPASIM_NET_VQ_NUM,
	.supported_features = VDPASIM_NET_FEATURES,
};

static int __init vdpasim_net_init(void)
{
	int ret;

	ret = device_register(&vdpasim_net_mgmtdev);
	if (ret) {
		put_device(&vdpasim_net_mgmtdev);
		return ret;
	}

	ret = vdpa_mgmtdev_register(&mgmt_dev);
	if (ret)
		goto parent_err;
	return 0;

parent_err:
	device_unregister(&vdpasim_net_mgmtdev);
	return ret;
}

static void __exit vdpasim_net_exit(void)
{
	vdpa_mgmtdev_unregister(&mgmt_dev);
	device_unregister(&vdpasim_net_mgmtdev);
}

module_init(vdpasim_net_init);
module_exit(vdpasim_net_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);