/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen-legacy-backend.h"

#include "hw/xen/interface/io/netif.h"

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenLegacyDevice xendev;  /* must be first */
    char                  *mac;
    int                    tx_work;
    int                    tx_ring_ref;
    int                    rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t   tx_ring;
    netif_rx_back_ring_t   rx_ring;
    NICConf                conf;
    NICState              *nic;
};

/* ------------------------------------------------------------- */

static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}

static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

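        /*
         * Consume requests between req_cons and req_prod.  Each slot
         * carries one complete frame in a single granted page; since the
         * feature-sg/gso flags are not advertised, requests with the
         * more_data or extra_info flags set are rejected below.
         */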
        while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XEN_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page, txreq.gref);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

#define NET_IP_ALIGN 2

static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

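    /*
     * The receive path runs in rx-copy mode (advertised via feature-rx-copy
     * in net_init and required from the frontend in net_connect): the
     * frontend posts writable grant references and the backend copies each
     * frame into the granted page, offset by NET_IP_ALIGN so the IP header
     * lands on an aligned boundary in the guest buffer.
     */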
    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page, rxreq.gref);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};

static int net_init(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    qemu_set_info_str(qemu_get_queue(netdev->nic),
                      "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

static int net_connect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs,
                               netdev->tx_ring_ref);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

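    /*
     * Rings are mapped and the event channel is bound; the
     * net_tx_packets() call below drains any TX requests the frontend
     * queued before the backend finished connecting.
     */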
    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

static void net_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs,
                               netdev->tx_ring_ref);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs,
                               netdev->rx_ring_ref);
        netdev->rxs = NULL;
    }
}

static void net_event(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}

static int net_free(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};
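/*
 * Rough callback lifecycle, as driven by the legacy xenbus backend core
 * (details may vary between QEMU versions):
 *   .init       - create the NIC and publish feature-rx-copy before the
 *                 frontend attaches,
 *   .initialise - map the shared rings and bind the event channel once the
 *                 frontend has written its ring refs and event channel,
 *   .event      - event-channel notification: drain TX and retry queued RX,
 *   .disconnect - unbind the event channel and unmap the rings,
 *   .free       - delete the NIC and release per-device state.
 */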