/*
 * netmap access for qemu
 *
 * Copyright (c) 2012-2013 Luigi Rizzo
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */


#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <net/if.h>
#define NETMAP_WITH_LIBS
#include <net/netmap.h>
#include <net/netmap_user.h>

#include "net/net.h"
#include "net/tap.h"
#include "clients.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

typedef struct NetmapState {
    NetClientState      nc;
    struct nm_desc      *nmd;
    char                ifname[IFNAMSIZ];
    struct netmap_ring  *tx;
    struct netmap_ring  *rx;
    bool                read_poll;
    bool                write_poll;
    struct iovec        iov[IOV_MAX];
    int                 vnet_hdr_len;  /* Current virtio-net header length. */
} NetmapState;
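
/*
 * Quick reference for the netmap ring model used below (a summary of the
 * netmap(4) API, added here for readability): the slots in the range
 * [ring->head, ring->tail) are owned by userspace. Advancing ring->head
 * releases slots to the kernel (publishing frames on a TX ring, or
 * returning buffers on an RX ring), while ring->cur only sets the wakeup
 * point for poll(). ring->tail is advanced by the kernel on each
 * txsync/rxsync.
 */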

#ifndef __FreeBSD__
#define pkt_copy bcopy
#else
/* A fast copy routine that copies in 64-byte chunks; source and
 * destination must not overlap. */
static inline void
pkt_copy(const void *_src, void *_dst, int l)
{
    const uint64_t *src = _src;
    uint64_t *dst = _dst;
    if (unlikely(l >= 1024)) {
        bcopy(src, dst, l);
        return;
    }
    for (; l > 0; l -= 64) {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
    }
}
#endif /* __FreeBSD__ */

/*
 * Open a netmap device. We assume there is only one queue
 * (which is the case for the VALE bridge).
 */
static struct nm_desc *netmap_open(const NetdevNetmapOptions *nm_opts,
                                   Error **errp)
{
    struct nm_desc *nmd;
    struct nmreq req;

    memset(&req, 0, sizeof(req));

    nmd = nm_open(nm_opts->ifname, &req, NETMAP_NO_TX_POLL,
                  NULL);
    if (nmd == NULL) {
        error_setg_errno(errp, errno, "Failed to nm_open() %s",
                         nm_opts->ifname);
        return NULL;
    }

    return nmd;
}
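
/*
 * For reference (standard nm_open() behaviour, not specific to QEMU): the
 * ifname option is handed to nm_open() unmodified, so it uses netmap's
 * port naming, e.g. "netmap:eth0" to attach to a host interface in netmap
 * mode, or "vale0:1" to attach port "1" to the VALE software switch
 * "vale0".
 */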

static void netmap_send(void *opaque);
static void netmap_writable(void *opaque);

/* Set the event-loop handlers for the netmap backend. */
static void netmap_update_fd_handler(NetmapState *s)
{
    qemu_set_fd_handler(s->nmd->fd,
                        s->read_poll ? netmap_send : NULL,
                        s->write_poll ? netmap_writable : NULL,
                        s);
}

/* Update the read handler. */
static void netmap_read_poll(NetmapState *s, bool enable)
{
    if (s->read_poll != enable) { /* Do nothing if not changed. */
        s->read_poll = enable;
        netmap_update_fd_handler(s);
    }
}

/* Update the write handler. */
static void netmap_write_poll(NetmapState *s, bool enable)
{
    if (s->write_poll != enable) {
        s->write_poll = enable;
        netmap_update_fd_handler(s);
    }
}

static void netmap_poll(NetClientState *nc, bool enable)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    if (s->read_poll != enable || s->write_poll != enable) {
        s->write_poll = enable;
        s->read_poll  = enable;
        netmap_update_fd_handler(s);
    }
}

/*
 * The fd_write() callback, invoked if the fd is marked as
 * writable after a poll. Unregister the handler and flush any
 * buffered packets.
 */
static void netmap_writable(void *opaque)
{
    NetmapState *s = opaque;

    netmap_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}

static ssize_t netmap_receive_iov(NetClientState *nc,
                    const struct iovec *iov, int iovcnt)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    unsigned int tail = ring->tail;
    ssize_t totlen = 0;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    last = i = ring->head;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. Tell the kernel that we have seen the
         * newly available slots (so that it notifies us again when more
         * become available), but without publishing any new slots to be
         * processed (i.e., we don't advance ring->head). */
        ring->cur = tail;
        netmap_write_poll(s, true);
        return 0;
    }

    for (j = 0; j < iovcnt; j++) {
        int iov_frag_size = iov[j].iov_len;
        int offset = 0;
        int nm_frag_size;

        totlen += iov_frag_size;

        /* Split each iovec fragment across multiple netmap slots, if
           necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(i == tail)) {
                /* We ran out of netmap slots while splitting the
                   iovec fragments. */
                ring->cur = tail;
                netmap_write_poll(s, true);
                return 0;
            }

            idx = ring->slot[i].buf_idx;
            dst = (uint8_t *)NETMAP_BUF(ring, idx);

            ring->slot[i].len = nm_frag_size;
            ring->slot[i].flags = NS_MOREFRAG;
            pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);

            last = i;
            i = nm_ring_next(ring, i);

            offset += nm_frag_size;
            iov_frag_size -= nm_frag_size;
        }
    }
    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->head and ring->cur to publish the new slots and
     * the new wakeup point. */
    ring->head = ring->cur = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return totlen;
}
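
/*
 * Design note: since netmap_open() passes NETMAP_NO_TX_POLL, poll()ing the
 * netmap file descriptor does not flush the TX ring as a side effect; the
 * explicit NIOCTXSYNC ioctl above is what actually hands the newly
 * published slots to the kernel for transmission.
 */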

static ssize_t netmap_receive(NetClientState *nc,
      const uint8_t *buf, size_t size)
{
    struct iovec iov;

    iov.iov_base = (void *)buf;
    iov.iov_len = size;

    return netmap_receive_iov(nc, &iov, 1);
}

/* Complete a previous send (backend --> guest) and enable the
   fd_read callback. */
static void netmap_send_completed(NetClientState *nc, ssize_t len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    netmap_read_poll(s, true);
}

static void netmap_send(void *opaque)
{
    NetmapState *s = opaque;
    struct netmap_ring *ring = s->rx;
    unsigned int tail = ring->tail;

    /* Keep sending while there are available slots in the netmap
       RX ring and the forwarding path towards the peer is open. */
    while (ring->head != tail) {
        uint32_t i = ring->head;
        uint32_t idx;
        bool morefrag;
        int iovcnt = 0;
        int iovsize;

        /* Get a (possibly multi-slot) packet. */
        do {
            idx = ring->slot[i].buf_idx;
            morefrag = (ring->slot[i].flags & NS_MOREFRAG);
            s->iov[iovcnt].iov_base = (void *)NETMAP_BUF(ring, idx);
            s->iov[iovcnt].iov_len = ring->slot[i].len;
            iovcnt++;
            i = nm_ring_next(ring, i);
        } while (i != tail && morefrag);

        /* Advance ring->cur to tell the kernel that we have seen the slots. */
        ring->cur = i;

        if (unlikely(morefrag)) {
            /* This is a truncated packet, so we stop here without updating
             * ring->head (i.e., without releasing the incomplete slots to
             * the kernel), hoping to read the complete packet on the next
             * call. */
            break;
        }

        iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                            netmap_send_completed);

        /* Release the slots to the kernel. */
        ring->head = i;

        if (iovsize == 0) {
            /* The peer cannot receive any more packets. The packet has been
             * queued; stop reading from the backend until
             * netmap_send_completed() is invoked. */
            netmap_read_poll(s, false);
            break;
        }
    }
}

/* Flush and close. */
static void netmap_cleanup(NetClientState *nc)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    qemu_purge_queued_packets(nc);

    netmap_poll(nc, false);
    nm_close(s->nmd);
    s->nmd = NULL;
}

/* Offloading manipulation support callbacks. */
static int netmap_fd_set_vnet_hdr_len(NetmapState *s, int len)
{
    struct nmreq req;

    /* Issue a NETMAP_BDG_VNET_HDR command to change the virtio-net header
     * length for the netmap adapter associated with 's->ifname'.
     */
    memset(&req, 0, sizeof(req));
    pstrcpy(req.nr_name, sizeof(req.nr_name), s->ifname);
    req.nr_version = NETMAP_API;
    req.nr_cmd = NETMAP_BDG_VNET_HDR;
    req.nr_arg1 = len;

    return ioctl(s->nmd->fd, NIOCREGIF, &req);
}

static bool netmap_has_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int prev_len = s->vnet_hdr_len;

    /* Check that we can set the new length. */
    if (netmap_fd_set_vnet_hdr_len(s, len)) {
        return false;
    }

    /* Restore the previous length. */
    if (netmap_fd_set_vnet_hdr_len(s, prev_len)) {
        error_report("Failed to restore vnet-hdr length %d on %s: %s",
                     prev_len, s->ifname, strerror(errno));
        abort();
    }

    return true;
}
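
/*
 * Note: the legacy nmreq interface used here has no obvious way to query
 * whether a given virtio-net header length is supported, so the function
 * above probes it by setting the candidate length and then restoring the
 * previous one; a failed restore would leave the port in an inconsistent
 * state, hence the abort().
 */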

/* A netmap interface that supports virtio-net headers always
 * supports UFO, so we use this callback also for the has_ufo hook. */
static bool netmap_has_vnet_hdr(NetClientState *nc)
{
    return netmap_has_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
}

static void netmap_using_vnet_hdr(NetClientState *nc, bool enable)
{
}

static void netmap_set_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int err;

    err = netmap_fd_set_vnet_hdr_len(s, len);
    if (err) {
        error_report("Unable to set vnet-hdr length %d on %s: %s",
                     len, s->ifname, strerror(errno));
    } else {
        /* Keep track of the current length. */
        s->vnet_hdr_len = len;
    }
}

static void netmap_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
                               int ecn, int ufo, int uso4, int uso6)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    /* Setting a virtio-net header length greater than zero automatically
     * enables the offloads. */
    if (!s->vnet_hdr_len) {
        netmap_set_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
    }
}
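
/*
 * Note: unlike the tap backend, the individual csum/tso/ufo arguments are
 * not acted upon here; with a non-zero virtio-net header length the
 * offload state travels with each packet in its header, so enabling the
 * header is all that is needed on the backend side.
 */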

/* NetClientInfo methods */
static NetClientInfo net_netmap_info = {
    .type = NET_CLIENT_DRIVER_NETMAP,
    .size = sizeof(NetmapState),
    .receive = netmap_receive,
    .receive_iov = netmap_receive_iov,
    .poll = netmap_poll,
    .cleanup = netmap_cleanup,
    .has_ufo = netmap_has_vnet_hdr,
    .has_vnet_hdr = netmap_has_vnet_hdr,
    .has_vnet_hdr_len = netmap_has_vnet_hdr_len,
    .using_vnet_hdr = netmap_using_vnet_hdr,
    .set_offload = netmap_set_offload,
    .set_vnet_hdr_len = netmap_set_vnet_hdr_len,
};

/* The exported init function
 *
 * ... -net netmap,ifname="..."
 */
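/*
 * Illustrative invocation (the id and device model are arbitrary examples,
 * and the exact option syntax may vary between QEMU versions):
 *
 *   qemu-system-x86_64 ... \
 *       -netdev netmap,id=nm0,ifname=vale0:1 \
 *       -device virtio-net-pci,netdev=nm0
 */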
int net_init_netmap(const Netdev *netdev,
                    const char *name, NetClientState *peer, Error **errp)
{
    const NetdevNetmapOptions *netmap_opts = &netdev->u.netmap;
    struct nm_desc *nmd;
    NetClientState *nc;
    Error *err = NULL;
    NetmapState *s;

    nmd = netmap_open(netmap_opts, &err);
    if (err) {
        error_propagate(errp, err);
        return -1;
    }
    /* Create the object. */
    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
    s = DO_UPCAST(NetmapState, nc, nc);
    s->nmd = nmd;
    s->tx = NETMAP_TXRING(nmd->nifp, 0);
    s->rx = NETMAP_RXRING(nmd->nifp, 0);
    s->vnet_hdr_len = 0;
    pstrcpy(s->ifname, sizeof(s->ifname), netmap_opts->ifname);
    netmap_read_poll(s, true); /* Initially only poll for reads. */

    return 0;
}