veth.c: diff of drivers/net/veth.c
old: 7a085c3aad94cce7e11031c6800e41668418ae4c
new: d0522f1cd25edb796548f91e04766fa3cbc3b6df
 /*
  * drivers/net/veth.c
  *
  * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
  *
  * Author: Pavel Emelianov <xemul@openvz.org>
  * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
  *
--- 10 unchanged lines hidden ---
 #include <net/xfrm.h>
 #include <net/xdp.h>
 #include <linux/veth.h>
 #include <linux/module.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/ptr_ring.h>
 #include <linux/bpf_trace.h>
+#include <linux/net_tstamp.h>
 
 #define DRV_NAME	"veth"
 #define DRV_VERSION	"1.0"
 
 #define VETH_XDP_FLAG		BIT(0)
 #define VETH_RING_SIZE		256
 #define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)
 
 /* Separating two types of XDP xmit */
 #define VETH_XDP_TX		BIT(0)
 #define VETH_XDP_REDIR		BIT(1)
 
-struct pcpu_vstats {
-	u64			packets;
-	u64			bytes;
+struct veth_rq_stats {
+	u64			xdp_packets;
+	u64			xdp_bytes;
+	u64			xdp_drops;
 	struct u64_stats_sync	syncp;
 };
 
 struct veth_rq {
 	struct napi_struct	xdp_napi;
 	struct net_device	*dev;
 	struct bpf_prog __rcu	*xdp_prog;
 	struct xdp_mem_info	xdp_mem;
+	struct veth_rq_stats	stats;
 	bool			rx_notify_masked;
 	struct ptr_ring		xdp_ring;
 	struct xdp_rxq_info	xdp_rxq;
 };
 
 struct veth_priv {
 	struct net_device __rcu	*peer;
 	atomic64_t		dropped;
 	struct bpf_prog		*_xdp_prog;
 	struct veth_rq		*rq;
 	unsigned int		requested_headroom;
 };
 
 /*
  * ethtool interface
  */
 
+struct veth_q_stat_desc {
+	char	desc[ETH_GSTRING_LEN];
+	size_t	offset;
+};
+
+#define VETH_RQ_STAT(m)	offsetof(struct veth_rq_stats, m)
+
+static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
+	{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
+	{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+	{ "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+};
+
+#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+
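Each descriptor above pairs a printable counter name with that counter's byte offset inside struct veth_rq_stats, so the ethtool handlers further down can walk all per-queue counters generically instead of naming each field. A minimal sketch of the lookup idiom; the helper name is illustrative, not part of the driver:

    /* Sketch: read the j-th per-queue counter through its recorded offset. */
    static u64 veth_rq_stat_read(const struct veth_rq_stats *stats, int j)
    {
            const void *base = stats;

            return *(const u64 *)(base + veth_rq_stats_desc[j].offset);
    }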
 static struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_stats_keys[] = {
 	{ "peer_ifindex" },
 };
 
 static int veth_get_link_ksettings(struct net_device *dev,
 				   struct ethtool_link_ksettings *cmd)
--- 8 unchanged lines hidden ---
 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
+	char *p = (char *)buf;
+	int i, j;
+
 	switch(stringset) {
 	case ETH_SS_STATS:
-		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		p += sizeof(ethtool_stats_keys);
+		for (i = 0; i < dev->real_num_rx_queues; i++) {
+			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+					 i, veth_rq_stats_desc[j].desc);
+				p += ETH_GSTRING_LEN;
+			}
+		}
 		break;
 	}
 }
 
 static int veth_get_sset_count(struct net_device *dev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(ethtool_stats_keys);
+		return ARRAY_SIZE(ethtool_stats_keys) +
+		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
 static void veth_get_ethtool_stats(struct net_device *dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
 	struct veth_priv *priv = netdev_priv(dev);
 	struct net_device *peer = rtnl_dereference(priv->peer);
+	int i, j, idx;
 
 	data[0] = peer ? peer->ifindex : 0;
+	idx = 1;
+	for (i = 0; i < dev->real_num_rx_queues; i++) {
+		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
+		const void *stats_base = (void *)rq_stats;
+		unsigned int start;
+		size_t offset;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+				offset = veth_rq_stats_desc[j].offset;
+				data[idx + j] = *(u64 *)(stats_base + offset);
+			}
+		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+		idx += VETH_RQ_STATS_LEN;
+	}
 }
 
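The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair above is a seqcount-style read: on 32-bit kernels a 64-bit counter cannot be loaded atomically, so the snapshot is retried whenever a writer ran concurrently; on 64-bit builds the calls compile down to almost nothing. A self-contained sketch of the reader idiom, with an illustrative helper name:

    /* Sketch: take a consistent snapshot of one u64 counter. */
    static u64 veth_snapshot_xdp_packets(const struct veth_rq_stats *stats)
    {
            unsigned int start;
            u64 val;

            do {
                    start = u64_stats_fetch_begin_irq(&stats->syncp);
                    val = stats->xdp_packets;
            } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

            return val;
    }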
+static int veth_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+
+	return 0;
+}
+
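veth has no PTP hardware clock (hence phc_index = -1) and advertises software timestamping only. Userspace opts in per socket through the standard SO_TIMESTAMPING interface; a minimal sketch along the lines of the kernel's timestamping documentation, error handling omitted:

    /* Sketch: request software TX/RX timestamps on an existing socket fd. */
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_sw_timestamps(int fd)
    {
            int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
                        SOF_TIMESTAMPING_RX_SOFTWARE |
                        SOF_TIMESTAMPING_SOFTWARE;

            /* TX stamps return on the socket error queue (MSG_ERRQUEUE),
             * RX stamps as SCM_TIMESTAMPING control messages on recvmsg(). */
            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }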
 static const struct ethtool_ops veth_ethtool_ops = {
 	.get_drvinfo		= veth_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
 	.get_strings		= veth_get_strings,
 	.get_sset_count		= veth_get_sset_count,
 	.get_ethtool_stats	= veth_get_ethtool_stats,
 	.get_link_ksettings	= veth_get_link_ksettings,
+	.get_ts_info		= veth_get_ts_info,
 };
 
 /* general routines */
 
 static bool veth_is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & VETH_XDP_FLAG;
 }
--- 64 unchanged lines hidden ---
 	rxq = skb_get_queue_mapping(skb);
 	if (rxq < rcv->real_num_rx_queues) {
 		rq = &rcv_priv->rq[rxq];
 		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
 		if (rcv_xdp)
 			skb_record_rx_queue(skb, rxq);
 	}
 
+	skb_tx_timestamp(skb);
 	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
-		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+		if (!rcv_xdp) {
+			struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
 
-		u64_stats_update_begin(&stats->syncp);
-		stats->bytes += length;
-		stats->packets++;
-		u64_stats_update_end(&stats->syncp);
+			u64_stats_update_begin(&stats->syncp);
+			stats->bytes += length;
+			stats->packets++;
+			u64_stats_update_end(&stats->syncp);
+		}
 	} else {
 drop:
 		atomic64_inc(&priv->dropped);
 	}
 
 	if (rcv_xdp)
 		__veth_xdp_flush(rq);
 
 	rcu_read_unlock();
 
 	return NETDEV_TX_OK;
 }
 
-static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
 {
 	struct veth_priv *priv = netdev_priv(dev);
 	int cpu;
 
 	result->packets = 0;
 	result->bytes = 0;
 	for_each_possible_cpu(cpu) {
-		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
+		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
 		u64 packets, bytes;
 		unsigned int start;
 
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			packets = stats->packets;
 			bytes = stats->bytes;
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 		result->packets += packets;
 		result->bytes += bytes;
 	}
 	return atomic64_read(&priv->dropped);
 }
 
+static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+{
+	struct veth_priv *priv = netdev_priv(dev);
+	int i;
+
+	result->xdp_packets = 0;
+	result->xdp_bytes = 0;
+	result->xdp_drops = 0;
+	for (i = 0; i < dev->num_rx_queues; i++) {
+		struct veth_rq_stats *stats = &priv->rq[i].stats;
+		u64 packets, bytes, drops;
+		unsigned int start;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
+			packets = stats->xdp_packets;
+			bytes = stats->xdp_bytes;
+			drops = stats->xdp_drops;
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+		result->xdp_packets += packets;
+		result->xdp_bytes += bytes;
+		result->xdp_drops += drops;
+	}
+}
+
 static void veth_get_stats64(struct net_device *dev,
 			     struct rtnl_link_stats64 *tot)
 {
 	struct veth_priv *priv = netdev_priv(dev);
 	struct net_device *peer;
-	struct pcpu_vstats one;
+	struct veth_rq_stats rx;
+	struct pcpu_lstats tx;
 
-	tot->tx_dropped = veth_stats_one(&one, dev);
-	tot->tx_bytes = one.bytes;
-	tot->tx_packets = one.packets;
+	tot->tx_dropped = veth_stats_tx(&tx, dev);
+	tot->tx_bytes = tx.bytes;
+	tot->tx_packets = tx.packets;
 
+	veth_stats_rx(&rx, dev);
+	tot->rx_dropped = rx.xdp_drops;
+	tot->rx_bytes = rx.xdp_bytes;
+	tot->rx_packets = rx.xdp_packets;
+
 	rcu_read_lock();
 	peer = rcu_dereference(priv->peer);
 	if (peer) {
-		tot->rx_dropped = veth_stats_one(&one, peer);
-		tot->rx_bytes = one.bytes;
-		tot->rx_packets = one.packets;
+		tot->rx_dropped += veth_stats_tx(&tx, peer);
+		tot->rx_bytes += tx.bytes;
+		tot->rx_packets += tx.packets;
+
+		veth_stats_rx(&rx, peer);
+		tot->tx_bytes += rx.xdp_bytes;
+		tot->tx_packets += rx.xdp_packets;
 	}
 	rcu_read_unlock();
 }
 
 /* fake multicast ability */
 static void veth_set_multicast_list(struct net_device *dev)
 {
 }
--- 22 unchanged lines hidden ---
 	return smp_processor_id() % dev->real_num_rx_queues;
 }
 
 static int veth_xdp_xmit(struct net_device *dev, int n,
 			 struct xdp_frame **frames, u32 flags)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct net_device *rcv;
+	int i, ret, drops = n;
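Initializing drops to n means every early bail-out below (bad flags, no peer, no XDP program on the peer side) charges the whole batch to the dropped counter through the shared drop: label; drops is reset to zero only once the ring is known to be reachable. A condensed sketch of the control-flow shape the following hunks build up, where peer_ready() is a hypothetical stand-in for the three checks:

    /* Sketch: assume everything drops until the ring is reachable,
     * then count real per-frame enqueue failures. */
    static int xmit_batch_shape(struct veth_priv *priv, int n)
    {
            int ret, drops = n;

            if (!peer_ready()) {
                    ret = -ENXIO;
                    goto drop;
            }

            drops = 0;
            /* ... enqueue loop: drops++ for each frame that cannot be queued ... */
            if (likely(!drops))
                    return n;

            ret = n - drops;
    drop:
            atomic64_add(drops, &priv->dropped);
            return ret;
    }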
 	unsigned int max_len;
 	struct veth_rq *rq;
-	int i, drops = 0;
 
-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
-		return -EINVAL;
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+		ret = -EINVAL;
+		goto drop;
+	}
 
 	rcv = rcu_dereference(priv->peer);
-	if (unlikely(!rcv))
-		return -ENXIO;
+	if (unlikely(!rcv)) {
+		ret = -ENXIO;
+		goto drop;
+	}
 
 	rcv_priv = netdev_priv(rcv);
 	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
 	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
 	 * side. This means an XDP program is loaded on the peer and the peer
 	 * device is up.
 	 */
-	if (!rcu_access_pointer(rq->xdp_prog))
-		return -ENXIO;
+	if (!rcu_access_pointer(rq->xdp_prog)) {
+		ret = -ENXIO;
+		goto drop;
+	}
 
+	drops = 0;
 	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
 
 	spin_lock(&rq->xdp_ring.producer_lock);
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *frame = frames[i];
 		void *ptr = veth_xdp_to_ptr(frame);
 
 		if (unlikely(frame->len > max_len ||
 			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
 			xdp_return_frame_rx_napi(frame);
 			drops++;
 		}
 	}
 	spin_unlock(&rq->xdp_ring.producer_lock);
 
 	if (flags & XDP_XMIT_FLUSH)
 		__veth_xdp_flush(rq);
 
-	return n - drops;
+	if (likely(!drops))
+		return n;
+
+	ret = n - drops;
+drop:
+	atomic64_add(drops, &priv->dropped);
+
+	return ret;
 }
 
 static void veth_xdp_flush(struct net_device *dev)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct net_device *rcv;
 	struct veth_rq *rq;
 
--- 232 unchanged lines hidden ---
 	rcu_read_unlock();
 	page_frag_free(xdp.data);
 xdp_xmit:
 	return NULL;
 }
 
 static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
 {
-	int i, done = 0;
+	int i, done = 0, drops = 0, bytes = 0;
 
 	for (i = 0; i < budget; i++) {
 		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+		unsigned int xdp_xmit_one = 0;
 		struct sk_buff *skb;
 
 		if (!ptr)
 			break;
 
 		if (veth_is_xdp_frame(ptr)) {
-			skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
-					       xdp_xmit);
+			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
+
+			bytes += frame->len;
+			skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
 		} else {
-			skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+			skb = ptr;
+			bytes += skb->len;
+			skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
 		}
+		*xdp_xmit |= xdp_xmit_one;
 
 		if (skb)
 			napi_gro_receive(&rq->xdp_napi, skb);
+		else if (!xdp_xmit_one)
+			drops++;
 
 		done++;
 	}
 
+	u64_stats_update_begin(&rq->stats.syncp);
+	rq->stats.xdp_packets += done;
+	rq->stats.xdp_bytes += bytes;
+	rq->stats.xdp_drops += drops;
+	u64_stats_update_end(&rq->stats.syncp);
+
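This writer side pairs with the fetch/retry readers shown earlier: u64_stats_update_begin()/u64_stats_update_end() bump the seqcount around one batched update per NAPI poll rather than per packet. The per-packet bookkeeping above reduces to three outcomes, summarized here for reference:

    /* Per-packet outcomes in veth_xdp_rcv():
     *   skb != NULL                 -> delivered via napi_gro_receive()
     *   skb == NULL,  xdp_xmit_one  -> XDP_TX / XDP_REDIRECT, not a drop
     *   skb == NULL, !xdp_xmit_one  -> counted in xdp_drops
     */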
 	return done;
 }
 
 static int veth_poll(struct napi_struct *napi, int budget)
 {
 	struct veth_rq *rq =
 		container_of(napi, struct veth_rq, xdp_napi);
 	unsigned int xdp_xmit = 0;
--- 174 unchanged lines hidden ---
 {
 	struct veth_priv *priv = netdev_priv(dev);
 	int i;
 
 	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
 	if (!priv->rq)
 		return -ENOMEM;
 
-	for (i = 0; i < dev->num_rx_queues; i++)
+	for (i = 0; i < dev->num_rx_queues; i++) {
 		priv->rq[i].dev = dev;
+		u64_stats_init(&priv->rq[i].stats.syncp);
+	}
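u64_stats_init() seeds the seqcount behind syncp: a no-op on 64-bit, but on 32-bit kernels the fetch/retry readers would otherwise operate on an uninitialized seqcount. A minimal lifecycle sketch, with hypothetical helper names:

    /* Sketch: initialize once, then update under the seqcount. */
    static void rq_stats_setup(struct veth_rq *rq)
    {
            u64_stats_init(&rq->stats.syncp);       /* before first update/fetch */
    }

    static void rq_stats_add_bytes(struct veth_rq *rq, u64 bytes)
    {
            u64_stats_update_begin(&rq->stats.syncp);
            rq->stats.xdp_bytes += bytes;
            u64_stats_update_end(&rq->stats.syncp);
    }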
 
 	return 0;
 }
 
 static void veth_free_queues(struct net_device *dev)
 {
 	struct veth_priv *priv = netdev_priv(dev);
 
 	kfree(priv->rq);
 }
 
 static int veth_dev_init(struct net_device *dev)
 {
 	int err;
 
-	dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
-	if (!dev->vstats)
+	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+	if (!dev->lstats)
 		return -ENOMEM;
 
 	err = veth_alloc_queues(dev);
 	if (err) {
-		free_percpu(dev->vstats);
+		free_percpu(dev->lstats);
 		return err;
 	}
 
 	return 0;
 }
 
 static void veth_dev_free(struct net_device *dev)
 {
 	veth_free_queues(dev);
-	free_percpu(dev->vstats);
+	free_percpu(dev->lstats);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void veth_poll_controller(struct net_device *dev)
 {
 	/* veth only receives frames when its peer sends one
 	 * Since it has nothing to do with disabling irqs, we are guaranteed
 	 * never to have pending data when we poll for it so
--- 277 unchanged lines hidden ---
 		name_assign_type = NET_NAME_ENUM;
 	}
 
 	net = rtnl_link_get_net(src_net, tbp);
 	if (IS_ERR(net))
 		return PTR_ERR(net);
 
 	peer = rtnl_create_link(net, ifname, name_assign_type,
-				&veth_link_ops, tbp);
+				&veth_link_ops, tbp, extack);
 	if (IS_ERR(peer)) {
 		put_net(net);
 		return PTR_ERR(peer);
 	}
 
 	if (!ifmp || !tbp[IFLA_ADDRESS])
 		eth_hw_addr_random(peer);
 
--- 128 unchanged lines hidden ---