Lines Matching +full:build +full:- +full:tci
1 // SPDX-License-Identifier: GPL-2.0-or-later
11 * Florian la Roche <rzsfl@rz.uni-sb.de>
19 * D.J. Barrow : Fixed bug where dev->refcnt gets set
64 * Pekka Riikonen : Netdev boot-time settings code
66 * indefinitely on dev->refcnt
67 * J Hadi Salim : - Backlog queue sampling
68 * - netif_rx() feedback
158 #include "net-sysfs.h"
204 while (++net->dev_base_seq == 0)
212 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
217 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
224 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
232 spin_lock_irq(&sd->input_pkt_queue.lock);
241 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
249 spin_unlock_irq(&sd->input_pkt_queue.lock);
262 INIT_HLIST_NODE(&name_node->hlist);
263 name_node->dev = dev;
264 name_node->name = name;
273 name_node = netdev_name_node_alloc(dev, dev->name);
276 INIT_LIST_HEAD(&name_node->list);
288 hlist_add_head_rcu(&name_node->hlist,
289 dev_name_hash(net, name_node->name));
294 hlist_del_rcu(&name_node->hlist);
304 if (!strcmp(name_node->name, name))
316 if (!strcmp(name_node->name, name))
334 return -EEXIST;
337 return -ENOMEM;
339 /* The node that holds dev->name acts as a head of per-device list. */
340 list_add_tail(&name_node->list, &dev->name_node->list);
347 list_del(&name_node->list);
348 kfree(name_node->name);
359 return -ENOENT;
363 if (name_node == dev->name_node || name_node->dev != dev)
364 return -EINVAL;
377 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
390 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
391 netdev_name_node_add(net, dev->name_node);
392 hlist_add_head_rcu(&dev->index_hlist,
393 dev_index_hash(net, dev->ifindex));
400 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
415 xa_erase(&net->dev_by_index, dev->ifindex);
423 list_del_rcu(&dev->dev_list);
424 netdev_name_node_del(dev->name_node);
425 hlist_del_rcu(&dev->index_hlist);
448 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
449 * according to dev->type
496 return ARRAY_SIZE(netdev_lock_type) - 1;
513 i = netdev_lock_pos(dev->type);
514 lockdep_set_class_and_name(&dev->addr_list_lock,
547 * is cloned and should be copied-on-write, so that it will
549 * --ANK (980803)
554 if (pt->type == htons(ETH_P_ALL))
555 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
557 return pt->dev ? &pt->dev->ptype_specific :
558 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
562 * dev_add_pack - add packet handler
579 list_add_rcu(&pt->list, head);
585 * __dev_remove_pack - remove packet handler
606 list_del_rcu(&pt->list);
618 * dev_remove_pack - remove packet handler
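The kernel-doc fragments above (dev_add_pack, __dev_remove_pack, dev_remove_pack) belong to the protocol-handler registration API built on the ptype_all/ptype_base lists. A hedged sketch of how a protocol module typically uses it; every sample_* name below is invented for illustration:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int sample_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* the handler owns this reference and must free or forward the skb */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type sample_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* or ETH_P_ALL to tap every frame */
	.func = sample_rcv,
	/* .dev left NULL: match packets arriving on any device */
};

static int __init sample_init(void)
{
	dev_add_pack(&sample_pt);
	return 0;
}

static void __exit sample_exit(void)
{
	/* dev_remove_pack() also waits for in-flight RCU readers */
	dev_remove_pack(&sample_pt);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");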
645 * dev_get_iflink - get 'iflink' value of an interface
654 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
655 return dev->netdev_ops->ndo_get_iflink(dev);
657 return dev->ifindex;
662 * dev_fill_metadata_dst - Retrieve tunnel egress information.
674 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
675 return -EINVAL;
679 return -ENOMEM;
680 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
681 return -EINVAL;
683 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
689 int k = stack->num_paths++;
694 return &stack->path[k];
708 stack->num_paths = 0;
709 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
713 return -1;
716 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
718 return -1;
721 return -1;
729 return -1;
730 path->type = DEV_PATH_ETHERNET;
731 path->dev = ctx.dev;
738 * __dev_get_by_name - find a device by its name
754 return node_name ? node_name->dev : NULL;
759 * dev_get_by_name_rcu - find a device by its name
775 return node_name ? node_name->dev : NULL;
793 * netdev_get_by_name() - find a device by its name
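The three lookup helpers named above differ mainly in their locking contract: __dev_get_by_name() must run under RTNL and returns a pointer without taking a reference, dev_get_by_name_rcu() is only valid inside an RCU read-side section, and netdev_get_by_name() takes a tracked reference that the caller later drops with netdev_put(). A minimal sketch of the first two, assuming the initial namespace and the name "eth0" purely for illustration:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

/* RTNL variant: the pointer stays valid only while RTNL is held */
static struct net_device *sample_lookup_rtnl(void)
{
	ASSERT_RTNL();
	return __dev_get_by_name(&init_net, "eth0");
}

/* RCU variant: the device must not be used after rcu_read_unlock() */
static bool sample_eth0_running(void)
{
	struct net_device *dev;
	bool running = false;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(&init_net, "eth0");
	if (dev)
		running = netif_running(dev);
	rcu_read_unlock();

	return running;
}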
818 * __dev_get_by_index - find a device by its ifindex
835 if (dev->ifindex == ifindex)
843 * dev_get_by_index_rcu - find a device by its ifindex
859 if (dev->ifindex == ifindex)
880 * netdev_get_by_index() - find a device by its ifindex
904 * dev_get_by_napi_id - find a device by napi_id
924 return napi ? napi->dev : NULL;
929 * netdev_get_name - get a netdevice name, knowing its ifindex.
944 ret = -ENODEV;
948 strcpy(name, dev->name);
960 return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
964 * dev_getbyhwaddr_rcu - find a device by its hardware address
991 * dev_getbyhwaddr() - find a device by its hardware address
1022 if (dev->type == type) {
1033 * __dev_get_by_flags - find any device with given flags
1052 if (((dev->flags ^ if_flags) & mask) == 0) {
1062 * dev_valid_name - check if name is okay for network device
1088 * __dev_alloc_name - allocate a name for a device
1093 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1094 * id. It scans the list of devices to build up a free map, then chooses
1111 return -EINVAL;
1121 return -EINVAL;
1126 return -ENOMEM;
1132 if (!sscanf(name_node->name, name, &i))
1139 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1142 if (!sscanf(d->name, name, &i))
1149 if (!strncmp(buf, d->name, IFNAMSIZ))
1165 return -ENFILE;
1174 return -EINVAL;
1180 return -EEXIST;
1198 strscpy(dev->name, buf, IFNAMSIZ);
1203 * dev_alloc_name - allocate a name for a device
1207 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1208 * id. It scans the list of devices to build up a free map, then chooses
1230 strscpy(dev->name, buf, IFNAMSIZ);
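Both __dev_alloc_name() and dev_alloc_name() expand a "%d" template such as "lt%d" to the lowest free id and write the result into dev->name. A hedged sketch of a driver choosing a name before registration; the "sample%d" template is made up:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int sample_pick_name(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_alloc_name(dev, "sample%d");	/* sample0, sample1, ... */
	if (err < 0)
		return err;	/* e.g. -EINVAL or -ENFILE */

	/* dev->name now holds the chosen name; ready for register_netdevice() */
	return 0;
}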
1235 * dev_change_name - change name of a device
1257 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1262 memcpy(oldname, dev->name, IFNAMSIZ);
1272 dev->flags & IFF_UP ? " (while UP)" : "");
1274 old_assign_type = dev->name_assign_type;
1275 dev->name_assign_type = NET_NAME_RENAMED;
1278 ret = device_rename(&dev->dev, dev->name);
1280 memcpy(dev->name, oldname, IFNAMSIZ);
1281 dev->name_assign_type = old_assign_type;
1291 netdev_name_node_del(dev->name_node);
1297 netdev_name_node_add(net, dev->name_node);
1308 memcpy(dev->name, oldname, IFNAMSIZ);
1310 dev->name_assign_type = old_assign_type;
1323 * dev_set_alias - change ifalias of a device
1335 return -EINVAL;
1340 return -ENOMEM;
1342 memcpy(new_alias->ifalias, alias, len);
1343 new_alias->ifalias[len] = 0;
1347 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1359 * dev_get_alias - get ifalias of a device
1373 alias = rcu_dereference(dev->ifalias);
1375 ret = snprintf(name, len, "%s", alias->ifalias);
1382 * netdev_features_change - device changes features
1394 * netdev_state_change - device changes state
1403 if (dev->flags & IFF_UP) {
1416 * __netdev_notify_peers - notify network peers about existence of @dev,
1435 * netdev_notify_peers - notify network peers about existence of @dev
1462 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1463 n->dev->name, n->napi_id);
1464 if (IS_ERR(n->thread)) {
1465 err = PTR_ERR(n->thread);
1467 n->thread = NULL;
1475 const struct net_device_ops *ops = dev->netdev_ops;
1482 /* may be detached because parent is runtime-suspended */
1483 if (dev->dev.parent)
1484 pm_runtime_resume(dev->dev.parent);
1486 return -ENODEV;
1500 set_bit(__LINK_STATE_START, &dev->state);
1502 if (ops->ndo_validate_addr)
1503 ret = ops->ndo_validate_addr(dev);
1505 if (!ret && ops->ndo_open)
1506 ret = ops->ndo_open(dev);
1511 clear_bit(__LINK_STATE_START, &dev->state);
1513 dev->flags |= IFF_UP;
1516 add_device_randomness(dev->dev_addr, dev->addr_len);
1523 * dev_open - prepare an interface for use.
1539 if (dev->flags & IFF_UP)
1566 clear_bit(__LINK_STATE_START, &dev->state);
1571 * dev->stop() will invoke napi_disable() on all of its
1580 const struct net_device_ops *ops = dev->netdev_ops;
1586 * We allow it to be called even after a DETACH hot-plug
1589 if (ops->ndo_stop)
1590 ops->ndo_stop(dev);
1592 dev->flags &= ~IFF_UP;
1601 list_add(&dev->close_list, &single);
1612 if (!(dev->flags & IFF_UP))
1613 list_del_init(&dev->close_list);
1621 list_del_init(&dev->close_list);
1627 * dev_close - shut down an interface.
1637 if (dev->flags & IFF_UP) {
1640 list_add(&dev->close_list, &single);
1649 * dev_disable_lro - disable Large Receive Offload on a device
1661 dev->wanted_features &= ~NETIF_F_LRO;
1664 if (unlikely(dev->features & NETIF_F_LRO))
1673 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1682 dev->wanted_features &= ~NETIF_F_GRO_HW;
1685 if (unlikely(dev->features & NETIF_F_GRO_HW))
1720 return nb->notifier_call(nb, val, &info);
1733 if (!(dev->flags & IFF_UP))
1743 if (dev->flags & IFF_UP) {
1782 * register_netdevice_notifier - register a network notifier block
1829 * unregister_netdevice_notifier - unregister a network notifier block
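register_netdevice_notifier() and unregister_netdevice_notifier(), referenced above, manage the global netdev event chain; the *_net variants further down follow the same pattern per namespace. A hedged sketch of a subscriber, with invented sample_* names:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int sample_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "came up\n");
		break;
	case NETDEV_UNREGISTER:
		netdev_info(dev, "is going away\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block sample_notifier = {
	.notifier_call = sample_netdev_event,
};

/* called from module init/exit; registration replays NETDEV_REGISTER and
 * NETDEV_UP for devices that already exist.
 */
static int sample_subscribe(void)
{
	return register_netdevice_notifier(&sample_notifier);
}

static void sample_unsubscribe(void)
{
	unregister_netdevice_notifier(&sample_notifier);
}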
1870 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1883 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1892 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1901 * register_netdevice_notifier_net - register a per-netns network notifier block
1927 * unregister_netdevice_notifier_net - unregister a per-netns
1971 nn->nb = nb;
1972 list_add(&nn->list, &dev->net_notifier_list);
1986 list_del(&nn->list);
1998 list_for_each_entry(nn, &dev->net_notifier_list, list)
1999 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
2003 * call_netdevice_notifiers_info - call all network notifier blocks
2014 struct net *net = dev_net(info->dev);
2019 /* Run per-netns notifier block chain first, then run the global one.
2021 * all notifier block registrators get converted to be per-netns.
2023 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2030 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
2037 * Call all per-netns network notifier blocks, but not notifier blocks on
2047 struct net *net = dev_net(info->dev);
2051 return raw_notifier_call_chain_robust(&net->netdev_chain,
2068 * call_netdevice_notifiers - call all network notifier blocks
2083 * call_netdevice_notifiers_mtu - call all network notifier blocks
2178 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2191 skb->tstamp = 0;
2192 skb->mono_delivery_time = 0;
2194 skb->tstamp = ktime_get_real();
2199 if ((COND) && !(SKB)->tstamp) \
2200 (SKB)->tstamp = ktime_get_real(); \
2215 skb->protocol = eth_type_trans(skb, dev);
2229 * dev_forward_skb - loopback an skb to another netif
2262 return -ENOMEM;
2263 refcount_inc(&skb->users);
2264 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2276 if (ptype->type != type)
2287 if (!ptype->af_packet_priv || !skb->sk)
2290 if (ptype->id_match)
2291 return ptype->id_match(ptype, skb->sk);
2292 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2299 * dev_nit_active - return true if any network interface taps are in use
2305 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2324 if (READ_ONCE(ptype->ignore_outgoing))
2328 * they originated from - MvS (miquels@drinkel.ow.org)
2334 deliver_skb(skb2, pt_prev, skb->dev);
2346 /* skb->nh should be correctly
2352 if (skb_network_header(skb2) < skb2->data ||
2355 ntohs(skb2->protocol),
2356 dev->name);
2360 skb2->transport_header = skb2->network_header;
2361 skb2->pkt_type = PACKET_OUTGOING;
2366 ptype_list = &dev->ptype_all;
2372 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2381 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2396 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2399 if (tc->offset + tc->count > txq) {
2401 dev->num_tc = 0;
2409 tc = &dev->tc_to_txq[q];
2410 if (tc->offset + tc->count > txq) {
2420 if (dev->num_tc) {
2421 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2426 if ((txq - tc->offset) < tc->count)
2430 /* didn't find it, just return -1 to indicate no match */
2431 return -1;
2446 struct xps_dev_maps *old_maps, int tci, u16 index)
2451 map = xmap_dereference(dev_maps->attr_map[tci]);
2455 for (pos = map->len; pos--;) {
2456 if (map->queues[pos] != index)
2459 if (map->len > 1) {
2460 map->queues[pos] = map->queues[--map->len];
2465 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2466 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2478 int num_tc = dev_maps->num_tc;
2480 int tci;
2482 for (tci = cpu * num_tc; num_tc--; tci++) {
2485 for (i = count, j = offset; i--; j++) {
2486 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2504 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2516 dev_maps = xmap_dereference(dev->xps_maps[type]);
2520 for (j = 0; j < dev_maps->nr_ids; j++)
2526 for (i = offset + (count - 1); count--; i--)
2552 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2562 for (pos = 0; map && pos < map->len; pos++) {
2563 if (map->queues[pos] != index)
2568 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2570 if (pos < map->alloc_len)
2573 alloc_len = map->alloc_len * 2;
2576 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2588 new_map->queues[i] = map->queues[i];
2589 new_map->alloc_len = alloc_len;
2590 new_map->len = pos;
2600 int i, tci = index * dev_maps->num_tc;
2604 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2609 map = xmap_dereference(dev_maps->attr_map[tci]);
2610 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2621 int i, j, tci, numa_node_id = -2;
2626 WARN_ON_ONCE(index >= dev->num_tx_queues);
2628 if (dev->num_tc) {
2630 num_tc = dev->num_tc;
2632 return -EINVAL;
2635 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2639 return -EINVAL;
2644 dev_maps = xmap_dereference(dev->xps_maps[type]);
2646 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2647 nr_ids = dev->num_rx_queues;
2659 * setting up now, as dev->num_tc or nr_ids could have been updated in
2664 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2668 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2674 return -ENOMEM;
2677 new_dev_maps->nr_ids = nr_ids;
2678 new_dev_maps->num_tc = num_tc;
2681 tci = j * num_tc + tc;
2682 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2688 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2704 tci = j * num_tc + tc;
2707 /* add tx-queue to CPU/rx-queue maps */
2712 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2713 while ((pos < map->len) && (map->queues[pos] != index))
2716 if (pos == map->len)
2717 map->queues[map->len++] = index;
2720 if (numa_node_id == -2)
2723 numa_node_id = -1;
2733 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2739 for (j = 0; j < dev_maps->nr_ids; j++) {
2740 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2741 map = xmap_dereference(dev_maps->attr_map[tci]);
2746 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2751 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2772 /* removes tx-queue from unused CPUs/rx-queues */
2773 for (j = 0; j < dev_maps->nr_ids; j++) {
2774 tci = j * dev_maps->num_tc;
2776 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2778 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2779 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2784 tci, index);
2802 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2803 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2805 xmap_dereference(dev_maps->attr_map[tci]) :
2815 return -ENOMEM;
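The long __netif_set_xps_queue() region above rebuilds the per-CPU (or per-rx-queue) transmit maps. Drivers normally reach it through netif_set_xps_queue(); a minimal sketch, assuming CONFIG_XPS and a call from the driver's setup path:

#include <linux/cpumask.h>
#include <linux/netdevice.h>

static int sample_pin_txq0(struct net_device *dev)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	/* transmits issued from CPUs 0-1 should prefer TX queue 0 */
	err = netif_set_xps_queue(dev, mask, 0);

	free_cpumask_var(mask);
	return err;
}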
2835 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2838 while (txq-- != &dev->_tx[0]) {
2839 if (txq->sb_dev)
2840 netdev_unbind_sb_channel(dev, txq->sb_dev);
2852 dev->num_tc = 0;
2853 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2854 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2860 if (tc >= dev->num_tc)
2861 return -EINVAL;
2866 dev->tc_to_txq[tc].count = count;
2867 dev->tc_to_txq[tc].offset = offset;
2875 return -EINVAL;
2882 dev->num_tc = num_tc;
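netdev_reset_tc(), netdev_set_num_tc() and netdev_set_tc_queue(), shown above, let a driver (or an mqprio-style offload) carve real_num_tx_queues into traffic classes. A hedged sketch splitting eight queues into two classes; the layout and priority mapping are arbitrary examples:

#include <linux/netdevice.h>

static int sample_setup_tc(struct net_device *dev)
{
	int err;

	netdev_reset_tc(dev);

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	/* tc 0: queues 0-3, tc 1: queues 4-7 (arguments are tc, count, offset) */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* map skb priorities onto the classes */
	netdev_set_prio_tc_map(dev, 0, 0);
	netdev_set_prio_tc_map(dev, 5, 1);

	return 0;
}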
2890 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2895 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2896 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2898 while (txq-- != &dev->_tx[0]) {
2899 if (txq->sb_dev == sb_dev)
2900 txq->sb_dev = NULL;
2910 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2911 return -EINVAL;
2914 if ((offset + count) > dev->real_num_tx_queues)
2915 return -EINVAL;
2918 sb_dev->tc_to_txq[tc].count = count;
2919 sb_dev->tc_to_txq[tc].offset = offset;
2924 while (count--)
2925 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2935 return -ENODEV;
2937 /* We allow channels 1 - 32767 to be used for subordinate channels.
2943 return -EINVAL;
2945 dev->num_tc = -channel;
2960 disabling = txq < dev->real_num_tx_queues;
2962 if (txq < 1 || txq > dev->num_tx_queues)
2963 return -EINVAL;
2965 if (dev->reg_state == NETREG_REGISTERED ||
2966 dev->reg_state == NETREG_UNREGISTERING) {
2969 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2974 if (dev->num_tc)
2979 dev->real_num_tx_queues = txq;
2989 dev->real_num_tx_queues = txq;
2998 * netif_set_real_num_rx_queues - set actual number of RX queues used
3011 if (rxq < 1 || rxq > dev->num_rx_queues)
3012 return -EINVAL;
3014 if (dev->reg_state == NETREG_REGISTERED) {
3017 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3023 dev->real_num_rx_queues = rxq;
3030 * netif_set_real_num_queues - set actual number of RX and TX queues used
3041 unsigned int old_rxq = dev->real_num_rx_queues;
3044 if (txq < 1 || txq > dev->num_tx_queues ||
3045 rxq < 1 || rxq > dev->num_rx_queues)
3046 return -EINVAL;
3048 /* Start from increases, so the error path only does decreases -
3051 if (rxq > dev->real_num_rx_queues) {
3056 if (txq > dev->real_num_tx_queues) {
3061 if (rxq < dev->real_num_rx_queues)
3063 if (txq < dev->real_num_tx_queues)
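netif_set_real_num_tx_queues(), netif_set_real_num_rx_queues() and the combined netif_set_real_num_queues() resize the set of queues actually in use without reallocating them; increases are attempted first so the error path only ever shrinks. A hedged sketch sizing both to the online CPUs, clamped to what was allocated (illustrative only, called with RTNL held):

#include <linux/cpumask.h>
#include <linux/netdevice.h>

static int sample_resize_queues(struct net_device *dev)
{
	unsigned int n;
	int err;

	n = min_t(unsigned int, num_online_cpus(), dev->num_tx_queues);
	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;

	n = min_t(unsigned int, num_online_cpus(), dev->num_rx_queues);
	return netif_set_real_num_rx_queues(dev, n);
}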
3074 * netif_set_tso_max_size() - set the max size of TSO frames supported
3076 * @size: max skb->len of a TSO frame
3078 * Set the limit on the size of TSO super-frames the device can handle.
3084 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3085 if (size < READ_ONCE(dev->gso_max_size))
3087 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3093 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3098 * a single TSO super-frame.
3103 dev->tso_max_segs = segs;
3104 if (segs < READ_ONCE(dev->gso_max_segs))
3110 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3116 netif_set_tso_max_size(to, from->tso_max_size);
3117 netif_set_tso_max_segs(to, from->tso_max_segs);
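netif_set_tso_max_size() and netif_set_tso_max_segs() record the hardware's TSO limits and trim gso_max_size/gso_max_segs down to them, while netif_inherit_tso_max() copies both limits to a stacked device. A trivial, hedged example with made-up limits from a driver probe path:

#include <linux/netdevice.h>

static void sample_set_tso_limits(struct net_device *dev)
{
	/* the values stand in for limits read from the hardware */
	netif_set_tso_max_size(dev, 65536);	/* max bytes in one TSO super-frame */
	netif_set_tso_max_segs(dev, 64);	/* max segments per TSO super-frame */
}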
3122 * netif_get_num_default_rss_queues - default number of RSS queues
3153 q->next_sched = NULL;
3154 *sd->output_queue_tailp = q;
3155 sd->output_queue_tailp = &q->next_sched;
3162 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3173 return (struct dev_kfree_skb_cb *)skb->cb;
3180 struct Qdisc *q = rcu_dereference(txq->qdisc);
3190 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3194 q = rcu_dereference(dev_queue->qdisc);
3208 if (likely(refcount_read(&skb->users) == 1)) {
3210 refcount_set(&skb->users, 0);
3211 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3214 get_kfree_skb_cb(skb)->reason = reason;
3216 skb->next = __this_cpu_read(softnet_data.completion_queue);
3234 * netif_device_detach - mark device as removed
3241 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3249 * netif_device_attach - mark device as attached
3256 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3274 u16 qcount = dev->real_num_tx_queues;
3276 if (dev->num_tc) {
3277 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3279 qoffset = sb_dev->tc_to_txq[tc].offset;
3280 qcount = sb_dev->tc_to_txq[tc].count;
3283 sb_dev->name, qoffset, tc);
3285 qcount = dev->real_num_tx_queues;
3293 hash -= qoffset;
3295 hash -= qcount;
3305 struct net_device *dev = skb->dev;
3312 if (dev->dev.parent)
3313 name = dev_driver_string(dev->dev.parent);
3319 name, dev ? &dev->features : &null_features,
3320 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3332 if (skb->ip_summed == CHECKSUM_COMPLETE)
3337 return -EINVAL;
3350 ret = -EINVAL;
3357 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3359 offset += skb->csum_offset;
3370 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3372 skb->ip_summed = CHECKSUM_NONE;
3383 if (skb->ip_summed != CHECKSUM_PARTIAL)
3400 ret = -EINVAL;
3409 skb->len - start, ~(__u32)0,
3411 *(__le32 *)(skb->data + offset) = crc32c_csum;
3419 __be16 type = skb->protocol;
3428 eth = (struct ethhdr *)skb->data;
3429 type = eth->h_proto;
3458 if (!(dev->features & NETIF_F_HIGHDMA)) {
3459 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3460 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3479 features &= skb->dev->mpls_features;
3500 if (skb->ip_summed != CHECKSUM_NONE &&
3504 if (illegal_highdma(skb->dev, skb))
3529 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3531 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3534 if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
3537 if (!skb_shinfo(skb)->gso_type) {
3548 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3549 features &= ~dev->gso_partial_features;
3554 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3555 struct iphdr *iph = skb->encapsulation ?
3558 if (!(iph->frag_off & htons(IP_DF)))
3567 struct net_device *dev = skb->dev;
3568 netdev_features_t features = dev->features;
3577 if (skb->encapsulation)
3578 features &= dev->hw_enc_features;
3582 dev->vlan_features |
3586 if (dev->netdev_ops->ndo_features_check)
3587 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3605 len = skb->len;
3620 struct sk_buff *next = skb->next;
3625 skb->next = next;
3645 !vlan_hw_offload_capable(features, skb->vlan_proto))
3666 switch (skb->csum_offset) {
3710 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3711 if (skb->encapsulation)
3738 next = skb->next;
3742 skb->prev = skb;
3751 tail->next = skb;
3752 /* If skb was segmented, skb->prev points to
3755 tail = skb->prev;
3765 qdisc_skb_cb(skb)->pkt_len = skb->len;
3770 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3771 u16 gso_segs = shinfo->gso_segs;
3778 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3786 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
3794 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
3795 int payload = skb->len - hdr_len;
3800 gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
3802 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3812 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3829 if (q->flags & TCQ_F_NOLOCK) {
3830 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3833 * of q->seqlock to protect from racing with requeuing.
3865 * This permits qdisc->running owner to get the lock more
3874 spin_lock(&q->busylock);
3877 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3880 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3883 * This is a work-conserving queue; there are no old skbs
3884 * waiting to be sent out; and the qdisc is not running -
3892 spin_unlock(&q->busylock);
3904 spin_unlock(&q->busylock);
3915 spin_unlock(&q->busylock);
3926 if (skb->priority)
3928 map = rcu_dereference_bh(skb->dev->priomap);
3935 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3937 if (prioidx < map->priomap_len)
3938 skb->priority = map->priomap[prioidx];
3945 * dev_loopback_xmit - loop back @skb
3954 skb->pkt_type = PACKET_LOOPBACK;
3955 if (skb->ip_summed == CHECKSUM_NONE)
3956 skb->ip_summed = CHECKSUM_UNNECESSARY;
3990 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3996 tc_skb_cb(skb)->mru = 0;
3997 tc_skb_cb(skb)->post_ct = false;
4000 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
4008 skb->tc_index = TC_H_MIN(res.classid);
4036 __skb_push(skb, skb->mac_len);
4044 __skb_pull(skb, skb->mac_len);
4052 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4062 qdisc_skb_cb(skb)->pkt_len = skb->len;
4078 __skb_push(skb, skb->mac_len);
4079 if (skb_do_redirect(skb) == -EAGAIN) {
4080 __skb_pull(skb, skb->mac_len);
4107 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4113 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
4163 struct xps_dev_maps *dev_maps, unsigned int tci)
4165 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4167 int queue_index = -1;
4169 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4172 tci *= dev_maps->num_tc;
4173 tci += tc;
4175 map = rcu_dereference(dev_maps->attr_map[tci]);
4177 if (map->len == 1)
4178 queue_index = map->queues[0];
4180 queue_index = map->queues[reciprocal_scale(
4181 skb_get_hash(skb), map->len)];
4182 if (unlikely(queue_index >= dev->real_num_tx_queues))
4183 queue_index = -1;
4194 struct sock *sk = skb->sk;
4195 int queue_index = -1;
4198 return -1;
4204 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4206 int tci = sk_rx_queue_get(sk);
4208 if (tci >= 0)
4210 tci);
4215 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4217 unsigned int tci = skb->sender_cpu - 1;
4220 tci);
4227 return -1;
4241 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4248 struct sock *sk = skb->sk;
4253 if (queue_index < 0 || skb->ooo_okay ||
4254 queue_index >= dev->real_num_tx_queues) {
4262 rcu_access_pointer(sk->sk_dst_cache))
4279 u32 sender_cpu = skb->sender_cpu - 1;
4282 skb->sender_cpu = raw_smp_processor_id() + 1;
4285 if (dev->real_num_tx_queues != 1) {
4286 const struct net_device_ops *ops = dev->netdev_ops;
4288 if (ops->ndo_select_queue)
4289 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4301 * __dev_queue_xmit() - transmit a buffer
4317 * * 0 - buffer successfully transmitted
4318 * * positive qdisc return code - NET_XMIT_DROP etc.
4319 * * negative errno - other errors
4323 struct net_device *dev = skb->dev;
4326 int rc = -ENOMEM;
4332 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4333 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4364 /* If device/qdisc don't need skb->dst, release it right now while
4367 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4375 q = rcu_dereference_bh(txq->qdisc);
4378 if (q->enqueue) {
4395 if (dev->flags & IFF_UP) {
4398 /* Other cpus might concurrently change txq->xmit_lock_owner
4399 * to -1 or to their cpu id, but not to our id.
4401 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4422 dev->name);
4429 dev->name);
4433 rc = -ENETDOWN;
4447 struct net_device *dev = skb->dev;
4508 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4512 * read on napi->thread. Only call
4515 thread = READ_ONCE(napi->thread);
4522 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4523 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4529 list_add_tail(&napi->poll_list, &sd->poll_list);
4530 WRITE_ONCE(napi->list_owner, smp_processor_id());
4534 if (!sd->in_net_rx_action)
4540 /* One global table that all flow-based protocols share. */
4565 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4566 !(dev->features & NETIF_F_NTUPLE))
4568 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4572 rxqueue = dev->_rx + rxq_index;
4573 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4576 flow_id = skb_get_hash(skb) & flow_table->mask;
4577 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4582 rflow = &flow_table->flows[flow_id];
4583 rflow->filter = rc;
4584 if (old_rflow->filter == rflow->filter)
4585 old_rflow->filter = RPS_NO_FILTER;
4588 rflow->last_qtail =
4592 rflow->cpu = next_cpu;
4605 struct netdev_rx_queue *rxqueue = dev->_rx;
4608 int cpu = -1;
4615 if (unlikely(index >= dev->real_num_rx_queues)) {
4616 WARN_ONCE(dev->real_num_rx_queues > 1,
4619 dev->name, index, dev->real_num_rx_queues);
4627 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4628 map = rcu_dereference(rxqueue->rps_map);
4646 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4655 rflow = &flow_table->flows[hash & flow_table->mask];
4656 tcpu = rflow->cpu;
4660 * different from current CPU (one in the rx-queue flow
4662 * - Current CPU is unset (>= nr_cpu_ids).
4663 * - Current CPU is offline.
4664 * - The current CPU's queue tail has advanced beyond the
4671 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4672 rflow->last_qtail)) >= 0)) {
4687 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4701 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4714 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4721 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4722 if (flow_table && flow_id <= flow_table->mask) {
4723 rflow = &flow_table->flows[flow_id];
4724 cpu = READ_ONCE(rflow->cpu);
4725 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4726 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4727 rflow->last_qtail) <
4728 (int)(10 * flow_table->mask)))
4743 ____napi_schedule(sd, &sd->backlog);
4744 sd->received_rps++;
4755 smp_store_release(&sd->defer_ipi_scheduled, 0);
4759 * After we queued a packet into sd->input_pkt_queue,
4762 * - If this is another cpu queue, link it to our rps_ipi_list,
4765 * - If this is our own queue, NAPI schedule our backlog.
4774 sd->rps_ipi_next = mysd->rps_ipi_list;
4775 mysd->rps_ipi_list = sd;
4780 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4785 __napi_schedule_irqoff(&mysd->backlog);
4805 fl = rcu_dereference(sd->flow_limit);
4807 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4808 old_flow = fl->history[fl->history_head];
4809 fl->history[fl->history_head] = new_flow;
4811 fl->history_head++;
4812 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4814 if (likely(fl->buckets[old_flow]))
4815 fl->buckets[old_flow]--;
4817 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4818 fl->count++;
4844 if (!netif_running(skb->dev))
4846 qlen = skb_queue_len(&sd->input_pkt_queue);
4850 __skb_queue_tail(&sd->input_pkt_queue, skb);
4859 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4866 sd->dropped++;
4869 dev_core_stats_rx_dropped_inc(skb->dev);
4876 struct net_device *dev = skb->dev;
4879 rxqueue = dev->_rx;
4884 if (unlikely(index >= dev->real_num_rx_queues)) {
4885 WARN_ONCE(dev->real_num_rx_queues > 1,
4888 dev->name, index, dev->real_num_rx_queues);
4912 mac_len = skb->data - skb_mac_header(skb);
4913 hard_start = skb->data - skb_headroom(skb);
4916 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4920 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4921 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4924 orig_data_end = xdp->data_end;
4925 orig_data = xdp->data;
4926 eth = (struct ethhdr *)xdp->data;
4927 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4928 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4929 orig_eth_type = eth->h_proto;
4934 off = xdp->data - orig_data;
4939 __skb_push(skb, -off);
4941 skb->mac_header += off;
4946 off = xdp->data_end - orig_data_end;
4948 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4949 skb->len += off; /* positive on grow, negative on shrink */
4953 eth = (struct ethhdr *)xdp->data;
4954 if ((orig_eth_type != eth->h_proto) ||
4955 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4956 skb->dev->dev_addr)) ||
4957 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4959 skb->pkt_type = PACKET_HOST;
4960 skb->protocol = eth_type_trans(skb, skb->dev);
4976 metalen = xdp->data - xdp->data_meta;
5003 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
5004 int troom = skb->tail + skb->data_len - skb->end;
5024 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
5027 trace_xdp_exception(skb->dev, xdp_prog, act);
5039 * network taps in order to match in-driver-XDP behavior. This also means
5041 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
5046 struct net_device *dev = skb->dev;
5080 err = xdp_do_generic_redirect(skb->dev, skb,
5114 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5118 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5132 * __netif_rx - Slightly optimized version of netif_rx
5153 * netif_rx - post buffer to the network code
5191 if (sd->completion_queue) {
5195 clist = sd->completion_queue;
5196 sd->completion_queue = NULL;
5202 clist = clist->next;
5204 WARN_ON(refcount_read(&skb->users));
5205 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5209 get_kfree_skb_cb(skb)->reason);
5211 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5215 get_kfree_skb_cb(skb)->reason);
5219 if (sd->output_queue) {
5223 head = sd->output_queue;
5224 sd->output_queue = NULL;
5225 sd->output_queue_tailp = &sd->output_queue;
5234 head = head->next_sched;
5236 /* We need to make sure head->next_sched is read
5241 if (!(q->flags & TCQ_F_NOLOCK)) {
5245 &q->state))) {
5254 clear_bit(__QDISC_STATE_SCHED, &q->state);
5258 clear_bit(__QDISC_STATE_SCHED, &q->state);
5278 * netdev_is_rx_handler_busy - check if receive handler is registered
5289 return dev && rtnl_dereference(dev->rx_handler);
5294 * netdev_rx_handler_register - register receive handler
5312 return -EBUSY;
5314 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5315 return -EINVAL;
5318 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5319 rcu_assign_pointer(dev->rx_handler, rx_handler);
5326 * netdev_rx_handler_unregister - unregister receive handler
5337 RCU_INIT_POINTER(dev->rx_handler, NULL);
5343 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
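netdev_rx_handler_register()/netdev_rx_handler_unregister(), shown above, install the single per-device rx_handler that bridge, bonding, team and similar upper devices use to intercept received frames before protocol delivery. A hedged sketch of the calling convention; the sample_* names are invented and the handler simply lets traffic continue:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static rx_handler_result_t sample_rx_handler(struct sk_buff **pskb)
{
	/* a real upper device would consume *pskb (RX_HANDLER_CONSUMED) or
	 * retarget it and return RX_HANDLER_ANOTHER; this sketch just lets
	 * normal protocol processing continue.
	 */
	return RX_HANDLER_PASS;
}

static int sample_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, sample_rx_handler, priv);
	rtnl_unlock();

	return err;	/* -EBUSY if another handler is already installed */
}

static void sample_detach(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
}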
5353 switch (skb->protocol) {
5399 orig_dev = skb->dev;
5409 skb->skb_iif = skb->dev->ifindex;
5417 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5426 if (eth_type_vlan(skb->protocol)) {
5444 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5484 rx_handler = rcu_dereference(skb->dev->rx_handler);
5506 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5512 skb->pkt_type = PACKET_OTHERHOST;
5513 } else if (eth_type_vlan(skb->protocol)) {
5537 * and set skb->priority like in vlan_do_receive()
5543 type = skb->protocol;
5553 &orig_dev->ptype_specific);
5555 if (unlikely(skb->dev != orig_dev)) {
5557 &skb->dev->ptype_specific);
5567 dev_core_stats_rx_dropped_inc(skb->dev);
5569 dev_core_stats_rx_nohandler_inc(skb->dev);
5572 * me how you were going to use this. :-)
5579 * then skb should also be non-NULL.
5590 struct net_device *orig_dev = skb->dev;
5596 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5597 skb->dev, pt_prev, orig_dev);
5602 * netif_receive_skb_core - special purpose version of netif_receive_skb
5638 if (pt_prev->list_func != NULL)
5639 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5644 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5650 /* Fast-path assumptions:
5651 * - There is no RX handler.
5652 * - Only one packet_type matches.
5653 * If either of these fails, we will end up doing some per-packet
5654 * processing in-line, then handling the 'last ptype' for the whole
5655 * sublist. This can't cause out-of-order delivery to any single ptype,
5657 * other ptypes are handled per-packet.
5668 struct net_device *orig_dev = skb->dev;
5683 list_add_tail(&skb->list, &sublist);
5699 * - be delivered to SOCK_MEMALLOC sockets only
5700 * - stay away from userspace
5701 * - have bounded memory usage
5726 list_cut_before(&sublist, head, &skb->list);
5747 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5748 struct bpf_prog *new = xdp->prog;
5751 switch (xdp->command) {
5753 rcu_assign_pointer(dev->xdp_prog, new);
5767 ret = -EINVAL;
5787 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5790 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5811 list_add_tail(&skb->list, &sublist);
5820 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5825 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5835 * netif_receive_skb - process receive buffer from network
5863 * netif_receive_skb_list - process many receive buffers from network
5899 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5900 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5901 __skb_unlink(skb, &sd->input_pkt_queue);
5908 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5909 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5910 __skb_unlink(skb, &sd->process_queue);
5929 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5930 !skb_queue_empty_lockless(&sd->process_queue);
5937 * input_pkt_queue and process_queue even if the latter could end-up
5979 struct softnet_data *next = remsd->rps_ipi_next;
5981 if (cpu_online(remsd->cpu))
5982 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5995 struct softnet_data *remsd = sd->rps_ipi_list;
5998 sd->rps_ipi_list = NULL;
6012 return sd->rps_ipi_list != NULL;
6032 napi->weight = READ_ONCE(dev_rx_weight);
6036 while ((skb = __skb_dequeue(&sd->process_queue))) {
6047 if (skb_queue_empty(&sd->input_pkt_queue)) {
6056 napi->state = 0;
6059 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6060 &sd->process_queue);
6069 * __napi_schedule - schedule for receive
6086 * napi_schedule_prep - check if napi can be scheduled
6096 unsigned long new, val = READ_ONCE(n->state);
6111 } while (!try_cmpxchg(&n->state, &val, new));
6118 * __napi_schedule_irqoff - schedule for receive
6125 * due to force-threaded interrupts and spinlock substitution.
6147 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6152 if (n->gro_bitmask)
6153 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6154 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6156 if (n->defer_hard_irqs_count > 0) {
6157 n->defer_hard_irqs_count--;
6158 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6162 if (n->gro_bitmask) {
6172 if (unlikely(!list_empty(&n->poll_list))) {
6173 /* If n->poll_list is not empty, we need to mask irqs */
6175 list_del_init(&n->poll_list);
6178 WRITE_ONCE(n->list_owner, -1);
6180 val = READ_ONCE(n->state);
6189 * because we will call napi->poll() one more time.
6194 } while (!try_cmpxchg(&n->state, &val, new));
6202 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6215 if (napi->napi_id == napi_id)
6231 if (napi->gro_bitmask) {
6239 clear_bit(NAPI_STATE_SCHED, &napi->state);
6252 * Since we are about to call napi->poll() once more, we can safely
6258 clear_bit(NAPI_STATE_MISSED, &napi->state);
6259 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6264 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6265 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6266 if (napi->defer_hard_irqs_count && timeout) {
6267 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6272 /* All we really want here is to re-enable device interrupts.
6275 rc = napi->poll(napi, budget);
6276 /* We can't gro_normal_list() here, because napi->poll() might have
6312 unsigned long val = READ_ONCE(napi->state);
6315 * we avoid dirtying napi->state as much as we can.
6320 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6323 if (cmpxchg(&napi->state, val,
6327 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6331 napi_poll = napi->poll;
6338 __NET_ADD_STATS(dev_net(napi->dev),
6371 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6381 napi->napi_id = napi_gen_id;
6383 hlist_add_head_rcu(&napi->napi_hash_node,
6384 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6396 hlist_del_init_rcu(&napi->napi_hash_node);
6411 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6412 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6424 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6425 napi->gro_hash[i].count = 0;
6427 napi->gro_bitmask = 0;
6435 if (dev->threaded == threaded)
6439 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6440 if (!napi->thread) {
6450 dev->threaded = threaded;
6463 list_for_each_entry(napi, &dev->napi_list, dev_list)
6464 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6473 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6476 INIT_LIST_HEAD(&napi->poll_list);
6477 INIT_HLIST_NODE(&napi->napi_hash_node);
6478 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6479 napi->timer.function = napi_watchdog;
6481 napi->skb = NULL;
6482 INIT_LIST_HEAD(&napi->rx_list);
6483 napi->rx_count = 0;
6484 napi->poll = poll;
6488 napi->weight = weight;
6489 napi->dev = dev;
6491 napi->poll_owner = -1;
6493 napi->list_owner = -1;
6494 set_bit(NAPI_STATE_SCHED, &napi->state);
6495 set_bit(NAPI_STATE_NPSVC, &napi->state);
6496 list_add_rcu(&napi->dev_list, &dev->napi_list);
6499 /* Create kthread for this napi if dev->threaded is set.
6500 * Clear dev->threaded if kthread creation failed so that
6503 if (dev->threaded && napi_kthread_create(napi))
6504 dev->threaded = 0;
6513 set_bit(NAPI_STATE_DISABLE, &n->state);
6515 val = READ_ONCE(n->state);
6519 val = READ_ONCE(n->state);
6524 } while (!try_cmpxchg(&n->state, &val, new));
6526 hrtimer_cancel(&n->timer);
6528 clear_bit(NAPI_STATE_DISABLE, &n->state);
6533 * napi_enable - enable NAPI scheduling
6541 unsigned long new, val = READ_ONCE(n->state);
6547 if (n->dev->threaded && n->thread)
6549 } while (!try_cmpxchg(&n->state, &val, new));
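The NAPI fragments above (____napi_schedule, napi_schedule_prep, napi_complete_done, napi_enable/disable and the threaded-poll path) implement the scheduling state machine that drivers drive from their interrupt handler and poll callback. A hedged sketch of that driver side; struct sample_priv and the mask/unmask/process stubs are invented stand-ins for real hardware access:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct sample_priv {
	struct napi_struct napi;
	/* device-specific state would live here */
};

/* stubs standing in for real register writes / ring processing */
static void sample_mask_irqs(struct sample_priv *priv) { }
static void sample_unmask_irqs(struct sample_priv *priv) { }
static int sample_process_rx(struct sample_priv *priv, int budget) { return 0; }

/* hardware interrupt: mask device IRQs and hand the work to NAPI */
static irqreturn_t sample_isr(int irq, void *data)
{
	struct sample_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		sample_mask_irqs(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/* NAPI poll: process up to @budget packets, then re-enable IRQs */
static int sample_poll(struct napi_struct *napi, int budget)
{
	struct sample_priv *priv = container_of(napi, struct sample_priv, napi);
	int work = sample_process_rx(priv, budget);

	if (work < budget && napi_complete_done(napi, work))
		sample_unmask_irqs(priv);

	return work;
}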
6560 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6562 napi->gro_hash[i].count = 0;
6569 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6573 list_del_rcu(&napi->dev_list);
6577 napi->gro_bitmask = 0;
6579 if (napi->thread) {
6580 kthread_stop(napi->thread);
6581 napi->thread = NULL;
6590 weight = n->weight;
6595 * actually make the ->poll() call. Therefore we avoid
6596 * accidentally calling ->poll() when NAPI is not scheduled.
6599 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6600 work = n->poll(n, weight);
6605 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6606 n->poll, work, weight);
6614 * move the instance around on the list at will.
6621 /* The NAPI context has more processing work, but busy-polling
6627 * that the NAPI is re-scheduled.
6634 if (n->gro_bitmask) {
6646 if (unlikely(!list_empty(&n->poll_list))) {
6648 n->dev ? n->dev->name : "backlog");
6663 list_del_init(&n->poll_list);
6670 list_add_tail(&n->poll_list, repoll);
6689 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6690 WARN_ON(!list_empty(&napi->poll_list));
6702 return -1;
6710 if (!READ_ONCE(sd->defer_list))
6713 spin_lock(&sd->defer_lock);
6714 skb = sd->defer_list;
6715 sd->defer_list = NULL;
6716 sd->defer_count = 0;
6717 spin_unlock(&sd->defer_lock);
6720 next = skb->next;
6740 sd->in_napi_threaded_poll = true;
6746 sd->in_napi_threaded_poll = false;
6776 sd->in_net_rx_action = true;
6778 list_splice_init(&sd->poll_list, &list);
6788 sd->in_net_rx_action = false;
6792 * sd->in_net_rx_action was true.
6794 if (!list_empty(&sd->poll_list))
6803 budget -= napi_poll(n, &repoll);
6811 sd->time_squeeze++;
6818 list_splice_tail_init(&sd->poll_list, &list);
6820 list_splice(&list, &sd->poll_list);
6821 if (!list_empty(&sd->poll_list))
6824 sd->in_net_rx_action = false;
6856 if (adj->dev == adj_dev)
6865 struct net_device *dev = (struct net_device *)priv->data;
6871 * netdev_has_upper_dev - Check if device is linked to an upper device
6894 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6916 * netdev_has_any_upper_dev - Check if device is linked to some device
6926 return !list_empty(&dev->adj_list.upper);
6931 * netdev_master_upper_dev_get - Get master upper device
6943 if (list_empty(&dev->adj_list.upper))
6946 upper = list_first_entry(&dev->adj_list.upper,
6948 if (likely(upper->master))
6949 return upper->dev;
6960 if (list_empty(&dev->adj_list.upper))
6963 upper = list_first_entry(&dev->adj_list.upper,
6965 if (likely(upper->master) && !upper->ignore)
6966 return upper->dev;
6971 * netdev_has_any_lower_dev - Check if device is linked to some device
6981 return !list_empty(&dev->adj_list.lower);
6990 return adj->private;
6995 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7009 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7011 if (&upper->list == &dev->adj_list.upper)
7014 *iter = &upper->list;
7016 return upper->dev;
7026 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7028 if (&upper->list == &dev->adj_list.upper)
7031 *iter = &upper->list;
7032 *ignore = upper->ignore;
7034 return upper->dev;
7044 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7046 if (&upper->list == &dev->adj_list.upper)
7049 *iter = &upper->list;
7051 return upper->dev;
7065 iter = &dev->adj_list.upper;
7083 niter = &udev->adj_list.upper;
7092 next = dev_stack[--cur];
7113 iter = &dev->adj_list.upper;
7129 niter = &udev->adj_list.upper;
7138 next = dev_stack[--cur];
7165 * netdev_lower_get_next_private - Get the next ->private from the
7170 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7182 if (&lower->list == &dev->adj_list.lower)
7185 *iter = lower->list.next;
7187 return lower->private;
7192 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7198 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7208 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7210 if (&lower->list == &dev->adj_list.lower)
7213 *iter = &lower->list;
7215 return lower->private;
7220 * netdev_lower_get_next - Get the next device from the lower neighbour
7236 if (&lower->list == &dev->adj_list.lower)
7239 *iter = lower->list.next;
7241 return lower->dev;
7250 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7252 if (&lower->list == &dev->adj_list.lower)
7255 *iter = &lower->list;
7257 return lower->dev;
7266 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7268 if (&lower->list == &dev->adj_list.lower)
7271 *iter = &lower->list;
7272 *ignore = lower->ignore;
7274 return lower->dev;
7287 iter = &dev->adj_list.lower;
7303 niter = &ldev->adj_list.lower;
7312 next = dev_stack[--cur];
7335 iter = &dev->adj_list.lower;
7353 niter = &ldev->adj_list.lower;
7362 next = dev_stack[--cur];
7378 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7379 if (&lower->list == &dev->adj_list.lower)
7382 *iter = &lower->list;
7384 return lower->dev;
7395 for (iter = &dev->adj_list.upper,
7401 if (max_depth < udev->upper_level)
7402 max_depth = udev->upper_level;
7415 for (iter = &dev->adj_list.lower,
7421 if (max_depth < ldev->lower_level)
7422 max_depth = ldev->lower_level;
7431 dev->upper_level = __netdev_upper_depth(dev) + 1;
7440 if (list_empty(&dev->unlink_list))
7441 list_add_tail(&dev->unlink_list, &net_unlink_list);
7448 dev->lower_level = __netdev_lower_depth(dev) + 1;
7454 if (priv->flags & NESTED_SYNC_IMM)
7455 dev->nested_level = dev->lower_level - 1;
7456 if (priv->flags & NESTED_SYNC_TODO)
7472 iter = &dev->adj_list.lower;
7488 niter = &ldev->adj_list.lower;
7497 next = dev_stack[--cur];
7510 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7515 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7522 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7525 return lower->private;
7531 * netdev_master_upper_dev_get_rcu - Get master upper device
7541 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7543 if (upper && likely(upper->master))
7544 return upper->dev;
7555 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7556 "upper_%s" : "lower_%s", adj_dev->name);
7557 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7566 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7568 sysfs_remove_link(&(dev->dev.kobj), linkname);
7575 return (dev_list == &dev->adj_list.upper ||
7576 dev_list == &dev->adj_list.lower) &&
7591 adj->ref_nr += 1;
7592 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7593 dev->name, adj_dev->name, adj->ref_nr);
7600 return -ENOMEM;
7602 adj->dev = adj_dev;
7603 adj->master = master;
7604 adj->ref_nr = 1;
7605 adj->private = private;
7606 adj->ignore = false;
7607 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7609 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7610 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7620 ret = sysfs_create_link(&(dev->dev.kobj),
7621 &(adj_dev->dev.kobj), "master");
7625 list_add_rcu(&adj->list, dev_list);
7627 list_add_tail_rcu(&adj->list, dev_list);
7634 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7636 netdev_put(adj_dev, &adj->dev_tracker);
7650 dev->name, adj_dev->name, ref_nr);
7656 dev->name, adj_dev->name);
7661 if (adj->ref_nr > ref_nr) {
7662 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7663 dev->name, adj_dev->name, ref_nr,
7664 adj->ref_nr - ref_nr);
7665 adj->ref_nr -= ref_nr;
7669 if (adj->master)
7670 sysfs_remove_link(&(dev->dev.kobj), "master");
7673 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7675 list_del_rcu(&adj->list);
7677 adj_dev->name, dev->name, adj_dev->name);
7678 netdev_put(adj_dev, &adj->dev_tracker);
7720 &dev->adj_list.upper,
7721 &upper_dev->adj_list.lower,
7729 &dev->adj_list.upper,
7730 &upper_dev->adj_list.lower);
7755 return -EBUSY;
7759 return -EBUSY;
7761 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7762 return -EMLINK;
7766 return -EEXIST;
7770 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7806 * netdev_upper_dev_link - Add a link to the upper device
7831 * netdev_master_upper_dev_link - Add a master link to the upper device
7839 * one master upper device can be linked, although other non-master devices
7892 * netdev_upper_dev_unlink - Removes a link to upper device
7917 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7919 adj->ignore = val;
7921 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7923 adj->ignore = val;
8006 * netdev_bonding_info_change - Dispatch event about slave change
8038 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8040 if (!dev->offload_xstats_l3)
8041 return -ENOMEM;
8053 kfree(dev->offload_xstats_l3);
8054 dev->offload_xstats_l3 = NULL;
8065 return -EALREADY;
8073 return -EINVAL;
8086 kfree(dev->offload_xstats_l3);
8087 dev->offload_xstats_l3 = NULL;
8096 return -EALREADY;
8105 return -EINVAL;
8120 return dev->offload_xstats_l3;
8148 dest->rx_packets += src->rx_packets;
8149 dest->tx_packets += src->tx_packets;
8150 dest->rx_bytes += src->rx_bytes;
8151 dest->tx_bytes += src->tx_bytes;
8152 dest->rx_errors += src->rx_errors;
8153 dest->tx_errors += src->tx_errors;
8154 dest->rx_dropped += src->rx_dropped;
8155 dest->tx_dropped += src->tx_dropped;
8156 dest->multicast += src->multicast;
8198 return -EINVAL;
8235 report_delta->used = true;
8236 netdev_hw_stats64_add(&report_delta->stats, stats);
8243 report_used->used = true;
8264 * netdev_get_xmit_slave - Get the xmit slave of master device
8278 const struct net_device_ops *ops = dev->netdev_ops;
8280 if (!ops->ndo_get_xmit_slave)
8282 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8289 const struct net_device_ops *ops = dev->netdev_ops;
8291 if (!ops->ndo_sk_get_lower_dev)
8293 return ops->ndo_sk_get_lower_dev(dev, sk);
8297 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8325 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8326 if (!net_eq(net, dev_net(iter->dev)))
8328 netdev_adjacent_sysfs_add(iter->dev, dev,
8329 &iter->dev->adj_list.lower);
8330 netdev_adjacent_sysfs_add(dev, iter->dev,
8331 &dev->adj_list.upper);
8334 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8335 if (!net_eq(net, dev_net(iter->dev)))
8337 netdev_adjacent_sysfs_add(iter->dev, dev,
8338 &iter->dev->adj_list.upper);
8339 netdev_adjacent_sysfs_add(dev, iter->dev,
8340 &dev->adj_list.lower);
8350 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8351 if (!net_eq(net, dev_net(iter->dev)))
8353 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8354 &iter->dev->adj_list.lower);
8355 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8356 &dev->adj_list.upper);
8359 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8360 if (!net_eq(net, dev_net(iter->dev)))
8362 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8363 &iter->dev->adj_list.upper);
8364 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8365 &dev->adj_list.lower);
8375 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8376 if (!net_eq(net, dev_net(iter->dev)))
8378 netdev_adjacent_sysfs_del(iter->dev, oldname,
8379 &iter->dev->adj_list.lower);
8380 netdev_adjacent_sysfs_add(iter->dev, dev,
8381 &iter->dev->adj_list.lower);
8384 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8385 if (!net_eq(net, dev_net(iter->dev)))
8387 netdev_adjacent_sysfs_del(iter->dev, oldname,
8388 &iter->dev->adj_list.upper);
8389 netdev_adjacent_sysfs_add(iter->dev, dev,
8390 &iter->dev->adj_list.upper);
8401 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8405 return lower->private;
8411 * netdev_lower_state_changed - Dispatch event about lower device state change
8434 const struct net_device_ops *ops = dev->netdev_ops;
8436 if (ops->ndo_change_rx_flags)
8437 ops->ndo_change_rx_flags(dev, flags);
8442 unsigned int old_flags = dev->flags;
8448 dev->flags |= IFF_PROMISC;
8449 dev->promiscuity += inc;
8450 if (dev->promiscuity == 0) {
8456 dev->flags &= ~IFF_PROMISC;
8458 dev->promiscuity -= inc;
8460 return -EOVERFLOW;
8463 if (dev->flags != old_flags) {
8465 dev->flags & IFF_PROMISC ? "entered" : "left");
8471 dev->name, (dev->flags & IFF_PROMISC),
8487 * dev_set_promiscuity - update promiscuity count on a device
8499 unsigned int old_flags = dev->flags;
8505 if (dev->flags != old_flags)
8513 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8517 dev->flags |= IFF_ALLMULTI;
8518 dev->allmulti += inc;
8519 if (dev->allmulti == 0) {
8525 dev->flags &= ~IFF_ALLMULTI;
8527 dev->allmulti -= inc;
8529 return -EOVERFLOW;
8532 if (dev->flags ^ old_flags) {
8534 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8539 dev->gflags ^ old_gflags, 0, NULL);
8545 * dev_set_allmulti - update allmulti count on a device
8571 const struct net_device_ops *ops = dev->netdev_ops;
8574 if (!(dev->flags&IFF_UP))
8580 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8584 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8586 dev->uc_promisc = true;
8587 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8588 __dev_set_promiscuity(dev, -1, false);
8589 dev->uc_promisc = false;
8593 if (ops->ndo_set_rx_mode)
8594 ops->ndo_set_rx_mode(dev);
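dev_set_promiscuity() and dev_set_allmulti() keep reference counts, so every +1 must later be matched by a -1, and __dev_set_rx_mode() then pushes the resulting filter state to the driver. A short, hedged sketch of an upper device taking both references on a lower device under RTNL; the function name is invented:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int sample_enslave(struct net_device *lower_dev)
{
	int err;

	ASSERT_RTNL();

	err = dev_set_promiscuity(lower_dev, 1);	/* +1 on the count */
	if (err)
		return err;

	err = dev_set_allmulti(lower_dev, 1);
	if (err)
		dev_set_promiscuity(lower_dev, -1);	/* roll back */

	return err;
}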
8605 * dev_get_flags - get flags reported to userspace
8614 flags = (dev->flags & ~(IFF_PROMISC |
8619 (dev->gflags & (IFF_PROMISC |
8638 unsigned int old_flags = dev->flags;
8647 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8650 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8676 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8677 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8678 unsigned int old_flags = dev->flags;
8680 dev->gflags ^= IFF_PROMISC;
8683 if (dev->flags != old_flags)
8691 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8692 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8694 dev->gflags ^= IFF_ALLMULTI;
8705 unsigned int changes = dev->flags ^ old_flags;
8711 if (dev->flags & IFF_UP)
8717 if (dev->flags & IFF_UP &&
8731 * dev_change_flags - change device settings
8743 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8749 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8757 const struct net_device_ops *ops = dev->netdev_ops;
8759 if (ops->ndo_change_mtu)
8760 return ops->ndo_change_mtu(dev, new_mtu);
8762 /* Pairs with all the lockless reads of dev->mtu in the stack */
8763 WRITE_ONCE(dev->mtu, new_mtu);
8772 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8774 return -EINVAL;
8777 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8779 return -EINVAL;
8785 * dev_set_mtu_ext - Change maximum transfer unit
8797 if (new_mtu == dev->mtu)
8805 return -ENODEV;
8812 orig_mtu = dev->mtu;
8839 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
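dev_set_mtu() (through dev_set_mtu_ext() above) checks the new value against min_mtu/max_mtu, hands it to the driver's ndo_change_mtu and notifies NETDEV_CHANGEMTU listeners, reverting to orig_mtu if one of them vetoes. A hedged usage sketch, with 9000 as an arbitrary jumbo value:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int sample_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* -EINVAL if outside [min_mtu, max_mtu] */
	rtnl_unlock();

	return err;
}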
8845 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8851 unsigned int orig_len = dev->tx_queue_len;
8855 return -ERANGE;
8858 dev->tx_queue_len = new_len;
8872 dev->tx_queue_len = orig_len;
8877 * dev_set_group - Change group this device belongs to
8883 dev->group = new_group;
8887 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8908 * dev_set_mac_address - Change Media Access Control Address
8918 const struct net_device_ops *ops = dev->netdev_ops;
8921 if (!ops->ndo_set_mac_address)
8922 return -EOPNOTSUPP;
8923 if (sa->sa_family != dev->type)
8924 return -EINVAL;
8926 return -ENODEV;
8927 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8930 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8931 err = ops->ndo_set_mac_address(dev, sa);
8935 dev->addr_assign_type = NET_ADDR_SET;
8937 add_device_randomness(dev->dev_addr, dev->addr_len);
8958 size_t size = sizeof(sa->sa_data_min);
8967 ret = -ENODEV;
8970 if (!dev->addr_len)
8971 memset(sa->sa_data, 0, size);
8973 memcpy(sa->sa_data, dev->dev_addr,
8974 min_t(size_t, size, dev->addr_len));
8975 sa->sa_family = dev->type;
8985 * dev_change_carrier - Change device carrier
8993 const struct net_device_ops *ops = dev->netdev_ops;
8995 if (!ops->ndo_change_carrier)
8996 return -EOPNOTSUPP;
8998 return -ENODEV;
8999 return ops->ndo_change_carrier(dev, new_carrier);
9003 * dev_get_phys_port_id - Get device physical port ID
9012 const struct net_device_ops *ops = dev->netdev_ops;
9014 if (!ops->ndo_get_phys_port_id)
9015 return -EOPNOTSUPP;
9016 return ops->ndo_get_phys_port_id(dev, ppid);
9020 * dev_get_phys_port_name - Get device physical port name
9030 const struct net_device_ops *ops = dev->netdev_ops;
9033 if (ops->ndo_get_phys_port_name) {
9034 err = ops->ndo_get_phys_port_name(dev, name, len);
9035 if (err != -EOPNOTSUPP)
9042 * dev_get_port_parent_id - Get the device's port parent identifier
9053 const struct net_device_ops *ops = dev->netdev_ops;
9059 if (ops->ndo_get_port_parent_id) {
9060 err = ops->ndo_get_port_parent_id(dev, ppid);
9061 if (err != -EOPNOTSUPP)
9066 if (!recurse || err != -EOPNOTSUPP)
9076 return -EOPNOTSUPP;
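/*
 * Example (editor's sketch, not part of dev.c): a switch-port driver filling
 * in the ndo hooks consumed by dev_get_phys_port_name() and
 * dev_get_port_parent_id() above.  "struct example_port" and its fields are
 * hypothetical driver state reachable via netdev_priv().
 */
struct example_port {
	u32 port_index;
	u8 switch_id[4];
};

static int example_get_phys_port_name(struct net_device *dev,
				      char *name, size_t len)
{
	struct example_port *port = netdev_priv(dev);

	if (snprintf(name, len, "p%u", port->port_index) >= (int)len)
		return -EINVAL;		/* name would have been truncated */
	return 0;
}

static int example_get_port_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	struct example_port *port = netdev_priv(dev);

	ppid->id_len = sizeof(port->switch_id);
	memcpy(ppid->id, port->switch_id, ppid->id_len);
	return 0;
}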
9084 * netdev_port_same_parent_id - Indicate if two network devices have
9103 * dev_change_proto_down - set carrier according to proto_down.
9110 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9111 return -EOPNOTSUPP;
9113 return -ENODEV;
9118 dev->proto_down = proto_down;
9123  * dev_change_proto_down_reason - update the proto_down reason bits for a device
9135 dev->proto_down_reason = value;
9139 dev->proto_down_reason |= BIT(b);
9141 dev->proto_down_reason &= ~BIT(b);
9160 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9170 return dev->netdev_ops->ndo_bpf;
9179 return dev->xdp_state[mode].link;
9188 return link->link.prog;
9189 return dev->xdp_state[mode].prog;
9198 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9208 return prog ? prog->aux->id : 0;
9214 dev->xdp_state[mode].link = link;
9215 dev->xdp_state[mode].prog = NULL;
9221 dev->xdp_state[mode].link = NULL;
9222 dev->xdp_state[mode].prog = prog;
9279 /* auto-detach link from net device */
9282 link->dev = NULL;
9306 return -EINVAL;
9310 return -EINVAL;
9315 return -EINVAL;
9321 return -EINVAL;
9326 return -EINVAL;
9333 return -EBUSY;
9340 return -EEXIST;
9348 return -EBUSY;
9352 return -EEXIST;
9357 new_prog = link->link.prog;
9366 return -EBUSY;
9370 return -EEXIST;
9372 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9374 return -EINVAL;
9376 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9378 return -EINVAL;
9380 if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
9381 NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
9382 return -EINVAL;
9384 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9386 return -EINVAL;
9388 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9390 return -EINVAL;
9399 return -EOPNOTSUPP;
9421 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9433 mode = dev_xdp_mode(dev, link->flags);
9435 return -EINVAL;
9449 /* if racing with net_device's tear down, xdp_link->dev might be
9450 * already NULL, in which case link was already auto-detached
9452 if (xdp_link->dev) {
9453 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9454 xdp_link->dev = NULL;
9480 if (xdp_link->dev)
9481 ifindex = xdp_link->dev->ifindex;
9494 if (xdp_link->dev)
9495 ifindex = xdp_link->dev->ifindex;
9498 info->xdp.ifindex = ifindex;
9512 /* link might have been auto-released already, so fail */
9513 if (!xdp_link->dev) {
9514 err = -ENOLINK;
9518 if (old_prog && link->prog != old_prog) {
9519 err = -EPERM;
9522 old_prog = link->prog;
9523 if (old_prog->type != new_prog->type ||
9524 old_prog->expected_attach_type != new_prog->expected_attach_type) {
9525 err = -EINVAL;
9530 /* no-op, don't disturb drivers */
9535 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9536 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9537 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9538 xdp_link->flags, new_prog);
9542 old_prog = xchg(&link->prog, new_prog);
9561 struct net *net = current->nsproxy->net_ns;
9569 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9572 return -EINVAL;
9577 err = -ENOMEM;
9581 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9582 link->dev = dev;
9583 link->flags = attr->link_create.flags;
9585 err = bpf_link_prime(&link->link, &link_primer);
9595 link->dev = NULL;
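/*
 * Example (editor's sketch, not part of dev.c): the driver side of the
 * attach path above.  For native mode, dev_xdp_install() invokes the
 * driver's ndo_bpf with XDP_SETUP_PROG; the core keeps the program
 * reference in dev->xdp_state and drops the previous one after a
 * successful swap.  "example_priv" and example_reconfigure_rings() are
 * hypothetical driver pieces.
 */
struct example_priv {
	struct bpf_prog __rcu *xdp_prog;
};

static int example_reconfigure_rings(struct example_priv *priv)
{
	/* hypothetical: resize RX buffers so XDP gets its headroom/tailroom */
	return 0;
}

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* publish the new program; the RX path reads it under RCU */
		rcu_assign_pointer(priv->xdp_prog, bpf->prog);
		return example_reconfigure_rings(priv);
	default:
		return -EINVAL;
	}
}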
9615 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
9620 * @flags: xdp-related flags
9661 * dev_index_reserve() - allocate an ifindex in a namespace
9669 * Return: a suitable unique value for a new device interface number or -errno.
9677 return -EINVAL;
9681 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9682 xa_limit_31b, &net->ifindex, GFP_KERNEL);
9684 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9694 WARN_ON(xa_erase(&net->dev_by_index, ifindex));
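/*
 * Example (editor's sketch, not part of dev.c; needs <linux/xarray.h>): the
 * reserve-then-publish pattern used by dev_index_reserve() above, applied to
 * a hypothetical object table.  The ID is reserved with a NULL entry first;
 * the real pointer is stored once the object is fully initialised.
 */
static DEFINE_XARRAY_ALLOC1(example_objects);	/* IDs start at 1, like ifindex */
static u32 example_next_id;

static int example_reserve_id(u32 *id)
{
	int err;

	/* cyclic allocation avoids quickly reusing freed IDs */
	err = xa_alloc_cyclic(&example_objects, id, NULL,
			      xa_limit_31b, &example_next_id, GFP_KERNEL);
	return err < 0 ? err : 0;	/* 1 just means the counter wrapped */
}

static void example_publish(u32 id, void *obj)
{
	/* overwrite the NULL placeholder reserved above */
	xa_store(&example_objects, id, obj, GFP_KERNEL);
}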
9703 list_add_tail(&dev->todo_list, &net_todo_list);
9704 atomic_inc(&dev_net(dev)->dev_unreg_count);
9716 if (!(upper->wanted_features & feature)
9719 &feature, upper->name);
9736 if (!(features & feature) && (lower->features & feature)) {
9738 &feature, lower->name);
9739 lower->wanted_features &= ~feature;
9742 if (unlikely(lower->features & feature))
9744 &feature, lower->name);
9795 if ((features & dev->gso_partial_features) &&
9799 features &= ~dev->gso_partial_features;
9814 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9817 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9822 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9828 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9856 int err = -1;
9862 if (dev->netdev_ops->ndo_fix_features)
9863 features = dev->netdev_ops->ndo_fix_features(dev, features);
9872 if (dev->features == features)
9875 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9876 &dev->features, &features);
9878 if (dev->netdev_ops->ndo_set_features)
9879 err = dev->netdev_ops->ndo_set_features(dev, features);
9886 err, &features, &dev->features);
9887 /* return non-0 since some features might have changed and
9890 return -1;
9901 netdev_features_t diff = features ^ dev->features;
9907 * Thus we need to update dev->features
9912 dev->features = features;
9921 dev->features = features;
9930 dev->features = features;
9937 dev->features = features;
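/*
 * Example (editor's sketch, not part of dev.c): how a driver participates in
 * the feature negotiation above.  Togglable bits go into dev->hw_features at
 * probe time; when an internal reconfiguration changes what the hardware can
 * do, the driver re-runs negotiation under RTNL with
 * netdev_update_features().  The checksum/TSO selection is illustrative.
 */
static void example_setup_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	dev->features |= dev->hw_features;	/* enabled by default */
}

static void example_after_reconfig(struct net_device *dev, bool tso_possible)
{
	rtnl_lock();
	if (tso_possible)
		dev->hw_features |= NETIF_F_TSO;
	else
		dev->hw_features &= ~NETIF_F_TSO;
	netdev_update_features(dev);	/* runs ndo_fix_features/ndo_set_features */
	rtnl_unlock();
}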
9944 * netdev_update_features - recalculate device features
9947 * Recalculate dev->features set and send notifications if it
9959 * netdev_change_features - recalculate device features
9962 * Recalculate dev->features set and send notifications even
9964 * netdev_update_features() if also dev->vlan_features might
9976 * netif_stacked_transfer_operstate - transfer operstate
9987 if (rootdev->operstate == IF_OPER_DORMANT)
9992 if (rootdev->operstate == IF_OPER_TESTING)
10006 unsigned int i, count = dev->num_rx_queues;
10015 return -ENOMEM;
10017 dev->_rx = rx;
10022 /* XDP RX-queue setup */
10031 while (i--)
10033 kvfree(dev->_rx);
10034 dev->_rx = NULL;
10040 unsigned int i, count = dev->num_rx_queues;
10043 if (!dev->_rx)
10047 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10049 kvfree(dev->_rx);
10056 spin_lock_init(&queue->_xmit_lock);
10057 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10058 queue->xmit_lock_owner = -1;
10060 queue->dev = dev;
10062 dql_init(&queue->dql, HZ);
10068 kvfree(dev->_tx);
10073 unsigned int count = dev->num_tx_queues;
10078 return -EINVAL;
10082 return -ENOMEM;
10084 dev->_tx = tx;
10087 spin_lock_init(&dev->tx_global_lock);
10096 for (i = 0; i < dev->num_tx_queues; i++) {
10112 if (dev->netdev_ops->ndo_get_peer_dev &&
10113 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10114 return -EOPNOTSUPP;
10116 switch (dev->pcpu_stat_type) {
10120 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10123 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10126 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10129 return -EINVAL;
10132 return v ? 0 : -ENOMEM;
10137 switch (dev->pcpu_stat_type) {
10141 free_percpu(dev->lstats);
10144 free_percpu(dev->tstats);
10147 free_percpu(dev->dstats);
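/*
 * Example (editor's sketch, not part of dev.c; constants assumed from this
 * tree's netdevice.h): letting the core manage the per-CPU stats shown
 * above.  A driver declares which flavour it wants before registration;
 * dev->tstats is then allocated by netdev_do_alloc_pcpu_stats() and freed on
 * unregister, and the datapath updates it with the dev_sw_netstats_*_add()
 * helpers.
 */
static void example_setup_stats(struct net_device *dev)
{
	/* core allocates/frees dev->tstats around register/unregister */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static void example_rx_one(struct net_device *dev, struct sk_buff *skb)
{
	dev_sw_netstats_rx_add(dev, skb->len);	/* lockless per-CPU update */
	netif_rx(skb);
}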
10153 * register_netdevice() - register a network device
10158 * Callers must hold the rtnl lock - you may want register_netdev()
10174 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10177 ret = ethtool_check_ops(dev->ethtool_ops);
10181 spin_lock_init(&dev->addr_list_lock);
10184 ret = dev_get_valid_name(net, dev, dev->name);
10188 ret = -ENOMEM;
10189 dev->name_node = netdev_name_node_head_alloc(dev);
10190 if (!dev->name_node)
10194 if (dev->netdev_ops->ndo_init) {
10195 ret = dev->netdev_ops->ndo_init(dev);
10198 ret = -EIO;
10203 if (((dev->hw_features | dev->features) &
10205 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10206 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10208 ret = -EINVAL;
10216 ret = dev_index_reserve(net, dev->ifindex);
10219 dev->ifindex = ret;
10224 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10225 dev->features |= NETIF_F_SOFT_FEATURES;
10227 if (dev->udp_tunnel_nic_info) {
10228 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10229 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10232 dev->wanted_features = dev->features & dev->hw_features;
10234 if (!(dev->flags & IFF_LOOPBACK))
10235 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10242 if (dev->hw_features & NETIF_F_TSO)
10243 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10244 if (dev->vlan_features & NETIF_F_TSO)
10245 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10246 if (dev->mpls_features & NETIF_F_TSO)
10247 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10248 if (dev->hw_enc_features & NETIF_F_TSO)
10249 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10253 dev->vlan_features |= NETIF_F_HIGHDMA;
10257 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10261 dev->mpls_features |= NETIF_F_SG;
10270 dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10282 set_bit(__LINK_STATE_PRESENT, &dev->state);
10288 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10291 add_device_randomness(dev->dev_addr, dev->addr_len);
10297 if (dev->addr_assign_type == NET_ADDR_PERM)
10298 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10305 dev->needs_free_netdev = false;
10313 if (!dev->rtnl_link_ops ||
10314 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10323 dev_index_release(net, dev->ifindex);
10327 if (dev->netdev_ops->ndo_uninit)
10328 dev->netdev_ops->ndo_uninit(dev);
10329 if (dev->priv_destructor)
10330 dev->priv_destructor(dev);
10332 netdev_name_node_free(dev->name_node);
10338 * init_dummy_netdev - init a dummy network device for NAPI
10359 dev->reg_state = NETREG_DUMMY;
10362 INIT_LIST_HEAD(&dev->napi_list);
10365 set_bit(__LINK_STATE_PRESENT, &dev->state);
10366 set_bit(__LINK_STATE_START, &dev->state);
10382 * register_netdev - register a network device
10399 return -EINTR;
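/*
 * Example (editor's sketch, not part of dev.c; also needs
 * <linux/etherdevice.h>): the usual driver pairing around register_netdev()
 * and register_netdevice() above.  register_netdev() takes RTNL itself,
 * which is why a plain probe path prefers it.  The ops table and xmit stub
 * are hypothetical placeholders.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);			/* placeholder datapath */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_xmit,
};

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);		/* ether_setup()-based device */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;
	eth_hw_addr_random(dev);		/* sets NET_ADDR_RANDOM */

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);		/* safe: never reached NETREG_REGISTERED */
	return err;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);			/* takes RTNL, runs the todo list */
	free_netdev(dev);
}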
10412 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10415 return refcount_read(&dev->dev_refcnt);
10425 * netdev_wait_allrefs_any - wait until all references are gone.
10462 &dev->state)) {
10495 dev->name, netdev_refcnt_read(dev));
10496 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10541 list_del_init(&dev->unlink_list);
10542 dev->nested_level = dev->lower_level - 1;
10556 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10558 list_del(&dev->todo_list);
10563 dev->reg_state = NETREG_UNREGISTERED;
10570 list_del(&dev->todo_list);
10574 BUG_ON(!list_empty(&dev->ptype_all));
10575 BUG_ON(!list_empty(&dev->ptype_specific));
10576 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10577 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10580 if (dev->priv_destructor)
10581 dev->priv_destructor(dev);
10582 if (dev->needs_free_netdev)
10585 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10589 kobject_put(&dev->dev.kobj);
10610 sizeof(*stats64) - n * sizeof(u64));
10621 if (p && cmpxchg(&dev->core_stats, NULL, p))
10625 return READ_ONCE(dev->core_stats);
10630 * dev_get_stats - get network device statistics
10636 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10642 const struct net_device_ops *ops = dev->netdev_ops;
10645 if (ops->ndo_get_stats64) {
10647 ops->ndo_get_stats64(dev, storage);
10648 } else if (ops->ndo_get_stats) {
10649 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10651 netdev_stats_to_stats64(storage, &dev->stats);
10655 p = READ_ONCE(dev->core_stats);
10662 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10663 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10664 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10665 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10673 * dev_fetch_sw_netstats - get per-cpu network device statistics
10675 * @netstats: per-cpu network stats to read from
10677 * Read per-cpu network statistics and populate the related fields in @s.
10691 start = u64_stats_fetch_begin(&stats->syncp);
10692 rx_packets = u64_stats_read(&stats->rx_packets);
10693 rx_bytes = u64_stats_read(&stats->rx_bytes);
10694 tx_packets = u64_stats_read(&stats->tx_packets);
10695 tx_bytes = u64_stats_read(&stats->tx_bytes);
10696 } while (u64_stats_fetch_retry(&stats->syncp, start));
10698 s->rx_packets += rx_packets;
10699 s->rx_bytes += rx_bytes;
10700 s->tx_packets += tx_packets;
10701 s->tx_bytes += tx_bytes;
10707 * dev_get_tstats64 - ndo_get_stats64 implementation
10711 * Populate @s from dev->stats and dev->tstats. Can be used as
10716 netdev_stats_to_stats64(s, &dev->stats);
10717 dev_fetch_sw_netstats(s, dev->tstats);
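/*
 * Example (editor's sketch, not part of dev.c): two ways a driver can feed
 * dev_get_stats() above.  A tstats-based driver can point ndo_get_stats64
 * straight at dev_get_tstats64(); one with extra software counters can
 * combine dev_fetch_sw_netstats() with its own fields.  "struct
 * example_stats_priv" and rx_crc_errors_sw are hypothetical.
 */
static const struct net_device_ops example_tstats_ops = {
	.ndo_get_stats64 = dev_get_tstats64,	/* dev->stats + dev->tstats */
};

struct example_stats_priv {
	u64 rx_crc_errors_sw;
};

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *s)
{
	struct example_stats_priv *priv = netdev_priv(dev);

	dev_fetch_sw_netstats(s, dev->tstats);		/* packet/byte totals */
	s->rx_crc_errors = priv->rx_crc_errors_sw;	/* driver-specific counter */
}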
10732 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10733 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10734 rcu_assign_pointer(dev->ingress_queue, queue);
10744 if (dev->ethtool_ops == &default_ethtool_ops)
10745 dev->ethtool_ops = ops;
10750 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10758 WARN_ON(dev->reg_state == NETREG_REGISTERED);
10761 dev->gro_flush_timeout = 20000;
10762 dev->napi_defer_hard_irqs = 1;
10769 char *addr = (char *)dev - dev->padded;
10775 * alloc_netdev_mqs - allocate network device
10796 BUG_ON(strlen(name) >= sizeof(dev->name));
10810 /* ensure 32-byte alignment of private area */
10814 /* ensure 32-byte alignment of whole construct */
10815 alloc_size += NETDEV_ALIGN - 1;
10822 dev->padded = (char *)dev - (char *)p;
10824 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10826 dev->pcpu_refcnt = alloc_percpu(int);
10827 if (!dev->pcpu_refcnt)
10831 refcount_set(&dev->dev_refcnt, 1);
10842 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10843 dev->xdp_zc_max_segs = 1;
10844 dev->gso_max_segs = GSO_MAX_SEGS;
10845 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10846 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10847 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10848 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10849 dev->tso_max_segs = TSO_MAX_SEGS;
10850 dev->upper_level = 1;
10851 dev->lower_level = 1;
10853 dev->nested_level = 0;
10854 INIT_LIST_HEAD(&dev->unlink_list);
10857 INIT_LIST_HEAD(&dev->napi_list);
10858 INIT_LIST_HEAD(&dev->unreg_list);
10859 INIT_LIST_HEAD(&dev->close_list);
10860 INIT_LIST_HEAD(&dev->link_watch_list);
10861 INIT_LIST_HEAD(&dev->adj_list.upper);
10862 INIT_LIST_HEAD(&dev->adj_list.lower);
10863 INIT_LIST_HEAD(&dev->ptype_all);
10864 INIT_LIST_HEAD(&dev->ptype_specific);
10865 INIT_LIST_HEAD(&dev->net_notifier_list);
10867 hash_init(dev->qdisc_hash);
10869 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10872 if (!dev->tx_queue_len) {
10873 dev->priv_flags |= IFF_NO_QUEUE;
10874 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10877 dev->num_tx_queues = txqs;
10878 dev->real_num_tx_queues = txqs;
10882 dev->num_rx_queues = rxqs;
10883 dev->real_num_rx_queues = rxqs;
10887 strcpy(dev->name, name);
10888 dev->name_assign_type = name_assign_type;
10889 dev->group = INIT_NETDEV_GROUP;
10890 if (!dev->ethtool_ops)
10891 dev->ethtool_ops = &default_ethtool_ops;
10903 free_percpu(dev->pcpu_refcnt);
10912 * free_netdev - free network device
10930 if (dev->reg_state == NETREG_UNREGISTERING) {
10932 dev->needs_free_netdev = true;
10939 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10944 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10947 ref_tracker_dir_exit(&dev->refcnt_tracker);
10949 free_percpu(dev->pcpu_refcnt);
10950 dev->pcpu_refcnt = NULL;
10952 free_percpu(dev->core_stats);
10953 dev->core_stats = NULL;
10954 free_percpu(dev->xdp_bulkq);
10955 dev->xdp_bulkq = NULL;
10958 if (dev->reg_state == NETREG_UNINITIALIZED) {
10963 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10964 dev->reg_state = NETREG_RELEASED;
10967 put_device(&dev->dev);
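/*
 * Example (editor's sketch, not part of dev.c): allocating a multiqueue
 * Ethernet device with alloc_netdev_mqs() above.  The queue counts become
 * dev->num_tx_queues/num_rx_queues and cannot grow afterwards; only the
 * real_num_* values may be adjusted within those maxima.  "eth%d" is
 * resolved when the device is registered.
 */
static struct net_device *example_alloc_mq(unsigned int nqueues)
{
	return alloc_netdev_mqs(0,			/* no private area */
				"eth%d", NET_NAME_ENUM,
				ether_setup,		/* type, MTU, broadcast */
				nqueues, nqueues);	/* TX and RX queue counts */
}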
10972 * synchronize_net - Synchronize with packet receive processing
10988 * unregister_netdevice_queue - remove device from the kernel
11005 list_move_tail(&dev->unreg_list, head);
11009 list_add(&dev->unreg_list, &single);
11032 if (dev->reg_state == NETREG_UNINITIALIZED) {
11034 dev->name, dev);
11037 list_del(&dev->unreg_list);
11040 dev->dismantle = true;
11041 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11046 list_add_tail(&dev->close_list, &close_head);
11053 dev->reg_state = NETREG_UNREGISTERING;
11076 if (!dev->rtnl_link_ops ||
11077 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11089 netdev_name_node_free(dev->name_node);
11093 if (dev->netdev_ops->ndo_uninit)
11094 dev->netdev_ops->ndo_uninit(dev);
11114 netdev_put(dev, &dev->dev_registered_tracker);
11122 * unregister_netdevice_many - unregister many devices
11135 * unregister_netdev - remove device from the kernel
11154  * __dev_change_net_namespace - move device to a different network namespace
11180 err = -EINVAL;
11181 if (dev->features & NETIF_F_NETNS_LOCAL)
11185 if (dev->reg_state != NETREG_REGISTERED)
11196 err = -EEXIST;
11197 if (netdev_name_in_use(net, dev->name)) {
11206 err = -EEXIST;
11208 if (netdev_name_in_use(net, name_node->name))
11218 err = dev_index_reserve(net, dev->ifindex);
11219 if (err == -EBUSY)
11244 * Note that dev->reg_state stays at NETREG_REGISTERED.
11262 /* Send a netdev-removed uevent to the old namespace */
11263 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11266 /* Move per-net netdevice notifiers that are following the netdevice */
11271 dev->ifindex = new_ifindex;
11273 /* Send a netdev-add uevent to the new namespace */
11274 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11278 strscpy(dev->name, new_name, IFNAMSIZ);
11281 err = device_rename(&dev->dev, dev->name);
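/*
 * Example (editor's sketch, not part of dev.c): an in-kernel caller moving a
 * device with the public wrapper around the function above.  The "eth%d"
 * pattern lets the core pick a fresh name if the current one clashes in the
 * target namespace, matching the EEXIST handling shown here.  The caller is
 * assumed to hold a reference on "net".
 */
static int example_move_to_netns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}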
11322 list_skb = &sd->completion_queue;
11324 list_skb = &(*list_skb)->next;
11326 *list_skb = oldsd->completion_queue;
11327 oldsd->completion_queue = NULL;
11330 if (oldsd->output_queue) {
11331 *sd->output_queue_tailp = oldsd->output_queue;
11332 sd->output_queue_tailp = oldsd->output_queue_tailp;
11333 oldsd->output_queue = NULL;
11334 oldsd->output_queue_tailp = &oldsd->output_queue;
11340 while (!list_empty(&oldsd->poll_list)) {
11341 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11345 list_del_init(&napi->poll_list);
11346 if (napi->poll == process_backlog)
11347 napi->state = 0;
11356 remsd = oldsd->rps_ipi_list;
11357 oldsd->rps_ipi_list = NULL;
11363 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11367 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11376 * netdev_increment_features - increment feature set by one
11422 INIT_LIST_HEAD(&net->dev_base_head);
11424 net->dev_name_head = netdev_create_hash();
11425 if (net->dev_name_head == NULL)
11428 net->dev_index_head = netdev_create_hash();
11429 if (net->dev_index_head == NULL)
11432 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
11434 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11439 kfree(net->dev_name_head);
11441 return -ENOMEM;
11445 * netdev_drivername - network driver for the device
11456 parent = dev->dev.parent;
11460 driver = parent->driver;
11461 if (driver && driver->name)
11462 return driver->name;
11469 if (dev && dev->dev.parent) {
11470 dev_printk_emit(level[1] - '0',
11471 dev->dev.parent,
11473 dev_driver_string(dev->dev.parent),
11474 dev_name(dev->dev.parent),
11529 kfree(net->dev_name_head);
11530 kfree(net->dev_index_head);
11531 xa_destroy(&net->dev_by_index);
11533 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11555 if (dev->features & NETIF_F_NETNS_LOCAL)
11559 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11563 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11568 if (netdev_name_in_use(&init_net, name_node->name)) {
11577 __func__, dev->name, err);
11602 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11603 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11629 int i, rc = -ENOMEM;
11656 skb_queue_head_init(&sd->input_pkt_queue);
11657 skb_queue_head_init(&sd->process_queue);
11659 skb_queue_head_init(&sd->xfrm_backlog);
11661 INIT_LIST_HEAD(&sd->poll_list);
11662 sd->output_queue_tailp = &sd->output_queue;
11664 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11665 sd->cpu = i;
11667 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11668 spin_lock_init(&sd->defer_lock);
11670 init_gro_hash(&sd->backlog);
11671 sd->backlog.poll = process_backlog;
11672 sd->backlog.weight = weight_p;