Lines Matching refs:dev

166 					   struct net_device *dev,
254 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, in netdev_name_node_alloc() argument
263 name_node->dev = dev; in netdev_name_node_alloc()
269 netdev_name_node_head_alloc(struct net_device *dev) in netdev_name_node_head_alloc() argument
273 name_node = netdev_name_node_alloc(dev, dev->name); in netdev_name_node_head_alloc()
327 int netdev_name_node_alt_create(struct net_device *dev, const char *name) in netdev_name_node_alt_create() argument
330 struct net *net = dev_net(dev); in netdev_name_node_alt_create()
335 name_node = netdev_name_node_alloc(dev, name); in netdev_name_node_alt_create()
340 list_add_tail(&name_node->list, &dev->name_node->list); in netdev_name_node_alt_create()
352 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) in netdev_name_node_alt_destroy() argument
355 struct net *net = dev_net(dev); in netdev_name_node_alt_destroy()
363 if (name_node == dev->name_node || name_node->dev != dev) in netdev_name_node_alt_destroy()
373 static void netdev_name_node_alt_flush(struct net_device *dev) in netdev_name_node_alt_flush() argument
377 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) in netdev_name_node_alt_flush()
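
The alt-name helpers above back the RTM_NEWLINKPROP/RTM_DELLINKPROP netlink operations. A minimal sketch of how an in-tree caller holding RTNL might drive them; the "eth0-mgmt" altname and the example_* wrapper are illustrative, not from the source:

/* Sketch: add, then remove, an alternative name. Both helpers
 * expect RTNL to be held; "eth0-mgmt" is a made-up altname.
 */
static int example_toggle_altname(struct net_device *dev)
{
        int err;

        ASSERT_RTNL();

        err = netdev_name_node_alt_create(dev, "eth0-mgmt");
        if (err)
                return err;             /* -EEXIST if the name is taken */

        return netdev_name_node_alt_destroy(dev, "eth0-mgmt");
}
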
382 static void list_netdevice(struct net_device *dev) in list_netdevice() argument
385 struct net *net = dev_net(dev); in list_netdevice()
390 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); in list_netdevice()
391 netdev_name_node_add(net, dev->name_node); in list_netdevice()
392 hlist_add_head_rcu(&dev->index_hlist, in list_netdevice()
393 dev_index_hash(net, dev->ifindex)); in list_netdevice()
396 netdev_for_each_altname(dev, name_node) in list_netdevice()
400 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL)); in list_netdevice()
408 static void unlist_netdevice(struct net_device *dev, bool lock) in unlist_netdevice() argument
411 struct net *net = dev_net(dev); in unlist_netdevice()
415 xa_erase(&net->dev_by_index, dev->ifindex); in unlist_netdevice()
417 netdev_for_each_altname(dev, name_node) in unlist_netdevice()
423 list_del_rcu(&dev->dev_list); in unlist_netdevice()
424 netdev_name_node_del(dev->name_node); in unlist_netdevice()
425 hlist_del_rcu(&dev->index_hlist); in unlist_netdevice()
429 dev_base_seq_inc(dev_net(dev)); in unlist_netdevice()
509 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) in netdev_set_addr_lockdep_class() argument
513 i = netdev_lock_pos(dev->type); in netdev_set_addr_lockdep_class()
514 lockdep_set_class_and_name(&dev->addr_list_lock, in netdev_set_addr_lockdep_class()
524 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) in netdev_set_addr_lockdep_class() argument
555 return pt->dev ? &pt->dev->ptype_all : &ptype_all; in ptype_head()
557 return pt->dev ? &pt->dev->ptype_specific : in ptype_head()
652 int dev_get_iflink(const struct net_device *dev) in dev_get_iflink() argument
654 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) in dev_get_iflink()
655 return dev->netdev_ops->ndo_get_iflink(dev); in dev_get_iflink()
657 return dev->ifindex; in dev_get_iflink()
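
dev_get_iflink() falls back to the device's own ifindex when the driver provides no ndo_get_iflink. A hedged sketch of a stacked driver supplying that hook; struct example_priv and its lower field are hypothetical:

struct example_priv {
        struct net_device *lower;       /* hypothetical lower device */
};

/* Report the lower device's ifindex as this device's iflink. */
static int example_get_iflink(const struct net_device *dev)
{
        const struct example_priv *priv = netdev_priv(dev);

        return READ_ONCE(priv->lower)->ifindex;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_get_iflink = example_get_iflink,
};
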
670 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) in dev_fill_metadata_dst() argument
674 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) in dev_fill_metadata_dst()
683 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); in dev_fill_metadata_dst()
697 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, in dev_fill_forward_path() argument
702 .dev = dev, in dev_fill_forward_path()
709 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { in dev_fill_forward_path()
710 last_dev = ctx.dev; in dev_fill_forward_path()
716 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); in dev_fill_forward_path()
720 if (WARN_ON_ONCE(last_dev == ctx.dev)) in dev_fill_forward_path()
724 if (!ctx.dev) in dev_fill_forward_path()
731 path->dev = ctx.dev; in dev_fill_forward_path()
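
dev_fill_forward_path() walks ndo_fill_forward_path hooks from the top device down to the real transmitting device; the netfilter flowtable uses it to build hardware-offload paths. A sketch of reading the resulting stack (example_dump_path is illustrative):

static void example_dump_path(const struct net_device *dev, const u8 *daddr)
{
        struct net_device_path_stack stack;
        int i;

        if (dev_fill_forward_path(dev, daddr, &stack) < 0)
                return;

        for (i = 0; i < stack.num_paths; i++)
                pr_info("hop %d: path type %d\n", i, stack.path[i].type);
}
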
754 return node_name ? node_name->dev : NULL; in __dev_get_by_name()
775 return node_name ? node_name->dev : NULL; in dev_get_by_name_rcu()
782 struct net_device *dev; in dev_get_by_name() local
785 dev = dev_get_by_name_rcu(net, name); in dev_get_by_name()
786 dev_hold(dev); in dev_get_by_name()
788 return dev; in dev_get_by_name()
808 struct net_device *dev; in netdev_get_by_name() local
810 dev = dev_get_by_name(net, name); in netdev_get_by_name()
811 if (dev) in netdev_get_by_name()
812 netdev_tracker_alloc(dev, tracker, gfp); in netdev_get_by_name()
813 return dev; in netdev_get_by_name()
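
netdev_get_by_name() pairs the classic dev_get_by_name() reference with a netdevice_tracker entry so leaked references can be attributed. A sketch under the assumption that the caller later drops it with netdev_put(); "eth0" is illustrative:

static int example_tracked_lookup(struct net *net)
{
        netdevice_tracker tracker;
        struct net_device *dev;

        dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
        if (!dev)
                return -ENODEV;

        netdev_info(dev, "ifindex %d\n", dev->ifindex);

        netdev_put(dev, &tracker);      /* drops ref and tracker together */
        return 0;
}
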
831 struct net_device *dev; in __dev_get_by_index() local
834 hlist_for_each_entry(dev, head, index_hlist) in __dev_get_by_index()
835 if (dev->ifindex == ifindex) in __dev_get_by_index()
836 return dev; in __dev_get_by_index()
855 struct net_device *dev; in dev_get_by_index_rcu() local
858 hlist_for_each_entry_rcu(dev, head, index_hlist) in dev_get_by_index_rcu()
859 if (dev->ifindex == ifindex) in dev_get_by_index_rcu()
860 return dev; in dev_get_by_index_rcu()
869 struct net_device *dev; in dev_get_by_index() local
872 dev = dev_get_by_index_rcu(net, ifindex); in dev_get_by_index()
873 dev_hold(dev); in dev_get_by_index()
875 return dev; in dev_get_by_index()
894 struct net_device *dev; in netdev_get_by_index() local
896 dev = dev_get_by_index(net, ifindex); in netdev_get_by_index()
897 if (dev) in netdev_get_by_index()
898 netdev_tracker_alloc(dev, tracker, gfp); in netdev_get_by_index()
899 return dev; in netdev_get_by_index()
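
The _rcu lookups return an unreferenced pointer that is only stable inside the read-side section; dev_get_by_index() above simply wraps that with dev_hold(). The same pattern, spelled out as a sketch:

static struct net_device *example_pin_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);          /* pin before leaving RCU */
        rcu_read_unlock();

        return dev;                     /* caller must dev_put() */
}
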
924 return napi ? napi->dev : NULL; in dev_get_by_napi_id()
936 struct net_device *dev; in netdev_get_name() local
942 dev = dev_get_by_index_rcu(net, ifindex); in netdev_get_name()
943 if (!dev) { in netdev_get_name()
948 strcpy(name, dev->name); in netdev_get_name()
974 struct net_device *dev; in dev_getbyhwaddr_rcu() local
976 for_each_netdev_rcu(net, dev) in dev_getbyhwaddr_rcu()
977 if (dev->type == type && in dev_getbyhwaddr_rcu()
978 !memcmp(dev->dev_addr, ha, dev->addr_len)) in dev_getbyhwaddr_rcu()
979 return dev; in dev_getbyhwaddr_rcu()
987 struct net_device *dev, *ret = NULL; in dev_getfirstbyhwtype() local
990 for_each_netdev_rcu(net, dev) in dev_getfirstbyhwtype()
991 if (dev->type == type) { in dev_getfirstbyhwtype()
992 dev_hold(dev); in dev_getfirstbyhwtype()
993 ret = dev; in dev_getfirstbyhwtype()
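
dev_getbyhwaddr_rcu() scans the per-namespace device list and takes no reference, so the caller must be in an RCU read-side section and pin the result itself. A sketch for an Ethernet MAC lookup:

static struct net_device *example_by_mac(struct net *net, const char *mac)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();

        return dev;
}
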
1015 struct net_device *dev, *ret; in __dev_get_by_flags() local
1020 for_each_netdev(net, dev) { in __dev_get_by_flags()
1021 if (((dev->flags ^ if_flags) & mask) == 0) { in __dev_get_by_flags()
1022 ret = dev; in __dev_get_by_flags()
1137 static int dev_prep_valid_name(struct net *net, struct net_device *dev, in dev_prep_valid_name() argument
1158 struct net_device *dev, in dev_alloc_name_ns() argument
1167 strscpy(dev->name, buf, IFNAMSIZ); in dev_alloc_name_ns()
1185 int dev_alloc_name(struct net_device *dev, const char *name) in dev_alloc_name() argument
1187 return dev_alloc_name_ns(dev_net(dev), dev, name); in dev_alloc_name()
1191 static int dev_get_valid_name(struct net *net, struct net_device *dev, in dev_get_valid_name() argument
1197 ret = dev_prep_valid_name(net, dev, name, buf); in dev_get_valid_name()
1199 strscpy(dev->name, buf, IFNAMSIZ); in dev_get_valid_name()
1211 int dev_change_name(struct net_device *dev, const char *newname) in dev_change_name() argument
1220 BUG_ON(!dev_net(dev)); in dev_change_name()
1222 net = dev_net(dev); in dev_change_name()
1226 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { in dev_change_name()
1231 memcpy(oldname, dev->name, IFNAMSIZ); in dev_change_name()
1233 err = dev_get_valid_name(net, dev, newname); in dev_change_name()
1240 netdev_info(dev, "renamed from %s%s\n", oldname, in dev_change_name()
1241 dev->flags & IFF_UP ? " (while UP)" : ""); in dev_change_name()
1243 old_assign_type = dev->name_assign_type; in dev_change_name()
1244 dev->name_assign_type = NET_NAME_RENAMED; in dev_change_name()
1247 ret = device_rename(&dev->dev, dev->name); in dev_change_name()
1249 memcpy(dev->name, oldname, IFNAMSIZ); in dev_change_name()
1250 dev->name_assign_type = old_assign_type; in dev_change_name()
1257 netdev_adjacent_rename_links(dev, oldname); in dev_change_name()
1260 netdev_name_node_del(dev->name_node); in dev_change_name()
1266 netdev_name_node_add(net, dev->name_node); in dev_change_name()
1269 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); in dev_change_name()
1277 memcpy(dev->name, oldname, IFNAMSIZ); in dev_change_name()
1279 dev->name_assign_type = old_assign_type; in dev_change_name()
1283 netdev_err(dev, "name change rollback failed: %d\n", in dev_change_name()
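
dev_change_name() renames the device, fires NETDEV_CHANGENAME, and rolls the sysfs and name-node state back if a notifier vetoes the change (the rollback path in the lines above). A sketch of a kernel-side rename; "%d" patterns are resolved through dev_get_valid_name() as shown:

static int example_rename(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_name(dev, "lan%d");    /* picks a free lanN name */
        rtnl_unlock();

        return err;
}
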
1299 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) in dev_set_alias() argument
1316 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, in dev_set_alias()
1336 int dev_get_alias(const struct net_device *dev, char *name, size_t len) in dev_get_alias() argument
1342 alias = rcu_dereference(dev->ifalias); in dev_get_alias()
1356 void netdev_features_change(struct net_device *dev) in netdev_features_change() argument
1358 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); in netdev_features_change()
1370 void netdev_state_change(struct net_device *dev) in netdev_state_change() argument
1372 if (dev->flags & IFF_UP) { in netdev_state_change()
1374 .info.dev = dev, in netdev_state_change()
1379 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL); in netdev_state_change()
1395 void __netdev_notify_peers(struct net_device *dev) in __netdev_notify_peers() argument
1398 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); in __netdev_notify_peers()
1399 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); in __netdev_notify_peers()
1413 void netdev_notify_peers(struct net_device *dev) in netdev_notify_peers() argument
1416 __netdev_notify_peers(dev); in netdev_notify_peers()
1432 n->dev->name, n->napi_id); in napi_kthread_create()
1442 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) in __dev_open() argument
1444 const struct net_device_ops *ops = dev->netdev_ops; in __dev_open()
1448 dev_addr_check(dev); in __dev_open()
1450 if (!netif_device_present(dev)) { in __dev_open()
1452 if (dev->dev.parent) in __dev_open()
1453 pm_runtime_resume(dev->dev.parent); in __dev_open()
1454 if (!netif_device_present(dev)) in __dev_open()
1462 netpoll_poll_disable(dev); in __dev_open()
1464 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); in __dev_open()
1469 set_bit(__LINK_STATE_START, &dev->state); in __dev_open()
1472 ret = ops->ndo_validate_addr(dev); in __dev_open()
1475 ret = ops->ndo_open(dev); in __dev_open()
1477 netpoll_poll_enable(dev); in __dev_open()
1480 clear_bit(__LINK_STATE_START, &dev->state); in __dev_open()
1482 dev->flags |= IFF_UP; in __dev_open()
1483 dev_set_rx_mode(dev); in __dev_open()
1484 dev_activate(dev); in __dev_open()
1485 add_device_randomness(dev->dev_addr, dev->addr_len); in __dev_open()
1504 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) in dev_open() argument
1508 if (dev->flags & IFF_UP) in dev_open()
1511 ret = __dev_open(dev, extack); in dev_open()
1515 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); in dev_open()
1516 call_netdevice_notifiers(NETDEV_UP, dev); in dev_open()
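
dev_open() is the RTNL-protected wrapper around __dev_open(): it skips devices that are already IFF_UP, then emits the RTM_NEWLINK message and NETDEV_UP notifier seen above. A sketch, passing a NULL extack when no netlink request is being answered:

static int example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev, NULL);
        rtnl_unlock();

        return err;
}
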
1524 struct net_device *dev; in __dev_close_many() local
1529 list_for_each_entry(dev, head, close_list) { in __dev_close_many()
1531 netpoll_poll_disable(dev); in __dev_close_many()
1533 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); in __dev_close_many()
1535 clear_bit(__LINK_STATE_START, &dev->state); in __dev_close_many()
1548 list_for_each_entry(dev, head, close_list) { in __dev_close_many()
1549 const struct net_device_ops *ops = dev->netdev_ops; in __dev_close_many()
1559 ops->ndo_stop(dev); in __dev_close_many()
1561 dev->flags &= ~IFF_UP; in __dev_close_many()
1562 netpoll_poll_enable(dev); in __dev_close_many()
1566 static void __dev_close(struct net_device *dev) in __dev_close() argument
1570 list_add(&dev->close_list, &single); in __dev_close()
1577 struct net_device *dev, *tmp; in dev_close_many() local
1580 list_for_each_entry_safe(dev, tmp, head, close_list) in dev_close_many()
1581 if (!(dev->flags & IFF_UP)) in dev_close_many()
1582 list_del_init(&dev->close_list); in dev_close_many()
1586 list_for_each_entry_safe(dev, tmp, head, close_list) { in dev_close_many()
1587 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); in dev_close_many()
1588 call_netdevice_notifiers(NETDEV_DOWN, dev); in dev_close_many()
1590 list_del_init(&dev->close_list); in dev_close_many()
1604 void dev_close(struct net_device *dev) in dev_close() argument
1606 if (dev->flags & IFF_UP) { in dev_close()
1609 list_add(&dev->close_list, &single); in dev_close()
1625 void dev_disable_lro(struct net_device *dev) in dev_disable_lro() argument
1630 dev->wanted_features &= ~NETIF_F_LRO; in dev_disable_lro()
1631 netdev_update_features(dev); in dev_disable_lro()
1633 if (unlikely(dev->features & NETIF_F_LRO)) in dev_disable_lro()
1634 netdev_WARN(dev, "failed to disable LRO!\n"); in dev_disable_lro()
1636 netdev_for_each_lower_dev(dev, lower_dev, iter) in dev_disable_lro()
1649 static void dev_disable_gro_hw(struct net_device *dev) in dev_disable_gro_hw() argument
1651 dev->wanted_features &= ~NETIF_F_GRO_HW; in dev_disable_gro_hw()
1652 netdev_update_features(dev); in dev_disable_gro_hw()
1654 if (unlikely(dev->features & NETIF_F_GRO_HW)) in dev_disable_gro_hw()
1655 netdev_WARN(dev, "failed to disable GRO_HW!\n"); in dev_disable_gro_hw()
1683 struct net_device *dev) in call_netdevice_notifier() argument
1686 .dev = dev, in call_netdevice_notifier()
1693 struct net_device *dev) in call_netdevice_register_notifiers() argument
1697 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); in call_netdevice_register_notifiers()
1702 if (!(dev->flags & IFF_UP)) in call_netdevice_register_notifiers()
1705 call_netdevice_notifier(nb, NETDEV_UP, dev); in call_netdevice_register_notifiers()
1710 struct net_device *dev) in call_netdevice_unregister_notifiers() argument
1712 if (dev->flags & IFF_UP) { in call_netdevice_unregister_notifiers()
1714 dev); in call_netdevice_unregister_notifiers()
1715 call_netdevice_notifier(nb, NETDEV_DOWN, dev); in call_netdevice_unregister_notifiers()
1717 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); in call_netdevice_unregister_notifiers()
1723 struct net_device *dev; in call_netdevice_register_net_notifiers() local
1726 for_each_netdev(net, dev) { in call_netdevice_register_net_notifiers()
1727 err = call_netdevice_register_notifiers(nb, dev); in call_netdevice_register_net_notifiers()
1734 for_each_netdev_continue_reverse(net, dev) in call_netdevice_register_net_notifiers()
1735 call_netdevice_unregister_notifiers(nb, dev); in call_netdevice_register_net_notifiers()
1742 struct net_device *dev; in call_netdevice_unregister_net_notifiers() local
1744 for_each_netdev(net, dev) in call_netdevice_unregister_net_notifiers()
1745 call_netdevice_unregister_notifiers(nb, dev); in call_netdevice_unregister_net_notifiers()
1931 int register_netdevice_notifier_dev_net(struct net_device *dev, in register_netdevice_notifier_dev_net() argument
1938 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); in register_netdevice_notifier_dev_net()
1941 list_add(&nn->list, &dev->net_notifier_list); in register_netdevice_notifier_dev_net()
1948 int unregister_netdevice_notifier_dev_net(struct net_device *dev, in unregister_netdevice_notifier_dev_net() argument
1956 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); in unregister_netdevice_notifier_dev_net()
1962 static void move_netdevice_notifiers_dev_net(struct net_device *dev, in move_netdevice_notifiers_dev_net() argument
1967 list_for_each_entry(nn, &dev->net_notifier_list, list) in move_netdevice_notifiers_dev_net()
1968 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb); in move_netdevice_notifiers_dev_net()
1983 struct net *net = dev_net(info->dev); in call_netdevice_notifiers_info()
2016 struct net *net = dev_net(info->dev); in call_netdevice_notifiers_info_robust()
2025 struct net_device *dev, in call_netdevice_notifiers_extack() argument
2029 .dev = dev, in call_netdevice_notifiers_extack()
2045 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) in call_netdevice_notifiers() argument
2047 return call_netdevice_notifiers_extack(val, dev, NULL); in call_netdevice_notifiers()
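
Every call_netdevice_notifiers() site in this file fans out to chains registered via register_netdevice_notifier() and its variants. A minimal sketch of a consumer; example_nb and its handler are hypothetical:

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                netdev_info(dev, "is up\n");
                break;
        case NETDEV_CHANGENAME:
                netdev_info(dev, "was renamed\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};
/* register_netdevice_notifier(&example_nb) at module init,
 * unregister_netdevice_notifier(&example_nb) at exit.
 */
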
2061 struct net_device *dev, u32 arg) in call_netdevice_notifiers_mtu() argument
2064 .info.dev = dev, in call_netdevice_notifiers_mtu()
2172 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) in is_skb_forwardable() argument
2174 return __is_skb_forwardable(dev, skb, true); in is_skb_forwardable()
2178 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, in __dev_forward_skb2() argument
2181 int ret = ____dev_forward_skb(dev, skb, check_mtu); in __dev_forward_skb2()
2184 skb->protocol = eth_type_trans(skb, dev); in __dev_forward_skb2()
2191 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in __dev_forward_skb() argument
2193 return __dev_forward_skb2(dev, skb, true); in __dev_forward_skb()
2215 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in dev_forward_skb() argument
2217 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); in dev_forward_skb()
2221 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) in dev_forward_skb_nomtu() argument
2223 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); in dev_forward_skb_nomtu()
2233 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in deliver_skb()
2272 bool dev_nit_active(struct net_device *dev) in dev_nit_active() argument
2274 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); in dev_nit_active()
2283 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) in dev_queue_xmit_nit() argument
2303 deliver_skb(skb2, pt_prev, skb->dev); in dev_queue_xmit_nit()
2325 dev->name); in dev_queue_xmit_nit()
2335 ptype_list = &dev->ptype_all; in dev_queue_xmit_nit()
2341 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); in dev_queue_xmit_nit()
2362 static void netif_setup_tc(struct net_device *dev, unsigned int txq) in netif_setup_tc() argument
2365 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; in netif_setup_tc()
2369 …netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic cl… in netif_setup_tc()
2370 dev->num_tc = 0; in netif_setup_tc()
2376 int q = netdev_get_prio_tc_map(dev, i); in netif_setup_tc()
2378 tc = &dev->tc_to_txq[q]; in netif_setup_tc()
2380 …netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer va… in netif_setup_tc()
2382 netdev_set_prio_tc_map(dev, i, 0); in netif_setup_tc()
2387 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) in netdev_txq_to_tc() argument
2389 if (dev->num_tc) { in netdev_txq_to_tc()
2390 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; in netdev_txq_to_tc()
2443 static bool remove_xps_queue_cpu(struct net_device *dev, in remove_xps_queue_cpu() argument
2465 static void reset_xps_maps(struct net_device *dev, in reset_xps_maps() argument
2473 RCU_INIT_POINTER(dev->xps_maps[type], NULL); in reset_xps_maps()
2478 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, in clean_xps_maps() argument
2485 dev_maps = xmap_dereference(dev->xps_maps[type]); in clean_xps_maps()
2490 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); in clean_xps_maps()
2492 reset_xps_maps(dev, dev_maps, type); in clean_xps_maps()
2497 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); in clean_xps_maps()
2501 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, in netif_reset_xps_queues() argument
2511 clean_xps_maps(dev, XPS_RXQS, offset, count); in netif_reset_xps_queues()
2513 clean_xps_maps(dev, XPS_CPUS, offset, count); in netif_reset_xps_queues()
2519 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) in netif_reset_xps_queues_gt() argument
2521 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); in netif_reset_xps_queues_gt()
2584 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, in __netif_set_xps_queue() argument
2595 WARN_ON_ONCE(index >= dev->num_tx_queues); in __netif_set_xps_queue()
2597 if (dev->num_tc) { in __netif_set_xps_queue()
2599 num_tc = dev->num_tc; in __netif_set_xps_queue()
2604 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; in __netif_set_xps_queue()
2606 tc = netdev_txq_to_tc(dev, index); in __netif_set_xps_queue()
2613 dev_maps = xmap_dereference(dev->xps_maps[type]); in __netif_set_xps_queue()
2615 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); in __netif_set_xps_queue()
2616 nr_ids = dev->num_rx_queues; in __netif_set_xps_queue()
2702 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); in __netif_set_xps_queue()
2734 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), in __netif_set_xps_queue()
2762 reset_xps_maps(dev, dev_maps, type); in __netif_set_xps_queue()
2788 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, in netif_set_xps_queue() argument
2794 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); in netif_set_xps_queue()
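
netif_set_xps_queue() is the CPU-map front end to __netif_set_xps_queue() (the rx-queue variant uses XPS_RXQS). A sketch pinning TX queue 0 to CPUs 0-1; requires CONFIG_XPS:

static int example_set_xps(struct net_device *dev)
{
        cpumask_var_t mask;
        int err;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(0, mask);
        cpumask_set_cpu(1, mask);
        err = netif_set_xps_queue(dev, mask, 0);        /* queue index 0 */

        free_cpumask_var(mask);
        return err;
}
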
2802 static void netdev_unbind_all_sb_channels(struct net_device *dev) in netdev_unbind_all_sb_channels() argument
2804 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_all_sb_channels()
2807 while (txq-- != &dev->_tx[0]) { in netdev_unbind_all_sb_channels()
2809 netdev_unbind_sb_channel(dev, txq->sb_dev); in netdev_unbind_all_sb_channels()
2813 void netdev_reset_tc(struct net_device *dev) in netdev_reset_tc() argument
2816 netif_reset_xps_queues_gt(dev, 0); in netdev_reset_tc()
2818 netdev_unbind_all_sb_channels(dev); in netdev_reset_tc()
2821 dev->num_tc = 0; in netdev_reset_tc()
2822 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); in netdev_reset_tc()
2823 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); in netdev_reset_tc()
2827 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) in netdev_set_tc_queue() argument
2829 if (tc >= dev->num_tc) in netdev_set_tc_queue()
2833 netif_reset_xps_queues(dev, offset, count); in netdev_set_tc_queue()
2835 dev->tc_to_txq[tc].count = count; in netdev_set_tc_queue()
2836 dev->tc_to_txq[tc].offset = offset; in netdev_set_tc_queue()
2841 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) in netdev_set_num_tc() argument
2847 netif_reset_xps_queues_gt(dev, 0); in netdev_set_num_tc()
2849 netdev_unbind_all_sb_channels(dev); in netdev_set_num_tc()
2851 dev->num_tc = num_tc; in netdev_set_num_tc()
2856 void netdev_unbind_sb_channel(struct net_device *dev, in netdev_unbind_sb_channel() argument
2859 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_sb_channel()
2867 while (txq-- != &dev->_tx[0]) { in netdev_unbind_sb_channel()
2874 int netdev_bind_sb_channel_queue(struct net_device *dev, in netdev_bind_sb_channel_queue() argument
2879 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) in netdev_bind_sb_channel_queue()
2883 if ((offset + count) > dev->real_num_tx_queues) in netdev_bind_sb_channel_queue()
2894 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; in netdev_bind_sb_channel_queue()
2900 int netdev_set_sb_channel(struct net_device *dev, u16 channel) in netdev_set_sb_channel() argument
2903 if (netif_is_multiqueue(dev)) in netdev_set_sb_channel()
2914 dev->num_tc = -channel; in netdev_set_sb_channel()
2924 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) in netif_set_real_num_tx_queues() argument
2929 disabling = txq < dev->real_num_tx_queues; in netif_set_real_num_tx_queues()
2931 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
2934 if (dev->reg_state == NETREG_REGISTERED || in netif_set_real_num_tx_queues()
2935 dev->reg_state == NETREG_UNREGISTERING) { in netif_set_real_num_tx_queues()
2938 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, in netif_set_real_num_tx_queues()
2943 if (dev->num_tc) in netif_set_real_num_tx_queues()
2944 netif_setup_tc(dev, txq); in netif_set_real_num_tx_queues()
2946 dev_qdisc_change_real_num_tx(dev, txq); in netif_set_real_num_tx_queues()
2948 dev->real_num_tx_queues = txq; in netif_set_real_num_tx_queues()
2952 qdisc_reset_all_tx_gt(dev, txq); in netif_set_real_num_tx_queues()
2954 netif_reset_xps_queues_gt(dev, txq); in netif_set_real_num_tx_queues()
2958 dev->real_num_tx_queues = txq; in netif_set_real_num_tx_queues()
2976 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) in netif_set_real_num_rx_queues() argument
2980 if (rxq < 1 || rxq > dev->num_rx_queues) in netif_set_real_num_rx_queues()
2983 if (dev->reg_state == NETREG_REGISTERED) { in netif_set_real_num_rx_queues()
2986 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, in netif_set_real_num_rx_queues()
2992 dev->real_num_rx_queues = rxq; in netif_set_real_num_rx_queues()
3007 int netif_set_real_num_queues(struct net_device *dev, in netif_set_real_num_queues() argument
3010 unsigned int old_rxq = dev->real_num_rx_queues; in netif_set_real_num_queues()
3013 if (txq < 1 || txq > dev->num_tx_queues || in netif_set_real_num_queues()
3014 rxq < 1 || rxq > dev->num_rx_queues) in netif_set_real_num_queues()
3020 if (rxq > dev->real_num_rx_queues) { in netif_set_real_num_queues()
3021 err = netif_set_real_num_rx_queues(dev, rxq); in netif_set_real_num_queues()
3025 if (txq > dev->real_num_tx_queues) { in netif_set_real_num_queues()
3026 err = netif_set_real_num_tx_queues(dev, txq); in netif_set_real_num_queues()
3030 if (rxq < dev->real_num_rx_queues) in netif_set_real_num_queues()
3031 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); in netif_set_real_num_queues()
3032 if (txq < dev->real_num_tx_queues) in netif_set_real_num_queues()
3033 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); in netif_set_real_num_queues()
3037 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); in netif_set_real_num_queues()
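
netif_set_real_num_queues() resizes both directions in one call, growing before shrinking so a concurrent xmit never sees a stale queue, and unwinding (the WARN_ONs above) if a later step fails. A sketch of a driver using it after renegotiating channels:

static int example_set_channels(struct net_device *dev, unsigned int n)
{
        ASSERT_RTNL();
        /* n must stay within num_tx_queues/num_rx_queues from alloc time */
        return netif_set_real_num_queues(dev, n, n);
}
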
3051 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) in netif_set_tso_max_size() argument
3053 dev->tso_max_size = min(GSO_MAX_SIZE, size); in netif_set_tso_max_size()
3054 if (size < READ_ONCE(dev->gso_max_size)) in netif_set_tso_max_size()
3055 netif_set_gso_max_size(dev, size); in netif_set_tso_max_size()
3056 if (size < READ_ONCE(dev->gso_ipv4_max_size)) in netif_set_tso_max_size()
3057 netif_set_gso_ipv4_max_size(dev, size); in netif_set_tso_max_size()
3070 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) in netif_set_tso_max_segs() argument
3072 dev->tso_max_segs = segs; in netif_set_tso_max_segs()
3073 if (segs < READ_ONCE(dev->gso_max_segs)) in netif_set_tso_max_segs()
3074 netif_set_gso_max_segs(dev, segs); in netif_set_tso_max_segs()
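
The TSO setters record the device's hard limits and clamp the user-visible GSO knobs down to them. A sketch of a driver capping both at probe time; the limits are illustrative:

static void example_cap_tso(struct net_device *dev)
{
        netif_set_tso_max_size(dev, 65536);     /* max TSO frame, bytes */
        netif_set_tso_max_segs(dev, 64);        /* hw descriptor limit */
}
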
3208 void netif_device_detach(struct net_device *dev) in netif_device_detach() argument
3210 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && in netif_device_detach()
3211 netif_running(dev)) { in netif_device_detach()
3212 netif_tx_stop_all_queues(dev); in netif_device_detach()
3223 void netif_device_attach(struct net_device *dev) in netif_device_attach() argument
3225 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && in netif_device_attach()
3226 netif_running(dev)) { in netif_device_attach()
3227 netif_tx_wake_all_queues(dev); in netif_device_attach()
3228 __netdev_watchdog_up(dev); in netif_device_attach()
3237 static u16 skb_tx_hash(const struct net_device *dev, in skb_tx_hash() argument
3243 u16 qcount = dev->real_num_tx_queues; in skb_tx_hash()
3245 if (dev->num_tc) { in skb_tx_hash()
3246 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); in skb_tx_hash()
3254 qcount = dev->real_num_tx_queues; in skb_tx_hash()
3274 struct net_device *dev = skb->dev; in skb_warn_bad_offload() local
3280 if (dev) { in skb_warn_bad_offload()
3281 if (dev->dev.parent) in skb_warn_bad_offload()
3282 name = dev_driver_string(dev->dev.parent); in skb_warn_bad_offload()
3284 name = netdev_name(dev); in skb_warn_bad_offload()
3288 name, dev ? &dev->features : &null_features, in skb_warn_bad_offload()
3407 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) in do_netdev_rx_csum_fault() argument
3409 netdev_err(dev, "hw csum failure\n"); in do_netdev_rx_csum_fault()
3414 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) in netdev_rx_csum_fault() argument
3416 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); in netdev_rx_csum_fault()
3422 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) in illegal_highdma() argument
3427 if (!(dev->features & NETIF_F_HIGHDMA)) { in illegal_highdma()
3448 features &= skb->dev->mpls_features; in net_mpls_features()
3473 if (illegal_highdma(skb->dev, skb)) in harmonize_features()
3480 struct net_device *dev, in passthru_features_check() argument
3488 struct net_device *dev, in dflt_features_check() argument
3495 struct net_device *dev, in gso_features_check() argument
3500 if (gso_segs > READ_ONCE(dev->gso_max_segs)) in gso_features_check()
3503 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size))) in gso_features_check()
3518 features &= ~dev->gso_partial_features; in gso_features_check()
3536 struct net_device *dev = skb->dev; in netif_skb_features() local
3537 netdev_features_t features = dev->features; in netif_skb_features()
3540 features = gso_features_check(skb, dev, features); in netif_skb_features()
3547 features &= dev->hw_enc_features; in netif_skb_features()
3551 dev->vlan_features | in netif_skb_features()
3555 if (dev->netdev_ops->ndo_features_check) in netif_skb_features()
3556 features &= dev->netdev_ops->ndo_features_check(skb, dev, in netif_skb_features()
3559 features &= dflt_features_check(skb, dev, features); in netif_skb_features()
3565 static int xmit_one(struct sk_buff *skb, struct net_device *dev, in xmit_one() argument
3571 if (dev_nit_active(dev)) in xmit_one()
3572 dev_queue_xmit_nit(skb, dev); in xmit_one()
3575 trace_net_dev_start_xmit(skb, dev); in xmit_one()
3576 rc = netdev_start_xmit(skb, dev, txq, more); in xmit_one()
3577 trace_net_dev_xmit(skb, rc, dev, len); in xmit_one()
3582 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, in dev_hard_start_xmit() argument
3592 rc = xmit_one(skb, dev, txq, next != NULL); in dev_hard_start_xmit()
3641 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) in validate_xmit_skb() argument
3650 skb = sk_validate_xmit_skb(skb, dev); in validate_xmit_skb()
3692 dev_core_stats_tx_dropped_inc(dev); in validate_xmit_skb()
3696 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) in validate_xmit_skb_list() argument
3707 skb = validate_xmit_skb(skb, dev, again); in validate_xmit_skb_list()
3778 struct net_device *dev, in __dev_xmit_skb() argument
3803 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && in __dev_xmit_skb()
3849 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { in __dev_xmit_skb()
3887 map = rcu_dereference_bh(skb->dev->priomap); in skb_update_prio()
3925 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) in netdev_tx_queue_mapping() argument
3929 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); in netdev_tx_queue_mapping()
4011 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress); in sch_handle_ingress()
4064 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) in sch_handle_egress() argument
4066 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress); in sch_handle_egress()
4114 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) in sch_handle_egress() argument
4121 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, in __get_xps_queue_idx() argument
4124 int tc = netdev_get_prio_tc_map(dev, skb->priority); in __get_xps_queue_idx()
4141 if (unlikely(queue_index >= dev->real_num_tx_queues)) in __get_xps_queue_idx()
4148 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, in get_xps_queue() argument
4168 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, in get_xps_queue()
4178 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, in get_xps_queue()
4190 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, in dev_pick_tx_zero() argument
4197 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, in dev_pick_tx_cpu_id() argument
4200 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; in dev_pick_tx_cpu_id()
4204 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, in netdev_pick_tx() argument
4210 sb_dev = sb_dev ? : dev; in netdev_pick_tx()
4213 queue_index >= dev->real_num_tx_queues) { in netdev_pick_tx()
4214 int new_index = get_xps_queue(dev, sb_dev, skb); in netdev_pick_tx()
4217 new_index = skb_tx_hash(dev, sb_dev, skb); in netdev_pick_tx()
4231 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, in netdev_core_pick_tx() argument
4244 if (dev->real_num_tx_queues != 1) { in netdev_core_pick_tx()
4245 const struct net_device_ops *ops = dev->netdev_ops; in netdev_core_pick_tx()
4248 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); in netdev_core_pick_tx()
4250 queue_index = netdev_pick_tx(dev, skb, sb_dev); in netdev_core_pick_tx()
4252 queue_index = netdev_cap_txqueue(dev, queue_index); in netdev_core_pick_tx()
4256 return netdev_get_tx_queue(dev, queue_index); in netdev_core_pick_tx()
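
netdev_core_pick_tx() defers to the driver's ndo_select_queue when one exists, then caps the result with netdev_cap_txqueue(). A hedged sketch of such a hook, falling back to the generic netdev_pick_tx(); example_select_queue is hypothetical:

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev)
{
        /* Steer control traffic to queue 0, hash everything else. */
        if (skb->priority == TC_PRIO_CONTROL)
                return 0;

        return netdev_pick_tx(dev, skb, sb_dev);
}
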
4282 struct net_device *dev = skb->dev; in __dev_queue_xmit() local
4306 skb = nf_hook_egress(skb, &rc, dev); in __dev_queue_xmit()
4314 skb = sch_handle_egress(skb, &rc, dev); in __dev_queue_xmit()
4320 txq = netdev_tx_queue_mapping(dev, skb); in __dev_queue_xmit()
4326 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) in __dev_queue_xmit()
4332 txq = netdev_core_pick_tx(dev, skb, sb_dev); in __dev_queue_xmit()
4338 rc = __dev_xmit_skb(skb, q, dev, txq); in __dev_queue_xmit()
4354 if (dev->flags & IFF_UP) { in __dev_queue_xmit()
4364 skb = validate_xmit_skb(skb, dev, &again); in __dev_queue_xmit()
4368 HARD_TX_LOCK(dev, txq, cpu); in __dev_queue_xmit()
4372 skb = dev_hard_start_xmit(skb, dev, txq, &rc); in __dev_queue_xmit()
4375 HARD_TX_UNLOCK(dev, txq); in __dev_queue_xmit()
4379 HARD_TX_UNLOCK(dev, txq); in __dev_queue_xmit()
4381 dev->name); in __dev_queue_xmit()
4388 dev->name); in __dev_queue_xmit()
4395 dev_core_stats_tx_dropped_inc(dev); in __dev_queue_xmit()
4406 struct net_device *dev = skb->dev; in __dev_direct_xmit() local
4412 if (unlikely(!netif_running(dev) || in __dev_direct_xmit()
4413 !netif_carrier_ok(dev))) in __dev_direct_xmit()
4416 skb = validate_xmit_skb_list(skb, dev, &again); in __dev_direct_xmit()
4421 txq = skb_get_tx_queue(dev, skb); in __dev_direct_xmit()
4426 HARD_TX_LOCK(dev, txq, smp_processor_id()); in __dev_direct_xmit()
4428 ret = netdev_start_xmit(skb, dev, txq, false); in __dev_direct_xmit()
4429 HARD_TX_UNLOCK(dev, txq); in __dev_direct_xmit()
4435 dev_core_stats_tx_dropped_inc(dev); in __dev_direct_xmit()
4511 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, in set_rps_cpu() argument
4524 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || in set_rps_cpu()
4525 !(dev->features & NETIF_F_NTUPLE)) in set_rps_cpu()
4527 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); in set_rps_cpu()
4531 rxqueue = dev->_rx + rxq_index; in set_rps_cpu()
4536 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, in set_rps_cpu()
4560 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, in get_rps_cpu() argument
4564 struct netdev_rx_queue *rxqueue = dev->_rx; in get_rps_cpu()
4574 if (unlikely(index >= dev->real_num_rx_queues)) { in get_rps_cpu()
4575 WARN_ONCE(dev->real_num_rx_queues > 1, in get_rps_cpu()
4578 dev->name, index, dev->real_num_rx_queues); in get_rps_cpu()
4633 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); in get_rps_cpu()
4670 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, in rps_may_expire_flow() argument
4673 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; in rps_may_expire_flow()
4803 if (!netif_running(skb->dev)) in enqueue_to_backlog()
4828 dev_core_stats_rx_dropped_inc(skb->dev); in enqueue_to_backlog()
4835 struct net_device *dev = skb->dev; in netif_get_rxqueue() local
4838 rxqueue = dev->_rx; in netif_get_rxqueue()
4843 if (unlikely(index >= dev->real_num_rx_queues)) { in netif_get_rxqueue()
4844 WARN_ONCE(dev->real_num_rx_queues > 1, in netif_get_rxqueue()
4847 dev->name, index, dev->real_num_rx_queues); in netif_get_rxqueue()
4886 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); in bpf_prog_run_generic_xdp()
4915 skb->dev->dev_addr)) || in bpf_prog_run_generic_xdp()
4919 skb->protocol = eth_type_trans(skb, skb->dev); in bpf_prog_run_generic_xdp()
4983 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); in netif_receive_generic_xdp()
4986 trace_xdp_exception(skb->dev, xdp_prog, act); in netif_receive_generic_xdp()
5005 struct net_device *dev = skb->dev; in generic_xdp_tx() local
5010 txq = netdev_core_pick_tx(dev, skb, NULL); in generic_xdp_tx()
5012 HARD_TX_LOCK(dev, txq, cpu); in generic_xdp_tx()
5014 rc = netdev_start_xmit(skb, dev, txq, 0); in generic_xdp_tx()
5018 HARD_TX_UNLOCK(dev, txq); in generic_xdp_tx()
5020 trace_xdp_exception(dev, xdp_prog, XDP_TX); in generic_xdp_tx()
5021 dev_core_stats_tx_dropped_inc(dev); in generic_xdp_tx()
5039 err = xdp_do_generic_redirect(skb->dev, skb, in do_xdp_generic()
5073 cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_rx_internal()
5231 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5245 bool netdev_is_rx_handler_busy(struct net_device *dev) in netdev_is_rx_handler_busy() argument
5248 return dev && rtnl_dereference(dev->rx_handler); in netdev_is_rx_handler_busy()
5266 int netdev_rx_handler_register(struct net_device *dev, in netdev_rx_handler_register() argument
5270 if (netdev_is_rx_handler_busy(dev)) in netdev_rx_handler_register()
5273 if (dev->priv_flags & IFF_NO_RX_HANDLER) in netdev_rx_handler_register()
5277 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); in netdev_rx_handler_register()
5278 rcu_assign_pointer(dev->rx_handler, rx_handler); in netdev_rx_handler_register()
5292 void netdev_rx_handler_unregister(struct net_device *dev) in netdev_rx_handler_unregister() argument
5296 RCU_INIT_POINTER(dev->rx_handler, NULL); in netdev_rx_handler_unregister()
5302 RCU_INIT_POINTER(dev->rx_handler_data, NULL); in netdev_rx_handler_unregister()
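
A device has a single rx_handler slot, which is how bridging, bonding, and macvlan hook __netif_receive_skb_core(). A sketch of claiming it under RTNL; the register helper itself returns -EBUSY when the slot is already taken:

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        netdev_dbg(skb->dev, "rx_handler saw %u bytes\n", skb->len);
        return RX_HANDLER_PASS;         /* let normal delivery continue */
}

static int example_claim_rx_handler(struct net_device *dev, void *priv)
{
        ASSERT_RTNL();
        return netdev_rx_handler_register(dev, example_handle_frame, priv);
}
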
5358 orig_dev = skb->dev; in __netif_receive_skb_core()
5368 skb->skb_iif = skb->dev->ifindex; in __netif_receive_skb_core()
5376 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); in __netif_receive_skb_core()
5403 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { in __netif_receive_skb_core()
5443 rx_handler = rcu_dereference(skb->dev->rx_handler); in __netif_receive_skb_core()
5465 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { in __netif_receive_skb_core()
5514 if (unlikely(skb->dev != orig_dev)) { in __netif_receive_skb_core()
5516 &skb->dev->ptype_specific); in __netif_receive_skb_core()
5526 dev_core_stats_rx_dropped_inc(skb->dev); in __netif_receive_skb_core()
5528 dev_core_stats_rx_nohandler_inc(skb->dev); in __netif_receive_skb_core()
5549 struct net_device *orig_dev = skb->dev; in __netif_receive_skb_one_core()
5556 skb->dev, pt_prev, orig_dev); in __netif_receive_skb_one_core()
5603 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in __netif_receive_skb_list_ptype()
5627 struct net_device *orig_dev = skb->dev; in __netif_receive_skb_list_core()
5704 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) in generic_xdp_install() argument
5706 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); in generic_xdp_install()
5712 rcu_assign_pointer(dev->xdp_prog, new); in generic_xdp_install()
5720 dev_disable_lro(dev); in generic_xdp_install()
5721 dev_disable_gro_hw(dev); in generic_xdp_install()
5746 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_internal()
5779 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_list_internal()
5859 if (skb->dev->reg_state == NETREG_UNREGISTERING) { in flush_backlog()
5868 if (skb->dev->reg_state == NETREG_UNREGISTERING) { in flush_backlog()
6112 timeout = READ_ONCE(n->dev->gro_flush_timeout); in napi_complete_done()
6113 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); in napi_complete_done()
6117 timeout = READ_ONCE(n->dev->gro_flush_timeout); in napi_complete_done()
6223 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); in busy_poll_stop()
6224 timeout = READ_ONCE(napi->dev->gro_flush_timeout); in busy_poll_stop()
6297 __NET_ADD_STATS(dev_net(napi->dev), in napi_busy_loop()
6389 int dev_set_threaded(struct net_device *dev, bool threaded) in dev_set_threaded() argument
6394 if (dev->threaded == threaded) in dev_set_threaded()
6398 list_for_each_entry(napi, &dev->napi_list, dev_list) { in dev_set_threaded()
6409 dev->threaded = threaded; in dev_set_threaded()
6422 list_for_each_entry(napi, &dev->napi_list, dev_list) in dev_set_threaded()
6429 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, in netif_napi_add_weight() argument
6445 netdev_err_once(dev, "%s() called with weight %d\n", __func__, in netif_napi_add_weight()
6448 napi->dev = dev; in netif_napi_add_weight()
6455 list_add_rcu(&napi->dev_list, &dev->napi_list); in netif_napi_add_weight()
6462 if (dev->threaded && napi_kthread_create(napi)) in netif_napi_add_weight()
6463 dev->threaded = 0; in netif_napi_add_weight()
6506 if (n->dev->threaded && n->thread) in napi_enable()
6564 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", in __napi_poll()
6607 n->dev ? n->dev->name : "backlog"); in __napi_poll()
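
netif_napi_add_weight() ties a napi_struct to its device (and spawns a kthread when dev->threaded is set, as the lines above show). A sketch of the usual driver pairing with a poll callback; example_poll's packet processing is elided:

static int example_poll(struct napi_struct *napi, int budget)
{
        int work = 0;

        /* ... process up to @budget packets, counting them in work ... */

        if (work < budget)
                napi_complete_done(napi, work);

        return work;
}

static void example_setup_napi(struct net_device *dev,
                               struct napi_struct *napi)
{
        netif_napi_add_weight(dev, napi, example_poll, NAPI_POLL_WEIGHT);
        napi_enable(napi);
}
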
6790 struct net_device *dev; member
6815 if (adj->dev == adj_dev) in __netdev_find_adj()
6824 struct net_device *dev = (struct net_device *)priv->data; in ____netdev_has_upper_dev() local
6826 return upper_dev == dev; in ____netdev_has_upper_dev()
6838 bool netdev_has_upper_dev(struct net_device *dev, in netdev_has_upper_dev() argument
6847 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, in netdev_has_upper_dev()
6862 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, in netdev_has_upper_dev_all_rcu() argument
6869 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, in netdev_has_upper_dev_all_rcu()
6881 bool netdev_has_any_upper_dev(struct net_device *dev) in netdev_has_any_upper_dev() argument
6885 return !list_empty(&dev->adj_list.upper); in netdev_has_any_upper_dev()
6896 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) in netdev_master_upper_dev_get() argument
6902 if (list_empty(&dev->adj_list.upper)) in netdev_master_upper_dev_get()
6905 upper = list_first_entry(&dev->adj_list.upper, in netdev_master_upper_dev_get()
6908 return upper->dev; in netdev_master_upper_dev_get()
6913 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) in __netdev_master_upper_dev_get() argument
6919 if (list_empty(&dev->adj_list.upper)) in __netdev_master_upper_dev_get()
6922 upper = list_first_entry(&dev->adj_list.upper, in __netdev_master_upper_dev_get()
6925 return upper->dev; in __netdev_master_upper_dev_get()
6936 static bool netdev_has_any_lower_dev(struct net_device *dev) in netdev_has_any_lower_dev() argument
6940 return !list_empty(&dev->adj_list.lower); in netdev_has_any_lower_dev()
6961 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, in netdev_upper_get_next_dev_rcu() argument
6970 if (&upper->list == &dev->adj_list.upper) in netdev_upper_get_next_dev_rcu()
6975 return upper->dev; in netdev_upper_get_next_dev_rcu()
6979 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, in __netdev_next_upper_dev() argument
6987 if (&upper->list == &dev->adj_list.upper) in __netdev_next_upper_dev()
6993 return upper->dev; in __netdev_next_upper_dev()
6996 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, in netdev_next_upper_dev_rcu() argument
7005 if (&upper->list == &dev->adj_list.upper) in netdev_next_upper_dev_rcu()
7010 return upper->dev; in netdev_next_upper_dev_rcu()
7013 static int __netdev_walk_all_upper_dev(struct net_device *dev, in __netdev_walk_all_upper_dev() argument
7014 int (*fn)(struct net_device *dev, in __netdev_walk_all_upper_dev() argument
7023 now = dev; in __netdev_walk_all_upper_dev()
7024 iter = &dev->adj_list.upper; in __netdev_walk_all_upper_dev()
7027 if (now != dev) { in __netdev_walk_all_upper_dev()
7062 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, in netdev_walk_all_upper_dev_rcu() argument
7063 int (*fn)(struct net_device *dev, in netdev_walk_all_upper_dev_rcu() argument
7071 now = dev; in netdev_walk_all_upper_dev_rcu()
7072 iter = &dev->adj_list.upper; in netdev_walk_all_upper_dev_rcu()
7075 if (now != dev) { in netdev_walk_all_upper_dev_rcu()
7109 static bool __netdev_has_upper_dev(struct net_device *dev, in __netdev_has_upper_dev() argument
7119 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, in __netdev_has_upper_dev()
7134 void *netdev_lower_get_next_private(struct net_device *dev, in netdev_lower_get_next_private() argument
7141 if (&lower->list == &dev->adj_list.lower) in netdev_lower_get_next_private()
7160 void *netdev_lower_get_next_private_rcu(struct net_device *dev, in netdev_lower_get_next_private_rcu() argument
7169 if (&lower->list == &dev->adj_list.lower) in netdev_lower_get_next_private_rcu()
7189 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) in netdev_lower_get_next() argument
7195 if (&lower->list == &dev->adj_list.lower) in netdev_lower_get_next()
7200 return lower->dev; in netdev_lower_get_next()
7204 static struct net_device *netdev_next_lower_dev(struct net_device *dev, in netdev_next_lower_dev() argument
7211 if (&lower->list == &dev->adj_list.lower) in netdev_next_lower_dev()
7216 return lower->dev; in netdev_next_lower_dev()
7219 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, in __netdev_next_lower_dev() argument
7227 if (&lower->list == &dev->adj_list.lower) in __netdev_next_lower_dev()
7233 return lower->dev; in __netdev_next_lower_dev()
7236 int netdev_walk_all_lower_dev(struct net_device *dev, in netdev_walk_all_lower_dev() argument
7237 int (*fn)(struct net_device *dev, in netdev_walk_all_lower_dev() argument
7245 now = dev; in netdev_walk_all_lower_dev()
7246 iter = &dev->adj_list.lower; in netdev_walk_all_lower_dev()
7249 if (now != dev) { in netdev_walk_all_lower_dev()
7283 static int __netdev_walk_all_lower_dev(struct net_device *dev, in __netdev_walk_all_lower_dev() argument
7284 int (*fn)(struct net_device *dev, in __netdev_walk_all_lower_dev() argument
7293 now = dev; in __netdev_walk_all_lower_dev()
7294 iter = &dev->adj_list.lower; in __netdev_walk_all_lower_dev()
7297 if (now != dev) { in __netdev_walk_all_lower_dev()
7332 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, in netdev_next_lower_dev_rcu() argument
7338 if (&lower->list == &dev->adj_list.lower) in netdev_next_lower_dev_rcu()
7343 return lower->dev; in netdev_next_lower_dev_rcu()
7347 static u8 __netdev_upper_depth(struct net_device *dev) in __netdev_upper_depth() argument
7354 for (iter = &dev->adj_list.upper, in __netdev_upper_depth()
7355 udev = __netdev_next_upper_dev(dev, &iter, &ignore); in __netdev_upper_depth()
7357 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { in __netdev_upper_depth()
7367 static u8 __netdev_lower_depth(struct net_device *dev) in __netdev_lower_depth() argument
7374 for (iter = &dev->adj_list.lower, in __netdev_lower_depth()
7375 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); in __netdev_lower_depth()
7377 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { in __netdev_lower_depth()
7387 static int __netdev_update_upper_level(struct net_device *dev, in __netdev_update_upper_level() argument
7390 dev->upper_level = __netdev_upper_depth(dev) + 1; in __netdev_update_upper_level()
7397 static void net_unlink_todo(struct net_device *dev) in net_unlink_todo() argument
7399 if (list_empty(&dev->unlink_list)) in net_unlink_todo()
7400 list_add_tail(&dev->unlink_list, &net_unlink_list); in net_unlink_todo()
7404 static int __netdev_update_lower_level(struct net_device *dev, in __netdev_update_lower_level() argument
7407 dev->lower_level = __netdev_lower_depth(dev) + 1; in __netdev_update_lower_level()
7414 dev->nested_level = dev->lower_level - 1; in __netdev_update_lower_level()
7416 net_unlink_todo(dev); in __netdev_update_lower_level()
7421 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, in netdev_walk_all_lower_dev_rcu() argument
7422 int (*fn)(struct net_device *dev, in netdev_walk_all_lower_dev_rcu() argument
7430 now = dev; in netdev_walk_all_lower_dev_rcu()
7431 iter = &dev->adj_list.lower; in netdev_walk_all_lower_dev_rcu()
7434 if (now != dev) { in netdev_walk_all_lower_dev_rcu()
7477 void *netdev_lower_get_first_private_rcu(struct net_device *dev) in netdev_lower_get_first_private_rcu() argument
7481 lower = list_first_or_null_rcu(&dev->adj_list.lower, in netdev_lower_get_first_private_rcu()
7496 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) in netdev_master_upper_dev_get_rcu() argument
7500 upper = list_first_or_null_rcu(&dev->adj_list.upper, in netdev_master_upper_dev_get_rcu()
7503 return upper->dev; in netdev_master_upper_dev_get_rcu()
7508 static int netdev_adjacent_sysfs_add(struct net_device *dev, in netdev_adjacent_sysfs_add() argument
7514 sprintf(linkname, dev_list == &dev->adj_list.upper ? in netdev_adjacent_sysfs_add()
7516 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), in netdev_adjacent_sysfs_add()
7519 static void netdev_adjacent_sysfs_del(struct net_device *dev, in netdev_adjacent_sysfs_del() argument
7525 sprintf(linkname, dev_list == &dev->adj_list.upper ? in netdev_adjacent_sysfs_del()
7527 sysfs_remove_link(&(dev->dev.kobj), linkname); in netdev_adjacent_sysfs_del()
7530 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, in netdev_adjacent_is_neigh_list() argument
7534 return (dev_list == &dev->adj_list.upper || in netdev_adjacent_is_neigh_list()
7535 dev_list == &dev->adj_list.lower) && in netdev_adjacent_is_neigh_list()
7536 net_eq(dev_net(dev), dev_net(adj_dev)); in netdev_adjacent_is_neigh_list()
7539 static int __netdev_adjacent_dev_insert(struct net_device *dev, in __netdev_adjacent_dev_insert() argument
7552 dev->name, adj_dev->name, adj->ref_nr); in __netdev_adjacent_dev_insert()
7561 adj->dev = adj_dev; in __netdev_adjacent_dev_insert()
7569 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); in __netdev_adjacent_dev_insert()
7571 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { in __netdev_adjacent_dev_insert()
7572 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); in __netdev_adjacent_dev_insert()
7579 ret = sysfs_create_link(&(dev->dev.kobj), in __netdev_adjacent_dev_insert()
7580 &(adj_dev->dev.kobj), "master"); in __netdev_adjacent_dev_insert()
7592 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) in __netdev_adjacent_dev_insert()
7593 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); in __netdev_adjacent_dev_insert()
7601 static void __netdev_adjacent_dev_remove(struct net_device *dev, in __netdev_adjacent_dev_remove() argument
7609 dev->name, adj_dev->name, ref_nr); in __netdev_adjacent_dev_remove()
7615 dev->name, adj_dev->name); in __netdev_adjacent_dev_remove()
7622 dev->name, adj_dev->name, ref_nr, in __netdev_adjacent_dev_remove()
7629 sysfs_remove_link(&(dev->dev.kobj), "master"); in __netdev_adjacent_dev_remove()
7631 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) in __netdev_adjacent_dev_remove()
7632 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); in __netdev_adjacent_dev_remove()
7636 adj_dev->name, dev->name, adj_dev->name); in __netdev_adjacent_dev_remove()
7641 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, in __netdev_adjacent_dev_link_lists() argument
7649 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, in __netdev_adjacent_dev_link_lists()
7654 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, in __netdev_adjacent_dev_link_lists()
7657 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); in __netdev_adjacent_dev_link_lists()
7664 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, in __netdev_adjacent_dev_unlink_lists() argument
7670 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); in __netdev_adjacent_dev_unlink_lists()
7671 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); in __netdev_adjacent_dev_unlink_lists()
7674 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, in __netdev_adjacent_dev_link_neighbour() argument
7678 return __netdev_adjacent_dev_link_lists(dev, upper_dev, in __netdev_adjacent_dev_link_neighbour()
7679 &dev->adj_list.upper, in __netdev_adjacent_dev_link_neighbour()
7684 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, in __netdev_adjacent_dev_unlink_neighbour() argument
7687 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, in __netdev_adjacent_dev_unlink_neighbour()
7688 &dev->adj_list.upper, in __netdev_adjacent_dev_unlink_neighbour()
7692 static int __netdev_upper_dev_link(struct net_device *dev, in __netdev_upper_dev_link() argument
7700 .dev = dev, in __netdev_upper_dev_link()
7713 if (dev == upper_dev) in __netdev_upper_dev_link()
7717 if (__netdev_has_upper_dev(upper_dev, dev)) in __netdev_upper_dev_link()
7720 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) in __netdev_upper_dev_link()
7724 if (__netdev_has_upper_dev(dev, upper_dev)) in __netdev_upper_dev_link()
7727 master_dev = __netdev_master_upper_dev_get(dev); in __netdev_upper_dev_link()
7738 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, in __netdev_upper_dev_link()
7749 __netdev_update_upper_level(dev, NULL); in __netdev_upper_dev_link()
7750 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); in __netdev_upper_dev_link()
7759 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); in __netdev_upper_dev_link()
7775 int netdev_upper_dev_link(struct net_device *dev, in netdev_upper_dev_link() argument
7784 return __netdev_upper_dev_link(dev, upper_dev, false, in netdev_upper_dev_link()
7803 int netdev_master_upper_dev_link(struct net_device *dev, in netdev_master_upper_dev_link() argument
7813 return __netdev_upper_dev_link(dev, upper_dev, true, in netdev_master_upper_dev_link()
7818 static void __netdev_upper_dev_unlink(struct net_device *dev, in __netdev_upper_dev_unlink() argument
7824 .dev = dev, in __netdev_upper_dev_unlink()
7832 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; in __netdev_upper_dev_unlink()
7837 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); in __netdev_upper_dev_unlink()
7842 __netdev_update_upper_level(dev, NULL); in __netdev_upper_dev_unlink()
7843 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); in __netdev_upper_dev_unlink()
7858 void netdev_upper_dev_unlink(struct net_device *dev, in netdev_upper_dev_unlink() argument
7866 __netdev_upper_dev_unlink(dev, upper_dev, &priv); in netdev_upper_dev_unlink()
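
The upper/lower link helpers maintain the adjacency lists walked throughout this section; masters (bond, bridge, team) use the _master_ variant so netdev_master_upper_dev_get() finds them. A sketch of enslaving and releasing a port under RTNL:

static int example_enslave(struct net_device *master,
                           struct net_device *port,
                           struct netlink_ext_ack *extack)
{
        int err;

        ASSERT_RTNL();

        err = netdev_master_upper_dev_link(port, master, NULL, NULL, extack);
        if (err)
                return err;             /* e.g. -EBUSY, -EEXIST, -EMLINK */

        /* ... port now lists master in its adj_list.upper ... */

        netdev_upper_dev_unlink(port, master);
        return 0;
}
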
7899 struct net_device *dev, in netdev_adjacent_change_prepare() argument
7912 netdev_adjacent_dev_disable(dev, old_dev); in netdev_adjacent_change_prepare()
7913 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, in netdev_adjacent_change_prepare()
7917 netdev_adjacent_dev_enable(dev, old_dev); in netdev_adjacent_change_prepare()
7927 struct net_device *dev) in netdev_adjacent_change_commit() argument
7940 netdev_adjacent_dev_enable(dev, old_dev); in netdev_adjacent_change_commit()
7941 __netdev_upper_dev_unlink(old_dev, dev, &priv); in netdev_adjacent_change_commit()
7947 struct net_device *dev) in netdev_adjacent_change_abort() argument
7958 netdev_adjacent_dev_enable(dev, old_dev); in netdev_adjacent_change_abort()
7960 __netdev_upper_dev_unlink(new_dev, dev, &priv); in netdev_adjacent_change_abort()
7972 void netdev_bonding_info_change(struct net_device *dev, in netdev_bonding_info_change() argument
7976 .info.dev = dev, in netdev_bonding_info_change()
7986 static int netdev_offload_xstats_enable_l3(struct net_device *dev, in netdev_offload_xstats_enable_l3() argument
7990 .info.dev = dev, in netdev_offload_xstats_enable_l3()
7997 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), in netdev_offload_xstats_enable_l3()
7999 if (!dev->offload_xstats_l3) in netdev_offload_xstats_enable_l3()
8012 kfree(dev->offload_xstats_l3); in netdev_offload_xstats_enable_l3()
8013 dev->offload_xstats_l3 = NULL; in netdev_offload_xstats_enable_l3()
8017 int netdev_offload_xstats_enable(struct net_device *dev, in netdev_offload_xstats_enable() argument
8023 if (netdev_offload_xstats_enabled(dev, type)) in netdev_offload_xstats_enable()
8028 return netdev_offload_xstats_enable_l3(dev, extack); in netdev_offload_xstats_enable()
8036 static void netdev_offload_xstats_disable_l3(struct net_device *dev) in netdev_offload_xstats_disable_l3() argument
8039 .info.dev = dev, in netdev_offload_xstats_disable_l3()
8045 kfree(dev->offload_xstats_l3); in netdev_offload_xstats_disable_l3()
8046 dev->offload_xstats_l3 = NULL; in netdev_offload_xstats_disable_l3()
8049 int netdev_offload_xstats_disable(struct net_device *dev, in netdev_offload_xstats_disable() argument
8054 if (!netdev_offload_xstats_enabled(dev, type)) in netdev_offload_xstats_disable()
8059 netdev_offload_xstats_disable_l3(dev); in netdev_offload_xstats_disable()
8068 static void netdev_offload_xstats_disable_all(struct net_device *dev) in netdev_offload_xstats_disable_all() argument
8070 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); in netdev_offload_xstats_disable_all()
8074 netdev_offload_xstats_get_ptr(const struct net_device *dev, in netdev_offload_xstats_get_ptr() argument
8079 return dev->offload_xstats_l3; in netdev_offload_xstats_get_ptr()
8086 bool netdev_offload_xstats_enabled(const struct net_device *dev, in netdev_offload_xstats_enabled() argument
8091 return netdev_offload_xstats_get_ptr(dev, type); in netdev_offload_xstats_enabled()
8118 static int netdev_offload_xstats_get_used(struct net_device *dev, in netdev_offload_xstats_get_used() argument
8125 .info.dev = dev, in netdev_offload_xstats_get_used()
8132 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); in netdev_offload_xstats_get_used()
8139 static int netdev_offload_xstats_get_stats(struct net_device *dev, in netdev_offload_xstats_get_stats() argument
8147 .info.dev = dev, in netdev_offload_xstats_get_stats()
8155 stats = netdev_offload_xstats_get_ptr(dev, type); in netdev_offload_xstats_get_stats()
8174 int netdev_offload_xstats_get(struct net_device *dev, in netdev_offload_xstats_get() argument
8182 return netdev_offload_xstats_get_stats(dev, type, p_stats, in netdev_offload_xstats_get()
8185 return netdev_offload_xstats_get_used(dev, type, p_used, in netdev_offload_xstats_get()
8206 void netdev_offload_xstats_push_delta(struct net_device *dev, in netdev_offload_xstats_push_delta() argument
8214 stats = netdev_offload_xstats_get_ptr(dev, type); in netdev_offload_xstats_push_delta()
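Taken together, netdev_offload_xstats_enable/get/disable() give an RTNL-held consumer on/off and readout control over hardware-gathered stats; L3 is the only type wired up above. A hedged usage sketch (my_hw_l3_sample() is hypothetical):

static int my_hw_l3_sample(struct net_device *dev,
                           struct rtnl_link_stats64 *stats,
                           struct netlink_ext_ack *extack)
{
        bool used;
        int err;

        ASSERT_RTNL();
        err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
                                           extack);
        if (err)
                return err;     /* enabling twice is rejected */

        /* "used" reports whether any driver actually tracks these stats */
        err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
                                        stats, &used, extack);

        netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
        return err;
}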
8233 struct net_device *netdev_get_xmit_slave(struct net_device *dev, in netdev_get_xmit_slave() argument
8237 const struct net_device_ops *ops = dev->netdev_ops; in netdev_get_xmit_slave()
8241 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); in netdev_get_xmit_slave()
8245 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, in netdev_sk_get_lower_dev() argument
8248 const struct net_device_ops *ops = dev->netdev_ops; in netdev_sk_get_lower_dev()
8252 return ops->ndo_sk_get_lower_dev(dev, sk); in netdev_sk_get_lower_dev()
8263 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, in netdev_sk_get_lowest_dev() argument
8268 lower = netdev_sk_get_lower_dev(dev, sk); in netdev_sk_get_lowest_dev()
8270 dev = lower; in netdev_sk_get_lowest_dev()
8271 lower = netdev_sk_get_lower_dev(dev, sk); in netdev_sk_get_lowest_dev()
8274 return dev; in netdev_sk_get_lowest_dev()
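netdev_sk_get_lowest_dev() simply iterates ndo_sk_get_lower_dev() until no device stacks below, so a TLS-offload-style caller can map a socket on a bond/team upper to the physical port it will egress through. Sketch (locking of the real callers elided):

static struct net_device *my_egress_port(struct net_device *bond_dev,
                                         struct sock *sk)
{
        /* returns bond_dev itself when nothing stacks below it */
        return netdev_sk_get_lowest_dev(bond_dev, sk);
}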
8278 static void netdev_adjacent_add_links(struct net_device *dev) in netdev_adjacent_add_links() argument
8282 struct net *net = dev_net(dev); in netdev_adjacent_add_links()
8284 list_for_each_entry(iter, &dev->adj_list.upper, list) { in netdev_adjacent_add_links()
8285 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_add_links()
8287 netdev_adjacent_sysfs_add(iter->dev, dev, in netdev_adjacent_add_links()
8288 &iter->dev->adj_list.lower); in netdev_adjacent_add_links()
8289 netdev_adjacent_sysfs_add(dev, iter->dev, in netdev_adjacent_add_links()
8290 &dev->adj_list.upper); in netdev_adjacent_add_links()
8293 list_for_each_entry(iter, &dev->adj_list.lower, list) { in netdev_adjacent_add_links()
8294 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_add_links()
8296 netdev_adjacent_sysfs_add(iter->dev, dev, in netdev_adjacent_add_links()
8297 &iter->dev->adj_list.upper); in netdev_adjacent_add_links()
8298 netdev_adjacent_sysfs_add(dev, iter->dev, in netdev_adjacent_add_links()
8299 &dev->adj_list.lower); in netdev_adjacent_add_links()
8303 static void netdev_adjacent_del_links(struct net_device *dev) in netdev_adjacent_del_links() argument
8307 struct net *net = dev_net(dev); in netdev_adjacent_del_links()
8309 list_for_each_entry(iter, &dev->adj_list.upper, list) { in netdev_adjacent_del_links()
8310 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_del_links()
8312 netdev_adjacent_sysfs_del(iter->dev, dev->name, in netdev_adjacent_del_links()
8313 &iter->dev->adj_list.lower); in netdev_adjacent_del_links()
8314 netdev_adjacent_sysfs_del(dev, iter->dev->name, in netdev_adjacent_del_links()
8315 &dev->adj_list.upper); in netdev_adjacent_del_links()
8318 list_for_each_entry(iter, &dev->adj_list.lower, list) { in netdev_adjacent_del_links()
8319 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_del_links()
8321 netdev_adjacent_sysfs_del(iter->dev, dev->name, in netdev_adjacent_del_links()
8322 &iter->dev->adj_list.upper); in netdev_adjacent_del_links()
8323 netdev_adjacent_sysfs_del(dev, iter->dev->name, in netdev_adjacent_del_links()
8324 &dev->adj_list.lower); in netdev_adjacent_del_links()
8328 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) in netdev_adjacent_rename_links() argument
8332 struct net *net = dev_net(dev); in netdev_adjacent_rename_links()
8334 list_for_each_entry(iter, &dev->adj_list.upper, list) { in netdev_adjacent_rename_links()
8335 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_rename_links()
8337 netdev_adjacent_sysfs_del(iter->dev, oldname, in netdev_adjacent_rename_links()
8338 &iter->dev->adj_list.lower); in netdev_adjacent_rename_links()
8339 netdev_adjacent_sysfs_add(iter->dev, dev, in netdev_adjacent_rename_links()
8340 &iter->dev->adj_list.lower); in netdev_adjacent_rename_links()
8343 list_for_each_entry(iter, &dev->adj_list.lower, list) { in netdev_adjacent_rename_links()
8344 if (!net_eq(net, dev_net(iter->dev))) in netdev_adjacent_rename_links()
8346 netdev_adjacent_sysfs_del(iter->dev, oldname, in netdev_adjacent_rename_links()
8347 &iter->dev->adj_list.upper); in netdev_adjacent_rename_links()
8348 netdev_adjacent_sysfs_add(iter->dev, dev, in netdev_adjacent_rename_links()
8349 &iter->dev->adj_list.upper); in netdev_adjacent_rename_links()
8353 void *netdev_lower_dev_get_private(struct net_device *dev, in netdev_lower_dev_get_private() argument
8360 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); in netdev_lower_dev_get_private()
8381 .info.dev = lower_dev, in netdev_lower_state_changed()
8391 static void dev_change_rx_flags(struct net_device *dev, int flags) in dev_change_rx_flags() argument
8393 const struct net_device_ops *ops = dev->netdev_ops; in dev_change_rx_flags()
8396 ops->ndo_change_rx_flags(dev, flags); in dev_change_rx_flags()
8399 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) in __dev_set_promiscuity() argument
8401 unsigned int old_flags = dev->flags; in __dev_set_promiscuity()
8407 dev->flags |= IFF_PROMISC; in __dev_set_promiscuity()
8408 dev->promiscuity += inc; in __dev_set_promiscuity()
8409 if (dev->promiscuity == 0) { in __dev_set_promiscuity()
8415 dev->flags &= ~IFF_PROMISC; in __dev_set_promiscuity()
8417 dev->promiscuity -= inc; in __dev_set_promiscuity()
8418 …netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device … in __dev_set_promiscuity()
8422 if (dev->flags != old_flags) { in __dev_set_promiscuity()
8423 netdev_info(dev, "%s promiscuous mode\n", in __dev_set_promiscuity()
8424 dev->flags & IFF_PROMISC ? "entered" : "left"); in __dev_set_promiscuity()
8430 dev->name, (dev->flags & IFF_PROMISC), in __dev_set_promiscuity()
8438 dev_change_rx_flags(dev, IFF_PROMISC); in __dev_set_promiscuity()
8441 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); in __dev_set_promiscuity()
8456 int dev_set_promiscuity(struct net_device *dev, int inc) in dev_set_promiscuity() argument
8458 unsigned int old_flags = dev->flags; in dev_set_promiscuity()
8461 err = __dev_set_promiscuity(dev, inc, true); in dev_set_promiscuity()
8464 if (dev->flags != old_flags) in dev_set_promiscuity()
8465 dev_set_rx_mode(dev); in dev_set_promiscuity()
8470 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) in __dev_set_allmulti() argument
8472 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; in __dev_set_allmulti()
8476 dev->flags |= IFF_ALLMULTI; in __dev_set_allmulti()
8477 dev->allmulti += inc; in __dev_set_allmulti()
8478 if (dev->allmulti == 0) { in __dev_set_allmulti()
8484 dev->flags &= ~IFF_ALLMULTI; in __dev_set_allmulti()
8486 dev->allmulti -= inc; in __dev_set_allmulti()
8487 …netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be … in __dev_set_allmulti()
8491 if (dev->flags ^ old_flags) { in __dev_set_allmulti()
8492 netdev_info(dev, "%s allmulticast mode\n", in __dev_set_allmulti()
8493 dev->flags & IFF_ALLMULTI ? "entered" : "left"); in __dev_set_allmulti()
8494 dev_change_rx_flags(dev, IFF_ALLMULTI); in __dev_set_allmulti()
8495 dev_set_rx_mode(dev); in __dev_set_allmulti()
8497 __dev_notify_flags(dev, old_flags, in __dev_set_allmulti()
8498 dev->gflags ^ old_gflags, 0, NULL); in __dev_set_allmulti()
8516 int dev_set_allmulti(struct net_device *dev, int inc) in dev_set_allmulti() argument
8518 return __dev_set_allmulti(dev, inc, true); in dev_set_allmulti()
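Both counters are reference counts, not booleans: each user passes +1/-1 and the flag only clears when the count reaches zero, with the "touches roof" warnings above guarding against wraps. A sketch of a capture-style user (names hypothetical), under RTNL:

static int my_capture_start(struct net_device *dev)
{
        int err;

        err = dev_set_promiscuity(dev, 1);
        if (err)
                return err;
        err = dev_set_allmulti(dev, 1);
        if (err)
                dev_set_promiscuity(dev, -1);
        return err;
}

static void my_capture_stop(struct net_device *dev)
{
        dev_set_allmulti(dev, -1);
        dev_set_promiscuity(dev, -1);
}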
8528 void __dev_set_rx_mode(struct net_device *dev) in __dev_set_rx_mode() argument
8530 const struct net_device_ops *ops = dev->netdev_ops; in __dev_set_rx_mode()
8533 if (!(dev->flags&IFF_UP)) in __dev_set_rx_mode()
8536 if (!netif_device_present(dev)) in __dev_set_rx_mode()
8539 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { in __dev_set_rx_mode()
8543 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { in __dev_set_rx_mode()
8544 __dev_set_promiscuity(dev, 1, false); in __dev_set_rx_mode()
8545 dev->uc_promisc = true; in __dev_set_rx_mode()
8546 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { in __dev_set_rx_mode()
8547 __dev_set_promiscuity(dev, -1, false); in __dev_set_rx_mode()
8548 dev->uc_promisc = false; in __dev_set_rx_mode()
8553 ops->ndo_set_rx_mode(dev); in __dev_set_rx_mode()
8556 void dev_set_rx_mode(struct net_device *dev) in dev_set_rx_mode() argument
8558 netif_addr_lock_bh(dev); in dev_set_rx_mode()
8559 __dev_set_rx_mode(dev); in dev_set_rx_mode()
8560 netif_addr_unlock_bh(dev); in dev_set_rx_mode()
8569 unsigned int dev_get_flags(const struct net_device *dev) in dev_get_flags() argument
8573 flags = (dev->flags & ~(IFF_PROMISC | in dev_get_flags()
8578 (dev->gflags & (IFF_PROMISC | in dev_get_flags()
8581 if (netif_running(dev)) { in dev_get_flags()
8582 if (netif_oper_up(dev)) in dev_get_flags()
8584 if (netif_carrier_ok(dev)) in dev_get_flags()
8586 if (netif_dormant(dev)) in dev_get_flags()
8594 int __dev_change_flags(struct net_device *dev, unsigned int flags, in __dev_change_flags() argument
8597 unsigned int old_flags = dev->flags; in __dev_change_flags()
8606 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | in __dev_change_flags()
8609 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | in __dev_change_flags()
8617 dev_change_rx_flags(dev, IFF_MULTICAST); in __dev_change_flags()
8619 dev_set_rx_mode(dev); in __dev_change_flags()
8630 __dev_close(dev); in __dev_change_flags()
8632 ret = __dev_open(dev, extack); in __dev_change_flags()
8635 if ((flags ^ dev->gflags) & IFF_PROMISC) { in __dev_change_flags()
8637 unsigned int old_flags = dev->flags; in __dev_change_flags()
8639 dev->gflags ^= IFF_PROMISC; in __dev_change_flags()
8641 if (__dev_set_promiscuity(dev, inc, false) >= 0) in __dev_change_flags()
8642 if (dev->flags != old_flags) in __dev_change_flags()
8643 dev_set_rx_mode(dev); in __dev_change_flags()
8650 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { in __dev_change_flags()
8653 dev->gflags ^= IFF_ALLMULTI; in __dev_change_flags()
8654 __dev_set_allmulti(dev, inc, false); in __dev_change_flags()
8660 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, in __dev_notify_flags() argument
8664 unsigned int changes = dev->flags ^ old_flags; in __dev_notify_flags()
8667 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); in __dev_notify_flags()
8670 if (dev->flags & IFF_UP) in __dev_notify_flags()
8671 call_netdevice_notifiers(NETDEV_UP, dev); in __dev_notify_flags()
8673 call_netdevice_notifiers(NETDEV_DOWN, dev); in __dev_notify_flags()
8676 if (dev->flags & IFF_UP && in __dev_notify_flags()
8680 .dev = dev, in __dev_notify_flags()
8698 int dev_change_flags(struct net_device *dev, unsigned int flags, in dev_change_flags() argument
8702 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; in dev_change_flags()
8704 ret = __dev_change_flags(dev, flags, extack); in dev_change_flags()
8708 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); in dev_change_flags()
8709 __dev_notify_flags(dev, old_flags, changes, 0, NULL); in dev_change_flags()
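dev_change_flags() is the SIOCSIFFLAGS backend: __dev_change_flags() applies the writable bits (opening/closing the device on IFF_UP transitions, routing IFF_PROMISC/IFF_ALLMULTI through the refcounted helpers above via gflags) and __dev_notify_flags() emits RTM_NEWLINK plus NETDEV_UP/DOWN/CHANGE. A sketch of the in-kernel equivalent of "ip link set up" (my_admin_up() is hypothetical):

static int my_admin_up(struct net_device *dev, struct netlink_ext_ack *extack)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);
        rtnl_unlock();
        return err;
}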
8714 int __dev_set_mtu(struct net_device *dev, int new_mtu) in __dev_set_mtu() argument
8716 const struct net_device_ops *ops = dev->netdev_ops; in __dev_set_mtu()
8719 return ops->ndo_change_mtu(dev, new_mtu); in __dev_set_mtu()
8722 WRITE_ONCE(dev->mtu, new_mtu); in __dev_set_mtu()
8727 int dev_validate_mtu(struct net_device *dev, int new_mtu, in dev_validate_mtu() argument
8731 if (new_mtu < 0 || new_mtu < dev->min_mtu) { in dev_validate_mtu()
8736 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { in dev_validate_mtu()
8751 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, in dev_set_mtu_ext() argument
8756 if (new_mtu == dev->mtu) in dev_set_mtu_ext()
8759 err = dev_validate_mtu(dev, new_mtu, extack); in dev_set_mtu_ext()
8763 if (!netif_device_present(dev)) in dev_set_mtu_ext()
8766 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); in dev_set_mtu_ext()
8771 orig_mtu = dev->mtu; in dev_set_mtu_ext()
8772 err = __dev_set_mtu(dev, new_mtu); in dev_set_mtu_ext()
8775 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, in dev_set_mtu_ext()
8782 __dev_set_mtu(dev, orig_mtu); in dev_set_mtu_ext()
8783 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, in dev_set_mtu_ext()
8790 int dev_set_mtu(struct net_device *dev, int new_mtu) in dev_set_mtu() argument
8796 err = dev_set_mtu_ext(dev, new_mtu, &extack); in dev_set_mtu()
8798 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); in dev_set_mtu()
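Note the rollback in dev_set_mtu_ext(): if a NETDEV_CHANGEMTU notifier vetoes the new value, the MTU is restored to orig_mtu and the notifier chain is replayed with the old value. Callers normally need only the simple form, with RTNL already held:

static int my_enable_jumbo(struct net_device *dev)
{
        ASSERT_RTNL();
        /* dev->min_mtu/max_mtu bounds are enforced by dev_validate_mtu() */
        return dev_set_mtu(dev, 9000);
}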
8808 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) in dev_change_tx_queue_len() argument
8810 unsigned int orig_len = dev->tx_queue_len; in dev_change_tx_queue_len()
8817 dev->tx_queue_len = new_len; in dev_change_tx_queue_len()
8818 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); in dev_change_tx_queue_len()
8822 res = dev_qdisc_change_tx_queue_len(dev); in dev_change_tx_queue_len()
8830 netdev_err(dev, "refused to change device tx_queue_len\n"); in dev_change_tx_queue_len()
8831 dev->tx_queue_len = orig_len; in dev_change_tx_queue_len()
8840 void dev_set_group(struct net_device *dev, int new_group) in dev_set_group() argument
8842 dev->group = new_group; in dev_set_group()
8851 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, in dev_pre_changeaddr_notify() argument
8855 .info.dev = dev, in dev_pre_changeaddr_notify()
8874 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, in dev_set_mac_address() argument
8877 const struct net_device_ops *ops = dev->netdev_ops; in dev_set_mac_address()
8882 if (sa->sa_family != dev->type) in dev_set_mac_address()
8884 if (!netif_device_present(dev)) in dev_set_mac_address()
8886 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); in dev_set_mac_address()
8889 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) { in dev_set_mac_address()
8890 err = ops->ndo_set_mac_address(dev, sa); in dev_set_mac_address()
8894 dev->addr_assign_type = NET_ADDR_SET; in dev_set_mac_address()
8895 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); in dev_set_mac_address()
8896 add_device_randomness(dev->dev_addr, dev->addr_len); in dev_set_mac_address()
8903 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, in dev_set_mac_address_user() argument
8909 ret = dev_set_mac_address(dev, sa, extack); in dev_set_mac_address_user()
8918 struct net_device *dev; in dev_get_mac_address() local
8924 dev = dev_get_by_name_rcu(net, dev_name); in dev_get_mac_address()
8925 if (!dev) { in dev_get_mac_address()
8929 if (!dev->addr_len) in dev_get_mac_address()
8932 memcpy(sa->sa_data, dev->dev_addr, in dev_get_mac_address()
8933 min_t(size_t, size, dev->addr_len)); in dev_get_mac_address()
8934 sa->sa_family = dev->type; in dev_get_mac_address()
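dev_set_mac_address() insists that sa_family match dev->type and fires NETDEV_PRE_CHANGEADDR (giving listeners a veto) before the driver's ndo_set_mac_address and the NETDEV_CHANGEADDR notification. A hedged sketch for an Ethernet device, under RTNL (my_set_mac() is hypothetical):

static int my_set_mac(struct net_device *dev, const u8 *addr,
                      struct netlink_ext_ack *extack)
{
        struct sockaddr sa;

        sa.sa_family = dev->type;       /* ARPHRD_ETHER for Ethernet */
        memcpy(sa.sa_data, addr, dev->addr_len);
        return dev_set_mac_address(dev, &sa, extack);
}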
8950 int dev_change_carrier(struct net_device *dev, bool new_carrier) in dev_change_carrier() argument
8952 const struct net_device_ops *ops = dev->netdev_ops; in dev_change_carrier()
8956 if (!netif_device_present(dev)) in dev_change_carrier()
8958 return ops->ndo_change_carrier(dev, new_carrier); in dev_change_carrier()
8968 int dev_get_phys_port_id(struct net_device *dev, in dev_get_phys_port_id() argument
8971 const struct net_device_ops *ops = dev->netdev_ops; in dev_get_phys_port_id()
8975 return ops->ndo_get_phys_port_id(dev, ppid); in dev_get_phys_port_id()
8986 int dev_get_phys_port_name(struct net_device *dev, in dev_get_phys_port_name() argument
8989 const struct net_device_ops *ops = dev->netdev_ops; in dev_get_phys_port_name()
8993 err = ops->ndo_get_phys_port_name(dev, name, len); in dev_get_phys_port_name()
8997 return devlink_compat_phys_port_name_get(dev, name, len); in dev_get_phys_port_name()
9008 int dev_get_port_parent_id(struct net_device *dev, in dev_get_port_parent_id() argument
9012 const struct net_device_ops *ops = dev->netdev_ops; in dev_get_port_parent_id()
9019 err = ops->ndo_get_port_parent_id(dev, ppid); in dev_get_port_parent_id()
9024 err = devlink_compat_switch_id_get(dev, ppid); in dev_get_port_parent_id()
9028 netdev_for_each_lower_dev(dev, lower_dev, iter) { in dev_get_port_parent_id()
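dev_get_port_parent_id() tries ndo_get_port_parent_id, then the devlink compat path, then (when asked to recurse) requires every lower device to report the same ID, which is how switchdev decides that two netdevs are ports of one ASIC. A sketch of that comparison (my_same_switch() is hypothetical):

static bool my_same_switch(struct net_device *a, struct net_device *b)
{
        struct netdev_phys_item_id id_a, id_b;

        if (dev_get_port_parent_id(a, &id_a, true) ||
            dev_get_port_parent_id(b, &id_b, true))
                return false;   /* e.g. -EOPNOTSUPP: not a switch port */

        return id_a.id_len == id_b.id_len &&
               !memcmp(id_a.id, id_b.id, id_a.id_len);
}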
9067 int dev_change_proto_down(struct net_device *dev, bool proto_down) in dev_change_proto_down() argument
9069 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) in dev_change_proto_down()
9071 if (!netif_device_present(dev)) in dev_change_proto_down()
9074 netif_carrier_off(dev); in dev_change_proto_down()
9076 netif_carrier_on(dev); in dev_change_proto_down()
9077 dev->proto_down = proto_down; in dev_change_proto_down()
9088 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, in dev_change_proto_down_reason() argument
9094 dev->proto_down_reason = value; in dev_change_proto_down_reason()
9098 dev->proto_down_reason |= BIT(b); in dev_change_proto_down_reason()
9100 dev->proto_down_reason &= ~BIT(b); in dev_change_proto_down_reason()
9107 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ member
9111 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) in dev_xdp_mode() argument
9119 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; in dev_xdp_mode()
9122 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) in dev_xdp_bpf_op() argument
9129 return dev->netdev_ops->ndo_bpf; in dev_xdp_bpf_op()
9135 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, in dev_xdp_link() argument
9138 return dev->xdp_state[mode].link; in dev_xdp_link()
9141 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, in dev_xdp_prog() argument
9144 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); in dev_xdp_prog()
9148 return dev->xdp_state[mode].prog; in dev_xdp_prog()
9151 u8 dev_xdp_prog_count(struct net_device *dev) in dev_xdp_prog_count() argument
9157 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) in dev_xdp_prog_count()
9163 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) in dev_xdp_prog_id() argument
9165 struct bpf_prog *prog = dev_xdp_prog(dev, mode); in dev_xdp_prog_id()
9170 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, in dev_xdp_set_link() argument
9173 dev->xdp_state[mode].link = link; in dev_xdp_set_link()
9174 dev->xdp_state[mode].prog = NULL; in dev_xdp_set_link()
9177 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, in dev_xdp_set_prog() argument
9180 dev->xdp_state[mode].link = NULL; in dev_xdp_set_prog()
9181 dev->xdp_state[mode].prog = prog; in dev_xdp_set_prog()
9184 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, in dev_xdp_install() argument
9205 err = bpf_op(dev, &xdp); in dev_xdp_install()
9213 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); in dev_xdp_install()
9218 static void dev_xdp_uninstall(struct net_device *dev) in dev_xdp_uninstall() argument
9228 prog = dev_xdp_prog(dev, mode); in dev_xdp_uninstall()
9232 bpf_op = dev_xdp_bpf_op(dev, mode); in dev_xdp_uninstall()
9236 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); in dev_xdp_uninstall()
9239 link = dev_xdp_link(dev, mode); in dev_xdp_uninstall()
9241 link->dev = NULL; in dev_xdp_uninstall()
9245 dev_xdp_set_link(dev, mode, NULL); in dev_xdp_uninstall()
9249 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, in dev_xdp_attach() argument
9277 if (!num_modes && dev_xdp_prog_count(dev) > 1) { in dev_xdp_attach()
9288 mode = dev_xdp_mode(dev, flags); in dev_xdp_attach()
9290 if (dev_xdp_link(dev, mode)) { in dev_xdp_attach()
9296 netdev_for_each_upper_dev_rcu(dev, upper, iter) { in dev_xdp_attach()
9303 cur_prog = dev_xdp_prog(dev, mode); in dev_xdp_attach()
9327 if (!offload && dev_xdp_prog(dev, other_mode)) { in dev_xdp_attach()
9335 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { in dev_xdp_attach()
9351 bpf_op = dev_xdp_bpf_op(dev, mode); in dev_xdp_attach()
9357 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); in dev_xdp_attach()
9363 dev_xdp_set_link(dev, mode, link); in dev_xdp_attach()
9365 dev_xdp_set_prog(dev, mode, new_prog); in dev_xdp_attach()
9372 static int dev_xdp_attach_link(struct net_device *dev, in dev_xdp_attach_link() argument
9376 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); in dev_xdp_attach_link()
9379 static int dev_xdp_detach_link(struct net_device *dev, in dev_xdp_detach_link() argument
9388 mode = dev_xdp_mode(dev, link->flags); in dev_xdp_detach_link()
9389 if (dev_xdp_link(dev, mode) != link) in dev_xdp_detach_link()
9392 bpf_op = dev_xdp_bpf_op(dev, mode); in dev_xdp_detach_link()
9393 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); in dev_xdp_detach_link()
9394 dev_xdp_set_link(dev, mode, NULL); in dev_xdp_detach_link()
9407 if (xdp_link->dev) { in bpf_xdp_link_release()
9408 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); in bpf_xdp_link_release()
9409 xdp_link->dev = NULL; in bpf_xdp_link_release()
9435 if (xdp_link->dev) in bpf_xdp_link_show_fdinfo()
9436 ifindex = xdp_link->dev->ifindex; in bpf_xdp_link_show_fdinfo()
9449 if (xdp_link->dev) in bpf_xdp_link_fill_link_info()
9450 ifindex = xdp_link->dev->ifindex; in bpf_xdp_link_fill_link_info()
9468 if (!xdp_link->dev) { in bpf_xdp_link_update()
9490 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); in bpf_xdp_link_update()
9491 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); in bpf_xdp_link_update()
9492 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, in bpf_xdp_link_update()
9520 struct net_device *dev; in bpf_xdp_link_attach() local
9524 dev = dev_get_by_index(net, attr->link_create.target_ifindex); in bpf_xdp_link_attach()
9525 if (!dev) { in bpf_xdp_link_attach()
9537 link->dev = dev; in bpf_xdp_link_attach()
9546 err = dev_xdp_attach_link(dev, &extack, link); in bpf_xdp_link_attach()
9550 link->dev = NULL; in bpf_xdp_link_attach()
9558 dev_put(dev); in bpf_xdp_link_attach()
9565 dev_put(dev); in bpf_xdp_link_attach()
9579 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, in dev_change_xdp_fd() argument
9582 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); in dev_change_xdp_fd()
9605 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); in dev_change_xdp_fd()
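On the driver side, dev_xdp_install() funnels everything into a single ndo_bpf callback carrying a struct netdev_bpf command. A minimal sketch of the receiving end, with reference handling modeled on the generic-XDP pattern in this file; mydrv_priv and its __rcu xdp_prog field are assumptions:

static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct mydrv_priv *priv = netdev_priv(dev);
        struct bpf_prog *old;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                /* RTNL is held on this path, see dev_xdp_attach() */
                old = rtnl_dereference(priv->xdp_prog);
                rcu_assign_pointer(priv->xdp_prog, xdp->prog);
                if (old)
                        bpf_prog_put(old);
                return 0;
        default:
                return -EINVAL;
        }
}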
9656 static void net_set_todo(struct net_device *dev) in net_set_todo() argument
9658 list_add_tail(&dev->todo_list, &net_todo_list); in net_set_todo()
9659 atomic_inc(&dev_net(dev)->dev_unreg_count); in net_set_todo()
9706 static netdev_features_t netdev_fix_features(struct net_device *dev, in netdev_fix_features() argument
9712 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); in netdev_fix_features()
9718 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); in netdev_fix_features()
9724 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); in netdev_fix_features()
9731 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); in netdev_fix_features()
9745 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); in netdev_fix_features()
9750 if ((features & dev->gso_partial_features) && in netdev_fix_features()
9752 netdev_dbg(dev, in netdev_fix_features()
9754 features &= ~dev->gso_partial_features; in netdev_fix_features()
9764 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); in netdev_fix_features()
9772 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); in netdev_fix_features()
9777 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); in netdev_fix_features()
9783 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); in netdev_fix_features()
9793 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); in netdev_fix_features()
9799 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); in netdev_fix_features()
9806 int __netdev_update_features(struct net_device *dev) in __netdev_update_features() argument
9815 features = netdev_get_wanted_features(dev); in __netdev_update_features()
9817 if (dev->netdev_ops->ndo_fix_features) in __netdev_update_features()
9818 features = dev->netdev_ops->ndo_fix_features(dev, features); in __netdev_update_features()
9821 features = netdev_fix_features(dev, features); in __netdev_update_features()
9824 netdev_for_each_upper_dev_rcu(dev, upper, iter) in __netdev_update_features()
9825 features = netdev_sync_upper_features(dev, upper, features); in __netdev_update_features()
9827 if (dev->features == features) in __netdev_update_features()
9830 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", in __netdev_update_features()
9831 &dev->features, &features); in __netdev_update_features()
9833 if (dev->netdev_ops->ndo_set_features) in __netdev_update_features()
9834 err = dev->netdev_ops->ndo_set_features(dev, features); in __netdev_update_features()
9839 netdev_err(dev, in __netdev_update_features()
9841 err, &features, &dev->features); in __netdev_update_features()
9852 netdev_for_each_lower_dev(dev, lower, iter) in __netdev_update_features()
9853 netdev_sync_lower_features(dev, lower, features); in __netdev_update_features()
9856 netdev_features_t diff = features ^ dev->features; in __netdev_update_features()
9867 dev->features = features; in __netdev_update_features()
9868 udp_tunnel_get_rx_info(dev); in __netdev_update_features()
9870 udp_tunnel_drop_rx_info(dev); in __netdev_update_features()
9876 dev->features = features; in __netdev_update_features()
9877 err |= vlan_get_rx_ctag_filter_info(dev); in __netdev_update_features()
9879 vlan_drop_rx_ctag_filter_info(dev); in __netdev_update_features()
9885 dev->features = features; in __netdev_update_features()
9886 err |= vlan_get_rx_stag_filter_info(dev); in __netdev_update_features()
9888 vlan_drop_rx_stag_filter_info(dev); in __netdev_update_features()
9892 dev->features = features; in __netdev_update_features()
9906 void netdev_update_features(struct net_device *dev) in netdev_update_features() argument
9908 if (__netdev_update_features(dev)) in netdev_update_features()
9909 netdev_features_change(dev); in netdev_update_features()
9923 void netdev_change_features(struct net_device *dev) in netdev_change_features() argument
9925 __netdev_update_features(dev); in netdev_change_features()
9926 netdev_features_change(dev); in netdev_change_features()
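The fixups above encode generic invariants (TSO needs SG and checksumming, HW-GRO needs RXCSUM, and so on); device-specific constraints belong in ndo_fix_features, and whenever a driver changes what it can offer it re-runs the negotiation with netdev_update_features(). A sketch with a hypothetical hardware rule:

/* hypothetical constraint: this hw can only do LRO with RX csum on */
static netdev_features_t mydrv_fix_features(struct net_device *dev,
                                            netdev_features_t features)
{
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;
        return features;
}

/* e.g. after a firmware event removed LRO support, with RTNL held: */
static void mydrv_lost_lro(struct net_device *dev)
{
        dev->hw_features &= ~NETIF_F_LRO;
        netdev_update_features(dev);    /* renegotiate + notify if changed */
}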
9940 struct net_device *dev) in netif_stacked_transfer_operstate() argument
9943 netif_dormant_on(dev); in netif_stacked_transfer_operstate()
9945 netif_dormant_off(dev); in netif_stacked_transfer_operstate()
9948 netif_testing_on(dev); in netif_stacked_transfer_operstate()
9950 netif_testing_off(dev); in netif_stacked_transfer_operstate()
9953 netif_carrier_on(dev); in netif_stacked_transfer_operstate()
9955 netif_carrier_off(dev); in netif_stacked_transfer_operstate()
9959 static int netif_alloc_rx_queues(struct net_device *dev) in netif_alloc_rx_queues() argument
9961 unsigned int i, count = dev->num_rx_queues; in netif_alloc_rx_queues()
9972 dev->_rx = rx; in netif_alloc_rx_queues()
9975 rx[i].dev = dev; in netif_alloc_rx_queues()
9978 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); in netif_alloc_rx_queues()
9988 kvfree(dev->_rx); in netif_alloc_rx_queues()
9989 dev->_rx = NULL; in netif_alloc_rx_queues()
9993 static void netif_free_rx_queues(struct net_device *dev) in netif_free_rx_queues() argument
9995 unsigned int i, count = dev->num_rx_queues; in netif_free_rx_queues()
9998 if (!dev->_rx) in netif_free_rx_queues()
10002 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); in netif_free_rx_queues()
10004 kvfree(dev->_rx); in netif_free_rx_queues()
10007 static void netdev_init_one_queue(struct net_device *dev, in netdev_init_one_queue() argument
10012 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); in netdev_init_one_queue()
10015 queue->dev = dev; in netdev_init_one_queue()
10021 static void netif_free_tx_queues(struct net_device *dev) in netif_free_tx_queues() argument
10023 kvfree(dev->_tx); in netif_free_tx_queues()
10026 static int netif_alloc_netdev_queues(struct net_device *dev) in netif_alloc_netdev_queues() argument
10028 unsigned int count = dev->num_tx_queues; in netif_alloc_netdev_queues()
10039 dev->_tx = tx; in netif_alloc_netdev_queues()
10041 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); in netif_alloc_netdev_queues()
10042 spin_lock_init(&dev->tx_global_lock); in netif_alloc_netdev_queues()
10047 void netif_tx_stop_all_queues(struct net_device *dev) in netif_tx_stop_all_queues() argument
10051 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_stop_all_queues()
10052 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_stop_all_queues()
10059 static int netdev_do_alloc_pcpu_stats(struct net_device *dev) in netdev_do_alloc_pcpu_stats() argument
10067 if (dev->netdev_ops->ndo_get_peer_dev && in netdev_do_alloc_pcpu_stats()
10068 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS) in netdev_do_alloc_pcpu_stats()
10071 switch (dev->pcpu_stat_type) { in netdev_do_alloc_pcpu_stats()
10075 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); in netdev_do_alloc_pcpu_stats()
10078 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); in netdev_do_alloc_pcpu_stats()
10081 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); in netdev_do_alloc_pcpu_stats()
10090 static void netdev_do_free_pcpu_stats(struct net_device *dev) in netdev_do_free_pcpu_stats() argument
10092 switch (dev->pcpu_stat_type) { in netdev_do_free_pcpu_stats()
10096 free_percpu(dev->lstats); in netdev_do_free_pcpu_stats()
10099 free_percpu(dev->tstats); in netdev_do_free_pcpu_stats()
10102 free_percpu(dev->dstats); in netdev_do_free_pcpu_stats()
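These two helpers implement core-managed per-CPU stats: a driver declares which flavor it wants before registration, and the allocation/free above then happen in the register_netdevice()/unregister paths rather than in driver code. Sketch (mydrv_setup() is hypothetical):

static void mydrv_setup(struct net_device *dev)
{
        ether_setup(dev);
        /* core allocates dev->tstats at register time and frees it for us */
        dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}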
10116 int register_netdevice(struct net_device *dev) in register_netdevice() argument
10119 struct net *net = dev_net(dev); in register_netdevice()
10129 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); in register_netdevice()
10132 ret = ethtool_check_ops(dev->ethtool_ops); in register_netdevice()
10136 spin_lock_init(&dev->addr_list_lock); in register_netdevice()
10137 netdev_set_addr_lockdep_class(dev); in register_netdevice()
10139 ret = dev_get_valid_name(net, dev, dev->name); in register_netdevice()
10144 dev->name_node = netdev_name_node_head_alloc(dev); in register_netdevice()
10145 if (!dev->name_node) in register_netdevice()
10149 if (dev->netdev_ops->ndo_init) { in register_netdevice()
10150 ret = dev->netdev_ops->ndo_init(dev); in register_netdevice()
10158 if (((dev->hw_features | dev->features) & in register_netdevice()
10160 (!dev->netdev_ops->ndo_vlan_rx_add_vid || in register_netdevice()
10161 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { in register_netdevice()
10162 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); in register_netdevice()
10167 ret = netdev_do_alloc_pcpu_stats(dev); in register_netdevice()
10171 ret = dev_index_reserve(net, dev->ifindex); in register_netdevice()
10174 dev->ifindex = ret; in register_netdevice()
10179 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); in register_netdevice()
10180 dev->features |= NETIF_F_SOFT_FEATURES; in register_netdevice()
10182 if (dev->udp_tunnel_nic_info) { in register_netdevice()
10183 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; in register_netdevice()
10184 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; in register_netdevice()
10187 dev->wanted_features = dev->features & dev->hw_features; in register_netdevice()
10189 if (!(dev->flags & IFF_LOOPBACK)) in register_netdevice()
10190 dev->hw_features |= NETIF_F_NOCACHE_COPY; in register_netdevice()
10197 if (dev->hw_features & NETIF_F_TSO) in register_netdevice()
10198 dev->hw_features |= NETIF_F_TSO_MANGLEID; in register_netdevice()
10199 if (dev->vlan_features & NETIF_F_TSO) in register_netdevice()
10200 dev->vlan_features |= NETIF_F_TSO_MANGLEID; in register_netdevice()
10201 if (dev->mpls_features & NETIF_F_TSO) in register_netdevice()
10202 dev->mpls_features |= NETIF_F_TSO_MANGLEID; in register_netdevice()
10203 if (dev->hw_enc_features & NETIF_F_TSO) in register_netdevice()
10204 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; in register_netdevice()
10208 dev->vlan_features |= NETIF_F_HIGHDMA; in register_netdevice()
10212 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; in register_netdevice()
10216 dev->mpls_features |= NETIF_F_SG; in register_netdevice()
10218 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); in register_netdevice()
10223 ret = netdev_register_kobject(dev); in register_netdevice()
10225 dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED; in register_netdevice()
10230 __netdev_update_features(dev); in register_netdevice()
10237 set_bit(__LINK_STATE_PRESENT, &dev->state); in register_netdevice()
10239 linkwatch_init_dev(dev); in register_netdevice()
10241 dev_init_scheduler(dev); in register_netdevice()
10243 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL); in register_netdevice()
10244 list_netdevice(dev); in register_netdevice()
10246 add_device_randomness(dev->dev_addr, dev->addr_len); in register_netdevice()
10252 if (dev->addr_assign_type == NET_ADDR_PERM) in register_netdevice()
10253 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); in register_netdevice()
10256 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); in register_netdevice()
10260 dev->needs_free_netdev = false; in register_netdevice()
10261 unregister_netdevice_queue(dev, NULL); in register_netdevice()
10268 if (!dev->rtnl_link_ops || in register_netdevice()
10269 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) in register_netdevice()
10270 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); in register_netdevice()
10276 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); in register_netdevice()
10278 dev_index_release(net, dev->ifindex); in register_netdevice()
10280 netdev_do_free_pcpu_stats(dev); in register_netdevice()
10282 if (dev->netdev_ops->ndo_uninit) in register_netdevice()
10283 dev->netdev_ops->ndo_uninit(dev); in register_netdevice()
10284 if (dev->priv_destructor) in register_netdevice()
10285 dev->priv_destructor(dev); in register_netdevice()
10287 netdev_name_node_free(dev->name_node); in register_netdevice()
10302 int init_dummy_netdev(struct net_device *dev) in init_dummy_netdev() argument
10309 memset(dev, 0, sizeof(struct net_device)); in init_dummy_netdev()
10314 dev->reg_state = NETREG_DUMMY; in init_dummy_netdev()
10317 INIT_LIST_HEAD(&dev->napi_list); in init_dummy_netdev()
10320 set_bit(__LINK_STATE_PRESENT, &dev->state); in init_dummy_netdev()
10321 set_bit(__LINK_STATE_START, &dev->state); in init_dummy_netdev()
10324 dev_net_set(dev, &init_net); in init_dummy_netdev()
10349 int register_netdev(struct net_device *dev) in register_netdev() argument
10355 err = register_netdevice(dev); in register_netdev()
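register_netdev() is just rtnl_lock() around register_netdevice() for callers that don't already hold RTNL. The canonical pairing with allocation and the error path, reusing the hypothetical mydrv_setup() sketched above:

static struct net_device *mydev;

static int __init mydrv_init(void)
{
        int err;

        mydev = alloc_netdev(0, "mydrv%d", NET_NAME_ENUM, mydrv_setup);
        if (!mydev)
                return -ENOMEM;

        err = register_netdev(mydev);
        if (err) {
                /* either still NETREG_UNINITIALIZED or already queued for
                 * unregister by register_netdevice(); free_netdev() copes
                 * with both */
                free_netdev(mydev);
                return err;
        }
        return 0;
}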
10361 int netdev_refcnt_read(const struct net_device *dev) in netdev_refcnt_read() argument
10367 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); in netdev_refcnt_read()
10370 return refcount_read(&dev->dev_refcnt); in netdev_refcnt_read()
10394 struct net_device *dev; in netdev_wait_allrefs_any() local
10399 list_for_each_entry(dev, list, todo_list) in netdev_wait_allrefs_any()
10400 if (netdev_refcnt_read(dev) == 1) in netdev_wait_allrefs_any()
10401 return dev; in netdev_wait_allrefs_any()
10408 list_for_each_entry(dev, list, todo_list) in netdev_wait_allrefs_any()
10409 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); in netdev_wait_allrefs_any()
10415 list_for_each_entry(dev, list, todo_list) in netdev_wait_allrefs_any()
10417 &dev->state)) { in netdev_wait_allrefs_any()
10442 list_for_each_entry(dev, list, todo_list) in netdev_wait_allrefs_any()
10443 if (netdev_refcnt_read(dev) == 1) in netdev_wait_allrefs_any()
10444 return dev; in netdev_wait_allrefs_any()
10448 list_for_each_entry(dev, list, todo_list) { in netdev_wait_allrefs_any()
10450 dev->name, netdev_refcnt_read(dev)); in netdev_wait_allrefs_any()
10451 ref_tracker_dir_print(&dev->refcnt_tracker, 10); in netdev_wait_allrefs_any()
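The refcount read above is what unregistration spins on, and the ref_tracker dump names the holders keeping it above 1. New code therefore takes tracked references, so a leak reported here points at a specific tracker. Sketch (my_use_dev() is hypothetical):

static void my_use_dev(struct net_device *dev)
{
        netdevice_tracker tracker;

        netdev_hold(dev, &tracker, GFP_KERNEL);
        /* ... dev cannot finish unregistering while held ... */
        netdev_put(dev, &tracker);
}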
10485 struct net_device *dev, *tmp; in netdev_run_todo() local
10493 struct net_device *dev = list_first_entry(&unlink_list, in netdev_run_todo() local
10496 list_del_init(&dev->unlink_list); in netdev_run_todo()
10497 dev->nested_level = dev->lower_level - 1; in netdev_run_todo()
10510 list_for_each_entry_safe(dev, tmp, &list, todo_list) { in netdev_run_todo()
10511 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { in netdev_run_todo()
10512 netdev_WARN(dev, "run_todo but not unregistering\n"); in netdev_run_todo()
10513 list_del(&dev->todo_list); in netdev_run_todo()
10518 dev->reg_state = NETREG_UNREGISTERED; in netdev_run_todo()
10520 linkwatch_forget_dev(dev); in netdev_run_todo()
10524 dev = netdev_wait_allrefs_any(&list); in netdev_run_todo()
10525 list_del(&dev->todo_list); in netdev_run_todo()
10528 BUG_ON(netdev_refcnt_read(dev) != 1); in netdev_run_todo()
10529 BUG_ON(!list_empty(&dev->ptype_all)); in netdev_run_todo()
10530 BUG_ON(!list_empty(&dev->ptype_specific)); in netdev_run_todo()
10531 WARN_ON(rcu_access_pointer(dev->ip_ptr)); in netdev_run_todo()
10532 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); in netdev_run_todo()
10534 netdev_do_free_pcpu_stats(dev); in netdev_run_todo()
10535 if (dev->priv_destructor) in netdev_run_todo()
10536 dev->priv_destructor(dev); in netdev_run_todo()
10537 if (dev->needs_free_netdev) in netdev_run_todo()
10538 free_netdev(dev); in netdev_run_todo()
10540 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) in netdev_run_todo()
10544 kobject_put(&dev->dev.kobj); in netdev_run_todo()
10569 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev) in netdev_core_stats_alloc() argument
10576 if (p && cmpxchg(&dev->core_stats, NULL, p)) in netdev_core_stats_alloc()
10580 return READ_ONCE(dev->core_stats); in netdev_core_stats_alloc()
10594 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, in dev_get_stats() argument
10597 const struct net_device_ops *ops = dev->netdev_ops; in dev_get_stats()
10602 ops->ndo_get_stats64(dev, storage); in dev_get_stats()
10604 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); in dev_get_stats()
10606 netdev_stats_to_stats64(storage, &dev->stats); in dev_get_stats()
10610 p = READ_ONCE(dev->core_stats); in dev_get_stats()
10669 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) in dev_get_tstats64() argument
10671 netdev_stats_to_stats64(s, &dev->stats); in dev_get_tstats64()
10672 dev_fetch_sw_netstats(s, dev->tstats); in dev_get_tstats64()
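dev_get_tstats64() exists precisely so tstats-based drivers need no stats code of their own; they point the op at it directly (mydrv_netdev_ops is hypothetical):

static const struct net_device_ops mydrv_netdev_ops = {
        /* folds dev->stats plus per-CPU dev->tstats, as above */
        .ndo_get_stats64 = dev_get_tstats64,
        /* ... */
};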
10676 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) in dev_ingress_queue_create() argument
10678 struct netdev_queue *queue = dev_ingress_queue(dev); in dev_ingress_queue_create()
10686 netdev_init_one_queue(dev, queue, NULL); in dev_ingress_queue_create()
10689 rcu_assign_pointer(dev->ingress_queue, queue); in dev_ingress_queue_create()
10696 void netdev_set_default_ethtool_ops(struct net_device *dev, in netdev_set_default_ethtool_ops() argument
10699 if (dev->ethtool_ops == &default_ethtool_ops) in netdev_set_default_ethtool_ops()
10700 dev->ethtool_ops = ops; in netdev_set_default_ethtool_ops()
10711 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) in netdev_sw_irq_coalesce_default_on() argument
10713 WARN_ON(dev->reg_state == NETREG_REGISTERED); in netdev_sw_irq_coalesce_default_on()
10716 dev->gro_flush_timeout = 20000; in netdev_sw_irq_coalesce_default_on()
10717 dev->napi_defer_hard_irqs = 1; in netdev_sw_irq_coalesce_default_on()
10722 void netdev_freemem(struct net_device *dev) in netdev_freemem() argument
10724 char *addr = (char *)dev - dev->padded; in netdev_freemem()
10747 struct net_device *dev; in alloc_netdev_mqs() local
10751 BUG_ON(strlen(name) >= sizeof(dev->name)); in alloc_netdev_mqs()
10776 dev = PTR_ALIGN(p, NETDEV_ALIGN); in alloc_netdev_mqs()
10777 dev->padded = (char *)dev - (char *)p; in alloc_netdev_mqs()
10779 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); in alloc_netdev_mqs()
10781 dev->pcpu_refcnt = alloc_percpu(int); in alloc_netdev_mqs()
10782 if (!dev->pcpu_refcnt) in alloc_netdev_mqs()
10784 __dev_hold(dev); in alloc_netdev_mqs()
10786 refcount_set(&dev->dev_refcnt, 1); in alloc_netdev_mqs()
10789 if (dev_addr_init(dev)) in alloc_netdev_mqs()
10792 dev_mc_init(dev); in alloc_netdev_mqs()
10793 dev_uc_init(dev); in alloc_netdev_mqs()
10795 dev_net_set(dev, &init_net); in alloc_netdev_mqs()
10797 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; in alloc_netdev_mqs()
10798 dev->xdp_zc_max_segs = 1; in alloc_netdev_mqs()
10799 dev->gso_max_segs = GSO_MAX_SEGS; in alloc_netdev_mqs()
10800 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; in alloc_netdev_mqs()
10801 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; in alloc_netdev_mqs()
10802 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; in alloc_netdev_mqs()
10803 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; in alloc_netdev_mqs()
10804 dev->tso_max_segs = TSO_MAX_SEGS; in alloc_netdev_mqs()
10805 dev->upper_level = 1; in alloc_netdev_mqs()
10806 dev->lower_level = 1; in alloc_netdev_mqs()
10808 dev->nested_level = 0; in alloc_netdev_mqs()
10809 INIT_LIST_HEAD(&dev->unlink_list); in alloc_netdev_mqs()
10812 INIT_LIST_HEAD(&dev->napi_list); in alloc_netdev_mqs()
10813 INIT_LIST_HEAD(&dev->unreg_list); in alloc_netdev_mqs()
10814 INIT_LIST_HEAD(&dev->close_list); in alloc_netdev_mqs()
10815 INIT_LIST_HEAD(&dev->link_watch_list); in alloc_netdev_mqs()
10816 INIT_LIST_HEAD(&dev->adj_list.upper); in alloc_netdev_mqs()
10817 INIT_LIST_HEAD(&dev->adj_list.lower); in alloc_netdev_mqs()
10818 INIT_LIST_HEAD(&dev->ptype_all); in alloc_netdev_mqs()
10819 INIT_LIST_HEAD(&dev->ptype_specific); in alloc_netdev_mqs()
10820 INIT_LIST_HEAD(&dev->net_notifier_list); in alloc_netdev_mqs()
10822 hash_init(dev->qdisc_hash); in alloc_netdev_mqs()
10824 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; in alloc_netdev_mqs()
10825 setup(dev); in alloc_netdev_mqs()
10827 if (!dev->tx_queue_len) { in alloc_netdev_mqs()
10828 dev->priv_flags |= IFF_NO_QUEUE; in alloc_netdev_mqs()
10829 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; in alloc_netdev_mqs()
10832 dev->num_tx_queues = txqs; in alloc_netdev_mqs()
10833 dev->real_num_tx_queues = txqs; in alloc_netdev_mqs()
10834 if (netif_alloc_netdev_queues(dev)) in alloc_netdev_mqs()
10837 dev->num_rx_queues = rxqs; in alloc_netdev_mqs()
10838 dev->real_num_rx_queues = rxqs; in alloc_netdev_mqs()
10839 if (netif_alloc_rx_queues(dev)) in alloc_netdev_mqs()
10842 strcpy(dev->name, name); in alloc_netdev_mqs()
10843 dev->name_assign_type = name_assign_type; in alloc_netdev_mqs()
10844 dev->group = INIT_NETDEV_GROUP; in alloc_netdev_mqs()
10845 if (!dev->ethtool_ops) in alloc_netdev_mqs()
10846 dev->ethtool_ops = &default_ethtool_ops; in alloc_netdev_mqs()
10848 nf_hook_netdev_init(dev); in alloc_netdev_mqs()
10850 return dev; in alloc_netdev_mqs()
10853 free_netdev(dev); in alloc_netdev_mqs()
10858 free_percpu(dev->pcpu_refcnt); in alloc_netdev_mqs()
10861 netdev_freemem(dev); in alloc_netdev_mqs()
10875 void free_netdev(struct net_device *dev) in free_netdev() argument
10885 if (dev->reg_state == NETREG_UNREGISTERING) { in free_netdev()
10887 dev->needs_free_netdev = true; in free_netdev()
10891 netif_free_tx_queues(dev); in free_netdev()
10892 netif_free_rx_queues(dev); in free_netdev()
10894 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); in free_netdev()
10897 dev_addr_flush(dev); in free_netdev()
10899 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) in free_netdev()
10902 ref_tracker_dir_exit(&dev->refcnt_tracker); in free_netdev()
10904 free_percpu(dev->pcpu_refcnt); in free_netdev()
10905 dev->pcpu_refcnt = NULL; in free_netdev()
10907 free_percpu(dev->core_stats); in free_netdev()
10908 dev->core_stats = NULL; in free_netdev()
10909 free_percpu(dev->xdp_bulkq); in free_netdev()
10910 dev->xdp_bulkq = NULL; in free_netdev()
10913 if (dev->reg_state == NETREG_UNINITIALIZED) { in free_netdev()
10914 netdev_freemem(dev); in free_netdev()
10918 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); in free_netdev()
10919 dev->reg_state = NETREG_RELEASED; in free_netdev()
10922 put_device(&dev->dev); in free_netdev()
10955 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) in unregister_netdevice_queue() argument
10960 list_move_tail(&dev->unreg_list, head); in unregister_netdevice_queue()
10964 list_add(&dev->unreg_list, &single); in unregister_netdevice_queue()
10973 struct net_device *dev, *tmp; in unregister_netdevice_many_notify() local
10982 list_for_each_entry_safe(dev, tmp, head, unreg_list) { in unregister_netdevice_many_notify()
10987 if (dev->reg_state == NETREG_UNINITIALIZED) { in unregister_netdevice_many_notify()
10989 dev->name, dev); in unregister_netdevice_many_notify()
10992 list_del(&dev->unreg_list); in unregister_netdevice_many_notify()
10995 dev->dismantle = true; in unregister_netdevice_many_notify()
10996 BUG_ON(dev->reg_state != NETREG_REGISTERED); in unregister_netdevice_many_notify()
11000 list_for_each_entry(dev, head, unreg_list) in unregister_netdevice_many_notify()
11001 list_add_tail(&dev->close_list, &close_head); in unregister_netdevice_many_notify()
11004 list_for_each_entry(dev, head, unreg_list) { in unregister_netdevice_many_notify()
11007 unlist_netdevice(dev, false); in unregister_netdevice_many_notify()
11008 dev->reg_state = NETREG_UNREGISTERING; in unregister_netdevice_many_notify()
11015 list_for_each_entry(dev, head, unreg_list) { in unregister_netdevice_many_notify()
11019 dev_shutdown(dev); in unregister_netdevice_many_notify()
11020 dev_tcx_uninstall(dev); in unregister_netdevice_many_notify()
11021 dev_xdp_uninstall(dev); in unregister_netdevice_many_notify()
11022 bpf_dev_bound_netdev_unregister(dev); in unregister_netdevice_many_notify()
11024 netdev_offload_xstats_disable_all(dev); in unregister_netdevice_many_notify()
11029 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); in unregister_netdevice_many_notify()
11031 if (!dev->rtnl_link_ops || in unregister_netdevice_many_notify()
11032 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) in unregister_netdevice_many_notify()
11033 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, in unregister_netdevice_many_notify()
11040 dev_uc_flush(dev); in unregister_netdevice_many_notify()
11041 dev_mc_flush(dev); in unregister_netdevice_many_notify()
11043 netdev_name_node_alt_flush(dev); in unregister_netdevice_many_notify()
11044 netdev_name_node_free(dev->name_node); in unregister_netdevice_many_notify()
11046 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); in unregister_netdevice_many_notify()
11048 if (dev->netdev_ops->ndo_uninit) in unregister_netdevice_many_notify()
11049 dev->netdev_ops->ndo_uninit(dev); in unregister_netdevice_many_notify()
11052 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); in unregister_netdevice_many_notify()
11055 WARN_ON(netdev_has_any_upper_dev(dev)); in unregister_netdevice_many_notify()
11056 WARN_ON(netdev_has_any_lower_dev(dev)); in unregister_netdevice_many_notify()
11059 netdev_unregister_kobject(dev); in unregister_netdevice_many_notify()
11062 netif_reset_xps_queues_gt(dev, 0); in unregister_netdevice_many_notify()
11068 list_for_each_entry(dev, head, unreg_list) { in unregister_netdevice_many_notify()
11069 netdev_put(dev, &dev->dev_registered_tracker); in unregister_netdevice_many_notify()
11070 net_set_todo(dev); in unregister_netdevice_many_notify()
11100 void unregister_netdev(struct net_device *dev) in unregister_netdev() argument
11103 unregister_netdevice(dev); in unregister_netdev()
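Teardown mirrors the registration sketch earlier: unregister_netdev() (rtnl_lock plus unregister_netdevice()) queues the device through the todo machinery, and the final rtnl_unlock() runs netdev_run_todo(), so by the time it returns the refcount has drained and the memory may go:

static void __exit mydrv_exit(void)
{
        unregister_netdev(mydev);       /* waits for refs via the todo list */
        free_netdev(mydev);
}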
11124 int __dev_change_net_namespace(struct net_device *dev, struct net *net, in __dev_change_net_namespace() argument
11128 struct net *net_old = dev_net(dev); in __dev_change_net_namespace()
11136 if (dev->features & NETIF_F_NETNS_LOCAL) in __dev_change_net_namespace()
11140 if (dev->reg_state != NETREG_REGISTERED) in __dev_change_net_namespace()
11152 if (netdev_name_in_use(net, dev->name)) { in __dev_change_net_namespace()
11156 err = dev_prep_valid_name(net, dev, pat, new_name); in __dev_change_net_namespace()
11162 netdev_for_each_altname(dev, name_node) in __dev_change_net_namespace()
11173 err = dev_index_reserve(net, dev->ifindex); in __dev_change_net_namespace()
11186 dev_close(dev); in __dev_change_net_namespace()
11189 unlist_netdevice(dev, true); in __dev_change_net_namespace()
11194 dev_shutdown(dev); in __dev_change_net_namespace()
11203 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); in __dev_change_net_namespace()
11206 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); in __dev_change_net_namespace()
11208 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, in __dev_change_net_namespace()
11214 dev_uc_flush(dev); in __dev_change_net_namespace()
11215 dev_mc_flush(dev); in __dev_change_net_namespace()
11218 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); in __dev_change_net_namespace()
11219 netdev_adjacent_del_links(dev); in __dev_change_net_namespace()
11222 move_netdevice_notifiers_dev_net(dev, net); in __dev_change_net_namespace()
11225 dev_net_set(dev, net); in __dev_change_net_namespace()
11226 dev->ifindex = new_ifindex; in __dev_change_net_namespace()
11229 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); in __dev_change_net_namespace()
11230 netdev_adjacent_add_links(dev); in __dev_change_net_namespace()
11233 strscpy(dev->name, new_name, IFNAMSIZ); in __dev_change_net_namespace()
11236 err = device_rename(&dev->dev, dev->name); in __dev_change_net_namespace()
11242 err = netdev_change_owner(dev, net_old, net); in __dev_change_net_namespace()
11246 list_netdevice(dev); in __dev_change_net_namespace()
11249 call_netdevice_notifiers(NETDEV_REGISTER, dev); in __dev_change_net_namespace()
11255 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); in __dev_change_net_namespace()
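The sequence above is effectively an unregister in the old namespace and a re-register in the new one without destroying the device: close, unlist, shed namespace-local state (sysfs, adjacency symlinks, uc/mc lists), switch the net and ifindex, then relist and renotify. Callers use the wrapper; a sketch (my_move() is hypothetical):

static int my_move(struct net_device *dev, struct net *peer)
{
        ASSERT_RTNL();
        /* falls back to a pattern-generated name if dev->name is taken
         * in "peer" (cf. default_device_exit_net() and "dev%d" below) */
        return dev_change_net_namespace(dev, peer, "eth%d");
}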
11405 const char *netdev_drivername(const struct net_device *dev) in netdev_drivername() argument
11411 parent = dev->dev.parent; in netdev_drivername()
11421 static void __netdev_printk(const char *level, const struct net_device *dev, in __netdev_printk() argument
11424 if (dev && dev->dev.parent) { in __netdev_printk()
11426 dev->dev.parent, in __netdev_printk()
11428 dev_driver_string(dev->dev.parent), in __netdev_printk()
11429 dev_name(dev->dev.parent), in __netdev_printk()
11430 netdev_name(dev), netdev_reg_state(dev), in __netdev_printk()
11432 } else if (dev) { in __netdev_printk()
11434 level, netdev_name(dev), netdev_reg_state(dev), vaf); in __netdev_printk()
11440 void netdev_printk(const char *level, const struct net_device *dev, in netdev_printk() argument
11451 __netdev_printk(level, dev, &vaf); in netdev_printk()
11458 void func(const struct net_device *dev, const char *fmt, ...) \
11468 __netdev_printk(level, dev, &vaf); \
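The macro above stamps out the per-level helpers (netdev_emerg() through netdev_info()), all funneling into __netdev_printk() so messages carry the driver and bus name plus the registration-state marker. Typical use (mydrv_report() is hypothetical):

static void mydrv_report(struct net_device *dev, int err)
{
        if (err)
                netdev_err(dev, "reset failed: %d\n", err);
        else
                netdev_info(dev, "reset complete\n");
}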
11499 struct net_device *dev, *aux; in default_device_exit_net() local
11505 for_each_netdev_safe(net, dev, aux) { in default_device_exit_net()
11510 if (dev->features & NETIF_F_NETNS_LOCAL) in default_device_exit_net()
11514 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) in default_device_exit_net()
11518 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); in default_device_exit_net()
11522 netdev_for_each_altname_safe(dev, name_node, tmp) in default_device_exit_net()
11529 err = dev_change_net_namespace(dev, &init_net, fb_name); in default_device_exit_net()
11532 __func__, dev->name, err); in default_device_exit_net()
11545 struct net_device *dev; in default_device_exit_batch() local
11556 for_each_netdev_reverse(net, dev) { in default_device_exit_batch()
11557 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) in default_device_exit_batch()
11558 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); in default_device_exit_batch()
11560 unregister_netdevice_queue(dev, &dev_kill_list); in default_device_exit_batch()