Lines Matching refs:vxlan
61 static int vxlan_sock_add(struct vxlan_dev *vxlan);
63 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
108 if (!node->vxlan) in vxlan_vs_find_vni()
111 if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) { in vxlan_vs_find_vni()
112 vnode = vxlan_vnifilter_lookup(node->vxlan, vni); in vxlan_vs_find_vni()
115 } else if (node->vxlan->default_dst.remote_vni != vni) { in vxlan_vs_find_vni()
120 const struct vxlan_config *cfg = &node->vxlan->cfg; in vxlan_vs_find_vni()
129 return node->vxlan; in vxlan_vs_find_vni()
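
The vxlan_vs_find_vni() hits above show the two ways a received VNI is matched to a device sharing the socket: through the per-device VNI set when VXLAN_F_VNIFILTER is enabled, or against the single configured default_dst.remote_vni otherwise. The fragment below is a minimal userspace sketch of that decision only; the toy_dev type, the F_VNIFILTER flag value and the linear set scan are illustrative stand-ins, not the kernel's structures.

  /* Simplified model of the per-socket VNI match in vxlan_vs_find_vni():
   * a device either filters VNIs through a per-device set
   * (VXLAN_F_VNIFILTER) or accepts exactly its configured default
   * remote VNI.  Names, the flag value and types are illustrative. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define F_VNIFILTER 0x1

  struct toy_dev {
      uint32_t flags;
      uint32_t default_vni;        /* default_dst.remote_vni analogue */
      const uint32_t *vni_set;     /* allowed VNIs when filtering */
      int vni_set_len;
  };

  static bool toy_dev_accepts_vni(const struct toy_dev *dev, uint32_t vni)
  {
      if (dev->flags & F_VNIFILTER) {
          for (int i = 0; i < dev->vni_set_len; i++)
              if (dev->vni_set[i] == vni)
                  return true;
          return false;                /* filtering device, VNI not in the set */
      }
      return dev->default_vni == vni;  /* non-filtering: exact match only */
  }

  int main(void)
  {
      const uint32_t allowed[] = { 10, 20, 30 };
      struct toy_dev filtering = { F_VNIFILTER, 0, allowed, 3 };
      struct toy_dev plain = { 0, 42, NULL, 0 };

      printf("filtering dev, vni 20: %d\n", toy_dev_accepts_vni(&filtering, 20));
      printf("plain dev, vni 41:     %d\n", toy_dev_accepts_vni(&plain, 41));
      return 0;
  }
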
150 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, in vxlan_fdb_info() argument
192 ndm->ndm_ifindex = vxlan->dev->ifindex; in vxlan_fdb_info()
198 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && in vxlan_fdb_info()
200 peernet2id(dev_net(vxlan->dev), vxlan->net))) in vxlan_fdb_info()
214 rdst->remote_port != vxlan->cfg.dst_port && in vxlan_fdb_info()
217 if (rdst->remote_vni != vxlan->default_dst.remote_vni && in vxlan_fdb_info()
225 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && in vxlan_fdb_info()
258 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in __vxlan_fdb_notify() argument
261 struct net *net = dev_net(vxlan->dev); in __vxlan_fdb_notify()
269 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); in __vxlan_fdb_notify()
284 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_notifier_info() argument
290 fdb_info->info.dev = vxlan->dev; in vxlan_fdb_switchdev_notifier_info()
302 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_call_notifiers() argument
317 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); in vxlan_fdb_switchdev_call_notifiers()
318 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, in vxlan_fdb_switchdev_call_notifiers()
323 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_notify() argument
332 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
338 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
344 __vxlan_fdb_notify(vxlan, fdb, rd, type); in vxlan_fdb_notify()
350 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_ip_miss() local
359 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_ip_miss()
362 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) in vxlan_fdb_miss() argument
371 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_fdb_miss()
396 u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) in fdb_head_index() argument
398 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) in fdb_head_index()
405 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, in vxlan_fdb_head() argument
408 return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)]; in vxlan_fdb_head()
412 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, in __vxlan_find_mac() argument
415 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); in __vxlan_find_mac()
420 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in __vxlan_find_mac()
432 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, in vxlan_find_mac() argument
437 f = __vxlan_find_mac(vxlan, mac, vni); in vxlan_find_mac()
465 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_find_uc() local
479 f = __vxlan_find_mac(vxlan, eth_addr, vni); in vxlan_fdb_find_uc()
486 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); in vxlan_fdb_find_uc()
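
The fdb_head_index() and __vxlan_find_mac() hits above outline the FDB lookup: pick a hash bucket from the MAC, folding in the source VNI as well when the device runs with VXLAN_F_COLLECT_METADATA (one metadata device can carry many VNIs), then walk that bucket comparing addresses. The sketch below models only that shape in plain userspace C; the FNV-1a hash, the toy_* names and the singly linked chains are stand-ins for the kernel's hashing and hlist machinery.

  /* Simplified userspace model of the FDB lookup path
   * (fdb_head_index() + __vxlan_find_mac()): hash the MAC, and the
   * VNI too when entries are per-VNI, to pick a bucket, then walk
   * that bucket comparing addresses.  Everything here is a stand-in,
   * not the kernel's code. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <string.h>

  #define FDB_HASH_BITS 8
  #define FDB_HASH_SIZE (1u << FDB_HASH_BITS)

  struct toy_fdb {
      uint8_t mac[6];
      uint32_t vni;
      struct toy_fdb *next;            /* singly linked bucket chain */
  };

  struct toy_fdb_table {
      bool per_vni;                    /* VXLAN_F_COLLECT_METADATA analogue */
      struct toy_fdb *head[FDB_HASH_SIZE];
  };

  /* FNV-1a as an illustrative stand-in for the kernel's hash. */
  static uint32_t toy_hash(const uint8_t *mac, uint32_t vni, bool per_vni)
  {
      uint32_t h = 2166136261u;

      for (int i = 0; i < 6; i++)
          h = (h ^ mac[i]) * 16777619u;
      if (per_vni)
          for (int i = 0; i < 4; i++)
              h = (h ^ ((vni >> (8 * i)) & 0xff)) * 16777619u;
      return h & (FDB_HASH_SIZE - 1);
  }

  static struct toy_fdb *toy_find_mac(struct toy_fdb_table *t,
                                      const uint8_t *mac, uint32_t vni)
  {
      uint32_t b = toy_hash(mac, vni, t->per_vni);

      for (struct toy_fdb *f = t->head[b]; f; f = f->next) {
          if (memcmp(f->mac, mac, 6) != 0)
              continue;
          if (t->per_vni && f->vni != vni)
              continue;                /* same MAC may exist on another VNI */
          return f;
      }
      return NULL;
  }

  int main(void)
  {
      struct toy_fdb_table t = { .per_vni = true };
      struct toy_fdb e = { { 0, 1, 2, 3, 4, 5 }, 100, NULL };
      uint32_t b = toy_hash(e.mac, e.vni, t.per_vni);

      e.next = t.head[b];
      t.head[b] = &e;                  /* vxlan_fdb_insert() analogue */

      return toy_find_mac(&t, e.mac, 100) == &e ? 0 : 1;
  }
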
495 const struct vxlan_dev *vxlan, in vxlan_fdb_notify_one() argument
503 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); in vxlan_fdb_notify_one()
513 struct vxlan_dev *vxlan; in vxlan_fdb_replay() local
521 vxlan = netdev_priv(dev); in vxlan_fdb_replay()
524 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
525 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_replay()
528 rc = vxlan_fdb_notify_one(nb, vxlan, in vxlan_fdb_replay()
536 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
541 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
548 struct vxlan_dev *vxlan; in vxlan_fdb_clear_offload() local
555 vxlan = netdev_priv(dev); in vxlan_fdb_clear_offload()
558 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
559 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) in vxlan_fdb_clear_offload()
563 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
793 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_alloc() argument
807 RCU_INIT_POINTER(f->vdev, vxlan); in vxlan_fdb_alloc()
815 static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_insert() argument
818 ++vxlan->addrcnt; in vxlan_fdb_insert()
820 vxlan_fdb_head(vxlan, mac, src_vni)); in vxlan_fdb_insert()
823 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_nh_update() argument
833 nh = nexthop_find_by_id(vxlan->net, nhid); in vxlan_fdb_nh_update()
855 switch (vxlan->default_dst.remote_ip.sa.sa_family) { in vxlan_fdb_nh_update()
885 int vxlan_fdb_create(struct vxlan_dev *vxlan, in vxlan_fdb_create() argument
896 if (vxlan->cfg.addrmax && in vxlan_fdb_create()
897 vxlan->addrcnt >= vxlan->cfg.addrmax) in vxlan_fdb_create()
900 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_create()
901 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); in vxlan_fdb_create()
906 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_create()
947 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_destroy() argument
952 netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); in vxlan_fdb_destroy()
954 --vxlan->addrcnt; in vxlan_fdb_destroy()
957 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, in vxlan_fdb_destroy()
961 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, in vxlan_fdb_destroy()
978 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, in vxlan_fdb_update_existing() argument
1028 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_update_existing()
1058 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, in vxlan_fdb_update_existing()
1078 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, in vxlan_fdb_update_create() argument
1095 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_update_create()
1096 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, in vxlan_fdb_update_create()
1101 vxlan_fdb_insert(vxlan, mac, src_vni, f); in vxlan_fdb_update_create()
1102 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, in vxlan_fdb_update_create()
1110 vxlan_fdb_destroy(vxlan, f, false, false); in vxlan_fdb_update_create()
1115 int vxlan_fdb_update(struct vxlan_dev *vxlan, in vxlan_fdb_update() argument
1125 f = __vxlan_find_mac(vxlan, mac, src_vni); in vxlan_fdb_update()
1128 netdev_dbg(vxlan->dev, in vxlan_fdb_update()
1133 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, in vxlan_fdb_update()
1140 return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, in vxlan_fdb_update()
1147 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_dst_destroy() argument
1151 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); in vxlan_fdb_dst_destroy()
1155 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, in vxlan_fdb_parse() argument
1160 struct net *net = dev_net(vxlan->dev); in vxlan_fdb_parse()
1176 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; in vxlan_fdb_parse()
1196 *port = vxlan->cfg.dst_port; in vxlan_fdb_parse()
1206 *vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1216 *src_vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1250 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_add() local
1268 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_add()
1273 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) in vxlan_fdb_add()
1276 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_add()
1277 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1278 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, in vxlan_fdb_add()
1282 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1287 int __vxlan_fdb_delete(struct vxlan_dev *vxlan, in __vxlan_fdb_delete() argument
1296 f = vxlan_find_mac(vxlan, addr, src_vni); in __vxlan_fdb_delete()
1310 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); in __vxlan_fdb_delete()
1314 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); in __vxlan_fdb_delete()
1326 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete() local
1334 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_delete()
1339 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_delete()
1340 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1341 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, in vxlan_fdb_delete()
1343 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1353 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_dump() local
1361 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_dump()
1367 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1385 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1411 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_get() local
1419 vni = vxlan->default_dst.remote_vni; in vxlan_fdb_get()
1423 f = __vxlan_find_mac(vxlan, addr, vni); in vxlan_fdb_get()
1430 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, in vxlan_fdb_get()
1445 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_snoop() local
1459 f = vxlan_find_mac(vxlan, src_mac, vni); in vxlan_snoop()
1482 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); in vxlan_snoop()
1484 u32 hash_index = fdb_head_index(vxlan, src_mac, vni); in vxlan_snoop()
1487 spin_lock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1491 vxlan_fdb_update(vxlan, src_mac, src_ip, in vxlan_snoop()
1494 vxlan->cfg.dst_port, in vxlan_snoop()
1496 vxlan->default_dst.remote_vni, in vxlan_snoop()
1498 spin_unlock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
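
The vxlan_snoop() hits above are the learning path: after decapsulation the inner source MAC is looked up, an existing entry pointing at a different outer source address is migrated and listeners are notified (unless the entry is static, in which case the packet is refused), and an unknown address is created under the bucket lock when learning (VXLAN_F_LEARN) is enabled. Below is a simplified, lock-free userspace model of that decision; the fixed-size table, the toy_* names and the IPv4-only remote address are assumptions for illustration.

  /* Simplified model of source-address learning in vxlan_snoop().
   * Types and helpers are illustrative only. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <string.h>

  struct toy_entry {
      uint8_t mac[6];
      uint32_t remote_ip;    /* outer source address, IPv4 for brevity */
      bool is_static;        /* NUD_PERMANENT analogue: never relearned */
      bool in_use;
  };

  #define TOY_FDB_MAX 128
  static struct toy_entry toy_fdb[TOY_FDB_MAX];

  /* Returns true when the frame should be dropped because a static
   * entry already claims this MAC from a different remote. */
  static bool toy_snoop(const uint8_t *src_mac, uint32_t outer_src_ip,
                        bool learning_enabled)
  {
      struct toy_entry *f = NULL, *free_slot = NULL;

      for (int i = 0; i < TOY_FDB_MAX; i++) {
          if (!toy_fdb[i].in_use) {
              if (!free_slot)
                  free_slot = &toy_fdb[i];
              continue;
          }
          if (memcmp(toy_fdb[i].mac, src_mac, 6) == 0) {
              f = &toy_fdb[i];
              break;
          }
      }

      if (f) {
          if (f->remote_ip == outer_src_ip)
              return false;                /* nothing to learn */
          if (f->is_static)
              return true;                 /* refuse to move a static entry */
          f->remote_ip = outer_src_ip;     /* migrate (the kernel also notifies) */
          return false;
      }

      if (learning_enabled && free_slot) {
          memcpy(free_slot->mac, src_mac, 6);
          free_slot->remote_ip = outer_src_ip;
          free_slot->is_static = false;
          free_slot->in_use = true;
      }
      return false;
  }

  int main(void)
  {
      const uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

      toy_snoop(mac, 0x0a000001, true);    /* learn 10.0.0.1 */
      toy_snoop(mac, 0x0a000002, true);    /* VTEP moved: relearn */
      return 0;
  }
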
1525 static void vxlan_sock_release(struct vxlan_dev *vxlan) in vxlan_sock_release() argument
1527 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_sock_release()
1529 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_sock_release()
1531 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_release()
1534 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_release()
1537 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_sock_release()
1538 vxlan_vs_del_vnigrp(vxlan); in vxlan_sock_release()
1540 vxlan_vs_del_dev(vxlan); in vxlan_sock_release()
1607 static bool vxlan_set_mac(struct vxlan_dev *vxlan, in vxlan_set_mac() argument
1615 skb->protocol = eth_type_trans(skb, vxlan->dev); in vxlan_set_mac()
1619 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) in vxlan_set_mac()
1633 if ((vxlan->cfg.flags & VXLAN_F_LEARN) && in vxlan_set_mac()
1668 struct vxlan_dev *vxlan; in vxlan_rcv() local
1701 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); in vxlan_rcv()
1702 if (!vxlan) in vxlan_rcv()
1716 !net_eq(vxlan->net, dev_net(vxlan->dev)))) in vxlan_rcv()
1758 if (!vxlan_set_mac(vxlan, vs, skb, vni)) in vxlan_rcv()
1762 skb->dev = vxlan->dev; in vxlan_rcv()
1775 DEV_STATS_INC(vxlan->dev, rx_length_errors); in vxlan_rcv()
1776 DEV_STATS_INC(vxlan->dev, rx_errors); in vxlan_rcv()
1777 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1786 DEV_STATS_INC(vxlan->dev, rx_frame_errors); in vxlan_rcv()
1787 DEV_STATS_INC(vxlan->dev, rx_errors); in vxlan_rcv()
1788 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1795 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { in vxlan_rcv()
1797 dev_core_stats_rx_dropped_inc(vxlan->dev); in vxlan_rcv()
1798 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1803 dev_sw_netstats_rx_add(vxlan->dev, skb->len); in vxlan_rcv()
1804 vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len); in vxlan_rcv()
1805 gro_cells_receive(&vxlan->gro_cells, skb); in vxlan_rcv()
1820 struct vxlan_dev *vxlan; in vxlan_err_lookup() local
1838 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL); in vxlan_err_lookup()
1839 if (!vxlan) in vxlan_err_lookup()
1847 struct vxlan_dev *vxlan = netdev_priv(dev); in arp_reduce() local
1858 vxlan_vnifilter_count(vxlan, vni, NULL, in arp_reduce()
1894 f = vxlan_find_mac(vxlan, n->ha, vni); in arp_reduce()
1916 vxlan_vnifilter_count(vxlan, vni, NULL, in arp_reduce()
1920 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in arp_reduce()
2027 struct vxlan_dev *vxlan = netdev_priv(dev); in neigh_reduce() local
2058 f = vxlan_find_mac(vxlan, n->ha, vni); in neigh_reduce()
2075 vxlan_vnifilter_count(vxlan, vni, NULL, in neigh_reduce()
2078 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in neigh_reduce()
2096 struct vxlan_dev *vxlan = netdev_priv(dev); in route_shortcircuit() local
2112 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2133 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2241 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, in vxlan_get_route() argument
2274 rt = ip_route_output_key(vxlan->net, &fl4); in vxlan_get_route()
2293 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, in vxlan6_get_route() argument
2329 ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, in vxlan6_get_route()
2401 struct vxlan_dev *vxlan, in encap_bypass_if_local() argument
2417 vxlan->cfg.flags & VXLAN_F_LOCALBYPASS) { in encap_bypass_if_local()
2421 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, in encap_bypass_if_local()
2423 vxlan->cfg.flags); in encap_bypass_if_local()
2426 vxlan_vnifilter_count(vxlan, vni, NULL, in encap_bypass_if_local()
2432 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); in encap_bypass_if_local()
2444 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_one() local
2456 u32 flags = vxlan->cfg.flags; in vxlan_xmit_one()
2458 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); in vxlan_xmit_one()
2471 vxlan_encap_bypass(skb, vxlan, vxlan, in vxlan_xmit_one()
2478 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; in vxlan_xmit_one()
2481 local_ip = vxlan->cfg.saddr; in vxlan_xmit_one()
2487 ttl = vxlan->cfg.ttl; in vxlan_xmit_one()
2492 tos = vxlan->cfg.tos; in vxlan_xmit_one()
2501 label = vxlan->cfg.label; in vxlan_xmit_one()
2518 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_xmit_one()
2535 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_xmit_one()
2536 vxlan->cfg.port_max, true); in vxlan_xmit_one()
2540 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_xmit_one()
2547 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, in vxlan_xmit_one()
2559 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2565 if (vxlan->cfg.df == VXLAN_DF_SET) { in vxlan_xmit_one()
2567 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { in vxlan_xmit_one()
2598 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2615 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_xmit_one()
2620 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, in vxlan_xmit_one()
2634 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2661 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2680 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); in vxlan_xmit_one()
2687 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); in vxlan_xmit_one()
2699 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); in vxlan_xmit_one()
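
The udp_flow_src_port() call in the vxlan_xmit_one() hits above chooses the outer UDP source port from a hash of the inner flow, scaled into the device's configured port_min..port_max range, so one inner flow keeps a stable outer port while different flows spread across ECMP and RSS. The sketch below reproduces only that scaling idea; the hard-coded hash values and the toy_flow_src_port() name are assumptions, and byte-order handling is omitted.

  /* Simplified model of outer UDP source-port selection on transmit.
   * The flow hash values are toys; the kernel hashes the real skb. */
  #include <stdint.h>
  #include <stdio.h>

  static uint16_t toy_flow_src_port(uint32_t flow_hash,
                                    uint16_t port_min, uint16_t port_max)
  {
      /* Map a 32-bit hash into the half-open range [port_min, port_max). */
      return (uint16_t)((((uint64_t)flow_hash * (port_max - port_min)) >> 32)
                        + port_min);
  }

  int main(void)
  {
      /* Two different inner flows land on (usually) different outer
       * ports, but each flow is stable across packets. */
      printf("flow A -> %u\n", toy_flow_src_port(0x1badc0de, 32768, 61000));
      printf("flow B -> %u\n", toy_flow_src_port(0xfeedface, 32768, 61000));
      return 0;
  }
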
2740 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_nhid() local
2758 if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family) in vxlan_xmit_nhid()
2784 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit() local
2797 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in vxlan_xmit()
2811 if (vxlan->cfg.flags & VXLAN_F_PROXY) { in vxlan_xmit()
2832 if (vxlan->cfg.flags & VXLAN_F_MDB) { in vxlan_xmit()
2836 mdb_entry = vxlan_mdb_entry_skb_get(vxlan, skb, vni); in vxlan_xmit()
2840 ret = vxlan_mdb_xmit(vxlan, mdb_entry, skb); in vxlan_xmit()
2848 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2851 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && in vxlan_xmit()
2856 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2860 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_xmit()
2862 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && in vxlan_xmit()
2864 vxlan_fdb_miss(vxlan, eth->h_dest); in vxlan_xmit()
2867 vxlan_vnifilter_count(vxlan, vni, NULL, in vxlan_xmit()
2876 (vni ? : vxlan->default_dst.remote_vni), did_rsc); in vxlan_xmit()
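
The vxlan_xmit() hits above show the forwarding decision: look up the destination MAC in the FDB, fall back to the all-zeros default entry on a miss, and raise an L2 miss notification when VXLAN_F_L2MISS is set. The sketch below is a reduced userspace model of that fallback order only; the single-remote return value and toy_* names are simplifications (a real entry can carry a list of remotes, and the kernel also handles proxying, RSC and MDB forwarding before this point).

  /* Simplified model of the destination lookup and default fallback
   * in vxlan_xmit().  Everything here is illustrative. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <string.h>
  #include <stdio.h>

  struct toy_fdb_entry {
      uint8_t mac[6];
      uint32_t remote_ip;
  };

  static const uint8_t all_zeros_mac[6] = { 0 };

  static const struct toy_fdb_entry *
  toy_find(const struct toy_fdb_entry *tbl, int n, const uint8_t *mac)
  {
      for (int i = 0; i < n; i++)
          if (memcmp(tbl[i].mac, mac, 6) == 0)
              return &tbl[i];
      return NULL;
  }

  static uint32_t toy_select_remote(const struct toy_fdb_entry *tbl, int n,
                                    const uint8_t *dmac, bool l2miss)
  {
      const struct toy_fdb_entry *f = toy_find(tbl, n, dmac);

      if (!f) {
          f = toy_find(tbl, n, all_zeros_mac);   /* default destination */
          if (l2miss)
              printf("L2 miss for %02x:..:%02x\n", dmac[0], dmac[5]);
          if (!f)
              return 0;                          /* drop: nowhere to send */
      }
      return f->remote_ip;                       /* encapsulate toward this IP */
  }

  int main(void)
  {
      const struct toy_fdb_entry tbl[] = {
          { { 0 }, 0x0a000001 },                             /* all-zeros default */
          { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 0x0a000002 },
      };
      const uint8_t unknown[6] = { 2, 0, 0, 0, 0, 9 };

      printf("known   -> %u\n", (unsigned)toy_select_remote(tbl, 2, tbl[1].mac, true));
      printf("unknown -> %u\n", (unsigned)toy_select_remote(tbl, 2, unknown, true));
      return 0;
  }
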
2901 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); in vxlan_cleanup() local
2905 if (!netif_running(vxlan->dev)) in vxlan_cleanup()
2911 spin_lock(&vxlan->hash_lock[h]); in vxlan_cleanup()
2912 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_cleanup()
2923 timeout = f->used + vxlan->cfg.age_interval * HZ; in vxlan_cleanup()
2925 netdev_dbg(vxlan->dev, in vxlan_cleanup()
2929 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_cleanup()
2933 spin_unlock(&vxlan->hash_lock[h]); in vxlan_cleanup()
2936 mod_timer(&vxlan->age_timer, next_timer); in vxlan_cleanup()
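
The vxlan_cleanup() hits above implement FDB aging: a deferrable timer walks every bucket, computes f->used + age_interval * HZ for each dynamic entry, destroys entries whose timeout has passed, and re-arms itself for the earliest surviving timeout. The sketch below mirrors that arithmetic with plain seconds instead of jiffies; the toy_* types and the fixed array are illustrative only.

  /* Simplified model of the FDB aging pass in vxlan_cleanup():
   * time_t seconds stand in for jiffies, and the table is a flat
   * array rather than hash buckets. */
  #include <stdbool.h>
  #include <time.h>
  #include <stdio.h>

  struct toy_aged_entry {
      time_t last_used;    /* f->used analogue */
      bool is_static;      /* permanent entries are never aged out */
      bool alive;
  };

  /* Ages the table in place and returns when to run again. */
  static time_t toy_age_pass(struct toy_aged_entry *tbl, int n,
                             time_t now, time_t age_interval)
  {
      time_t next_timer = now + age_interval;    /* default re-arm */

      for (int i = 0; i < n; i++) {
          struct toy_aged_entry *f = &tbl[i];
          time_t timeout;

          if (!f->alive || f->is_static)
              continue;
          timeout = f->last_used + age_interval;
          if (timeout <= now)
              f->alive = false;                  /* vxlan_fdb_destroy() analogue */
          else if (timeout < next_timer)
              next_timer = timeout;              /* earliest survivor wins */
      }
      return next_timer;                         /* mod_timer() analogue */
  }

  int main(void)
  {
      time_t now = time(NULL);
      struct toy_aged_entry tbl[] = {
          { now - 400, false, true },   /* stale: will be dropped */
          { now - 10,  false, true },   /* fresh: survives */
          { now - 900, true,  true },   /* static: never aged */
      };
      time_t next = toy_age_pass(tbl, 3, now, 300 /* seconds */);

      printf("alive: %d %d %d, next pass in %lds\n",
             tbl[0].alive, tbl[1].alive, tbl[2].alive, (long)(next - now));
      return 0;
  }
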
2939 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) in vxlan_vs_del_dev() argument
2941 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_del_dev()
2944 hlist_del_init_rcu(&vxlan->hlist4.hlist); in vxlan_vs_del_dev()
2946 hlist_del_init_rcu(&vxlan->hlist6.hlist); in vxlan_vs_del_dev()
2951 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, in vxlan_vs_add_dev() argument
2954 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_add_dev()
2955 __be32 vni = vxlan->default_dst.remote_vni; in vxlan_vs_add_dev()
2957 node->vxlan = vxlan; in vxlan_vs_add_dev()
2966 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_init() local
2969 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_init()
2970 vxlan_vnigroup_init(vxlan); in vxlan_init()
2978 err = gro_cells_init(&vxlan->gro_cells, dev); in vxlan_init()
2982 err = vxlan_mdb_init(vxlan); in vxlan_init()
2990 gro_cells_destroy(&vxlan->gro_cells); in vxlan_init()
2994 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_init()
2995 vxlan_vnigroup_uninit(vxlan); in vxlan_init()
2999 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) in vxlan_fdb_delete_default() argument
3002 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3004 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3005 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3007 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_fdb_delete_default()
3008 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3013 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_uninit() local
3015 vxlan_mdb_fini(vxlan); in vxlan_uninit()
3017 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_uninit()
3018 vxlan_vnigroup_uninit(vxlan); in vxlan_uninit()
3020 gro_cells_destroy(&vxlan->gro_cells); in vxlan_uninit()
3022 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); in vxlan_uninit()
3030 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_open() local
3033 ret = vxlan_sock_add(vxlan); in vxlan_open()
3037 ret = vxlan_multicast_join(vxlan); in vxlan_open()
3039 vxlan_sock_release(vxlan); in vxlan_open()
3043 if (vxlan->cfg.age_interval) in vxlan_open()
3044 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); in vxlan_open()
3050 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) in vxlan_flush() argument
3057 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3058 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_flush()
3065 f->vni == vxlan->cfg.vni) in vxlan_flush()
3067 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_flush()
3069 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3076 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_stop() local
3078 vxlan_multicast_leave(vxlan); in vxlan_stop()
3080 del_timer_sync(&vxlan->age_timer); in vxlan_stop()
3082 vxlan_flush(vxlan, false); in vxlan_stop()
3083 vxlan_sock_release(vxlan); in vxlan_stop()
3095 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_change_mtu() local
3096 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_change_mtu()
3097 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_change_mtu()
3104 int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); in vxlan_change_mtu()
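
The vxlan_change_mtu() hits above cap the tunnel MTU at the lower device's MTU minus the encapsulation headroom. The sketch below spells out that arithmetic with the usual on-the-wire header sizes (20- or 40-byte outer IP, 8-byte UDP, 8-byte VXLAN, 14-byte inner Ethernet, the last omitted for GPE); treat the numbers as the common case, not a substitute for the kernel's vxlan_headroom() helper.

  /* Simplified model of the MTU bound applied in vxlan_change_mtu().
   * Header sizes are the usual values, listed for illustration. */
  #include <stdbool.h>
  #include <stdio.h>

  static int toy_vxlan_headroom(bool ipv6, bool gpe)
  {
      int outer_ip = ipv6 ? 40 : 20;   /* IPv6 vs IPv4 outer header */
      int udp = 8, vxlan = 8, inner_eth = gpe ? 0 : 14;

      return outer_ip + udp + vxlan + inner_eth;
  }

  int main(void)
  {
      int lowerdev_mtu = 1500;

      printf("max VXLAN MTU over IPv4: %d\n",
             lowerdev_mtu - toy_vxlan_headroom(false, false));  /* 1450 */
      printf("max VXLAN MTU over IPv6: %d\n",
             lowerdev_mtu - toy_vxlan_headroom(true, false));   /* 1430 */
      return 0;
  }
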
3115 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_metadata_dst() local
3119 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_fill_metadata_dst()
3120 vxlan->cfg.port_max, true); in vxlan_fill_metadata_dst()
3121 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_fill_metadata_dst()
3124 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_fill_metadata_dst()
3127 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3137 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_fill_metadata_dst()
3140 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3226 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_setup() local
3251 INIT_LIST_HEAD(&vxlan->next); in vxlan_setup()
3253 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); in vxlan_setup()
3255 vxlan->dev = dev; in vxlan_setup()
3258 spin_lock_init(&vxlan->hash_lock[h]); in vxlan_setup()
3259 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); in vxlan_setup()
3392 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_ksettings() local
3393 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_get_link_ksettings()
3394 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_get_link_ksettings()
3500 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) in __vxlan_sock_add() argument
3502 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in __vxlan_sock_add()
3503 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in __vxlan_sock_add()
3508 if (vxlan->cfg.remote_ifindex) in __vxlan_sock_add()
3510 vxlan->net, vxlan->cfg.remote_ifindex); in __vxlan_sock_add()
3512 if (!vxlan->cfg.no_share) { in __vxlan_sock_add()
3514 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, in __vxlan_sock_add()
3515 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3524 vs = vxlan_socket_create(vxlan->net, ipv6, in __vxlan_sock_add()
3525 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3531 rcu_assign_pointer(vxlan->vn6_sock, vs); in __vxlan_sock_add()
3532 node = &vxlan->hlist6; in __vxlan_sock_add()
3536 rcu_assign_pointer(vxlan->vn4_sock, vs); in __vxlan_sock_add()
3537 node = &vxlan->hlist4; in __vxlan_sock_add()
3540 if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER)) in __vxlan_sock_add()
3541 vxlan_vs_add_vnigrp(vxlan, vs, ipv6); in __vxlan_sock_add()
3543 vxlan_vs_add_dev(vs, vxlan, node); in __vxlan_sock_add()
3548 static int vxlan_sock_add(struct vxlan_dev *vxlan) in vxlan_sock_add() argument
3550 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in vxlan_sock_add()
3551 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; in vxlan_sock_add()
3555 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_add()
3557 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_add()
3559 ret = __vxlan_sock_add(vxlan, true); in vxlan_sock_add()
3565 ret = __vxlan_sock_add(vxlan, false); in vxlan_sock_add()
3567 vxlan_sock_release(vxlan); in vxlan_sock_add()
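
The vxlan_sock_add()/__vxlan_sock_add() hits above show how a device obtains its underlay sockets: one per address family (both families when collecting metadata), and an already-bound socket with a matching family, port and flags is reused unless cfg.no_share disables sharing; only otherwise is a new socket created. The sketch below models the find-or-create step with a flat refcounted table; the toy_sock type and the integer family encoding are assumptions, and the kernel additionally hangs the device off the socket's per-VNI hash via vxlan_vs_add_dev().

  /* Simplified model of underlay socket sharing in __vxlan_sock_add().
   * The socket list is a fixed array, not the kernel's per-netns hash. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct toy_sock {
      int family;          /* 4 or 6 for brevity */
      uint16_t port;
      uint32_t flags;
      int refcnt;          /* 0 means slot unused */
  };

  #define TOY_MAX_SOCKS 16
  static struct toy_sock socks[TOY_MAX_SOCKS];

  static struct toy_sock *toy_sock_add(int family, uint16_t port,
                                       uint32_t flags, bool no_share)
  {
      struct toy_sock *free_slot = NULL;

      for (int i = 0; i < TOY_MAX_SOCKS; i++) {
          struct toy_sock *s = &socks[i];

          if (!s->refcnt) {
              if (!free_slot)
                  free_slot = s;
              continue;
          }
          if (!no_share && s->family == family &&
              s->port == port && s->flags == flags) {
              s->refcnt++;             /* share the existing socket */
              return s;
          }
      }
      if (!free_slot)
          return NULL;                 /* table full */
      *free_slot = (struct toy_sock){ family, port, flags, 1 };
      return free_slot;                /* freshly "created" socket */
  }

  int main(void)
  {
      struct toy_sock *a = toy_sock_add(4, 4789, 0, false);
      struct toy_sock *b = toy_sock_add(4, 4789, 0, false);   /* shared */
      struct toy_sock *c = toy_sock_add(6, 4789, 0, false);   /* new family */

      printf("shared: %d, v4 refcnt: %d, distinct v6: %d\n",
             a == b, a->refcnt, c != a);
      return 0;
  }
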
3571 int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, in vxlan_vni_in_use() argument
3578 if (tmp == vxlan) in vxlan_vni_in_use()
3754 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_config_apply() local
3755 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_config_apply()
3769 vxlan->net = src_net; in vxlan_config_apply()
3802 memcpy(&vxlan->cfg, conf, sizeof(*conf)); in vxlan_config_apply()
3809 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dev_configure() local
3813 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); in vxlan_dev_configure()
3827 struct vxlan_dev *vxlan = netdev_priv(dev); in __vxlan_dev_create() local
3834 dst = &vxlan->default_dst; in __vxlan_dev_create()
3843 err = vxlan_fdb_create(vxlan, all_zeros_mac, in __vxlan_dev_create()
3846 vxlan->cfg.dst_port, in __vxlan_dev_create()
3877 vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); in __vxlan_dev_create()
3880 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), in __vxlan_dev_create()
3883 vxlan_fdb_destroy(vxlan, f, false, false); in __vxlan_dev_create()
3890 list_add(&vxlan->next, &vn->vxlan_list); in __vxlan_dev_create()
3942 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_nl2conf() local
3949 memcpy(conf, &vxlan->cfg, sizeof(*conf)); in vxlan_nl2conf()
4239 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_changelink() local
4245 dst = &vxlan->default_dst; in vxlan_changelink()
4250 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, in vxlan_changelink()
4251 vxlan, extack); in vxlan_changelink()
4265 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); in vxlan_changelink()
4267 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4269 err = vxlan_fdb_update(vxlan, all_zeros_mac, in vxlan_changelink()
4273 vxlan->cfg.dst_port, in vxlan_changelink()
4278 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4285 __vxlan_fdb_delete(vxlan, all_zeros_mac, in vxlan_changelink()
4287 vxlan->cfg.dst_port, in vxlan_changelink()
4292 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4297 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { in vxlan_changelink()
4298 err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip, in vxlan_changelink()
4308 if (conf.age_interval != vxlan->cfg.age_interval) in vxlan_changelink()
4309 mod_timer(&vxlan->age_timer, jiffies); in vxlan_changelink()
4314 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); in vxlan_changelink()
4320 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dellink() local
4322 vxlan_flush(vxlan, true); in vxlan_dellink()
4324 list_del(&vxlan->next); in vxlan_dellink()
4326 if (vxlan->default_dst.remote_dev) in vxlan_dellink()
4327 netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); in vxlan_dellink()
4367 const struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_info() local
4368 const struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_fill_info()
4370 .low = htons(vxlan->cfg.port_min), in vxlan_fill_info()
4371 .high = htons(vxlan->cfg.port_max), in vxlan_fill_info()
4394 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { in vxlan_fill_info()
4395 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { in vxlan_fill_info()
4397 vxlan->cfg.saddr.sin.sin_addr.s_addr)) in vxlan_fill_info()
4402 &vxlan->cfg.saddr.sin6.sin6_addr)) in vxlan_fill_info()
4408 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || in vxlan_fill_info()
4410 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || in vxlan_fill_info()
4411 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || in vxlan_fill_info()
4412 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || in vxlan_fill_info()
4413 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || in vxlan_fill_info()
4415 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || in vxlan_fill_info()
4417 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) || in vxlan_fill_info()
4419 !!(vxlan->cfg.flags & VXLAN_F_RSC)) || in vxlan_fill_info()
4421 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) || in vxlan_fill_info()
4423 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) || in vxlan_fill_info()
4425 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) || in vxlan_fill_info()
4426 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || in vxlan_fill_info()
4427 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || in vxlan_fill_info()
4428 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || in vxlan_fill_info()
4430 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || in vxlan_fill_info()
4432 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || in vxlan_fill_info()
4434 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || in vxlan_fill_info()
4436 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) || in vxlan_fill_info()
4438 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)) || in vxlan_fill_info()
4440 !!(vxlan->cfg.flags & VXLAN_F_LOCALBYPASS))) in vxlan_fill_info()
4446 if (vxlan->cfg.flags & VXLAN_F_GBP && in vxlan_fill_info()
4450 if (vxlan->cfg.flags & VXLAN_F_GPE && in vxlan_fill_info()
4454 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL && in vxlan_fill_info()
4458 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER && in vxlan_fill_info()
4460 !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))) in vxlan_fill_info()
4471 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_net() local
4473 return vxlan->net; in vxlan_get_link_net()
4528 struct vxlan_dev *vxlan, *next; in vxlan_handle_lowerdev_unregister() local
4531 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_handle_lowerdev_unregister()
4532 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_handle_lowerdev_unregister()
4541 vxlan_dellink(vxlan->dev, &list_kill); in vxlan_handle_lowerdev_unregister()
4571 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_offloaded_set() local
4576 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4578 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4580 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4594 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4601 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_add() local
4606 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_add()
4609 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4610 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, in vxlan_fdb_external_learn_add()
4619 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4628 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_del() local
4633 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4634 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4636 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4640 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, in vxlan_fdb_external_learn_del()
4648 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4696 struct vxlan_dev *vxlan; in vxlan_fdb_nh_flush() local
4701 vxlan = rcu_dereference(fdb->vdev); in vxlan_fdb_nh_flush()
4702 WARN_ON(!vxlan); in vxlan_fdb_nh_flush()
4703 hash_index = fdb_head_index(vxlan, fdb->eth_addr, in vxlan_fdb_nh_flush()
4704 vxlan->default_dst.remote_vni); in vxlan_fdb_nh_flush()
4705 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4707 vxlan_fdb_destroy(vxlan, fdb, false, false); in vxlan_fdb_nh_flush()
4708 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4750 struct vxlan_dev *vxlan, *next; in vxlan_destroy_tunnels() local
4757 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_destroy_tunnels()
4761 if (!net_eq(dev_net(vxlan->dev), net)) in vxlan_destroy_tunnels()
4762 unregister_netdevice_queue(vxlan->dev, head); in vxlan_destroy_tunnels()