// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD	= BIT(0),
	UDP_TUNNEL_NIC_ENTRY_DEL	= BIT(1),
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL	= BIT(2),
	UDP_TUNNEL_NIC_ENTRY_FROZEN	= BIT(3),
};

struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 flags;
	u16 use_cnt;
#define UDP_TUNNEL_NIC_USE_CNT_MAX	U16_MAX
	u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};

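/* Lifecycle of a table entry: a free entry (use_cnt == 0, no flags) gets
 * claimed and queued with UDP_TUNNEL_NIC_ENTRY_ADD; once the driver accepts
 * it the entry is "present".  Dropping the last reference queues
 * UDP_TUNNEL_NIC_ENTRY_DEL.  UDP_TUNNEL_NIC_ENTRY_OP_FAIL marks entries
 * whose last driver op failed, so that further use count changes force
 * another sync attempt, and UDP_TUNNEL_NIC_ENTRY_FROZEN pins entries while
 * a replay re-counts references.
 */
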
/* We ensure all work structs are done using driver state, but not the code.
 * We need a workqueue we can flush before module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		return "vxlan-gpe";
	default:
		return "unknown";
	}
}

static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
	if (!udp_tunnel_nic_entry_is_free(entry))
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
			       UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}

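/* Return true if every entry in every table is completely unused. */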
static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return false;
	return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	if (!utn->missed)
		return false;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!test_bit(i, &utn->missed))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return true;
	}

	return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			       unsigned int idx, u8 priv)
{
	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}

static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

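/* Push a single queued entry to the driver via set_port()/unset_port()
 * and record the result on the entry.
 */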
static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}

static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
				   struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

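/* Alternative sync strategy for drivers which implement sync_table():
 * let the driver reprogram a whole table at once, then mark every queued
 * entry in that table with the result.
 */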
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

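/* Drivers whose callbacks may sleep get synced from the workqueue, since we
 * may be called from the tunnel driver's notification; others are synced
 * directly.  A pending replay is always handed off to the workqueue.
 */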
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
				struct udp_tunnel_info *ti)
{
	return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
			  struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i;

	/* Special case IPv4-only NICs */
	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
	    ti->sa_family != AF_INET)
		return false;

	for (i = 0; i < utn->n_tables; i++)
		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
			return true;
	return false;
}

static int
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
			     struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_table_entry *entry;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			entry = &utn->entries[i][j];

			if (!udp_tunnel_nic_entry_is_free(entry) &&
			    entry->port == ti->port &&
			    entry->type != ti->type) {
				__set_bit(i, &utn->missed);
				return true;
			}
		}
	return false;
}

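/* Adjust the use count of the entry at the given table/index and queue the
 * hardware add/del implied by the transition.  A pending opposite op gets
 * cancelled, unless the entry is dodgy (last op failed), in which case we
 * still queue another sync attempt.
 */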
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}

/* Try to find existing matching entry and adjust its use count, instead of
 * adding a new one. Returns true if entry was found. In case of delete the
 * entry may have gotten removed in the process, in which case it will be
 * queued for removal.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti, int use_cnt_adj)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
							 use_cnt_adj))
				return true;
	}

	return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* A different table may still fit this port in, but there
		 * are no devices currently which have multiple tables accepting
		 * the same tunnel type, and false positives are okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}

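/* Entry point for the udp_tunnel core's add_port notification: bump the use
 * count on a matching entry or claim a free one, then push the change to
 * the device.
 */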
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
	    ti->port == htons(IANA_VXLAN_UDP_PORT)) {
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
		return;
	}

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	udp_tunnel_nic_del_existing(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

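/* Driver notification that the device lost its state (e.g. after a reset):
 * clear pending deletes and failures, and re-queue every in-use port so it
 * gets programmed into the freshly reset hardware.
 */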
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}

static size_t
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int j;
	size_t size;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	size = 0;
	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}

static int
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			    struct sk_buff *skb)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	struct nlattr *nest;
	unsigned int j;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 utn->entries[table][j].port) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(utn->entries[table][j].type)))
			goto err_cancel;

		nla_nest_end(skb, nest);
	}

	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

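/* Ops exported to the rest of the stack through the udp_tunnel_nic_ops
 * pointer installed at module init; the udp_tunnel core and the ethtool
 * tunnel-info code call back into this file through them.
 */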
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port = __udp_tunnel_nic_get_port,
	.set_port_priv = __udp_tunnel_nic_set_port_priv,
	.add_port = __udp_tunnel_nic_add_port,
	.del_port = __udp_tunnel_nic_del_port,
	.reset_ntf = __udp_tunnel_nic_reset_ntf,
	.dump_size = __udp_tunnel_nic_dump_size,
	.dump_write = __udp_tunnel_nic_dump_write,
};

static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}

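/* Replay all ports into a table which has free space again: freeze what is
 * already tracked so the use counts are not doubled, then ask the tunnel
 * drivers (for shared tables, on every sharing device) to re-announce their
 * ports via udp_tunnel_get_rx_info().
 */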
static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the refcount.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	if (!info->shared) {
		udp_tunnel_get_rx_info(dev);
	} else {
		list_for_each_entry(node, &info->shared->devices, list)
			udp_tunnel_get_rx_info(node->dev);
	}

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);

	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
		     unsigned int n_tables)
{
	struct udp_tunnel_nic *utn;
	unsigned int i;

	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
	if (!utn)
		return NULL;
	utn->n_tables = n_tables;
	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
	if (!utn->entries)
		goto err_free_utn;

	for (i = 0; i < n_tables; i++) {
		utn->entries[i] = kcalloc(info->tables[i].n_entries,
					  sizeof(*utn->entries[i]), GFP_KERNEL);
		if (!utn->entries[i])
			goto err_free_prev_entries;
	}

	return utn;

err_free_prev_entries:
	while (i--)
		kfree(utn->entries[i]);
	kfree(utn->entries);
err_free_utn:
	kfree(utn);
	return NULL;
}

static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
{
	unsigned int i;

	for (i = 0; i < utn->n_tables; i++)
		kfree(utn->entries[i]);
	kfree(utn->entries);
	kfree(utn);
}

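/* Called from the NETDEV_REGISTER notifier: validate the driver-provided
 * udp_tunnel_nic_info, allocate (or, for shared tables, attach to) the
 * offload state, and optionally pull in the ports already known to the
 * stack.
 *
 * For illustration only (not part of this file, the foo_* names are
 * hypothetical): a driver typically describes its tables with something
 * like
 *
 *	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *		.set_port	= foo_udp_tunnel_set_port,
 *		.unset_port	= foo_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN },
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE },
 *		},
 *	};
 *
 * and points dev->udp_tunnel_nic_info at it before register_netdev().
 */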
static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node = NULL;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);
	/* Expect use count of at most 2 (IPv4, IPv6) per device */
	BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
		     UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);

	/* Check that the driver info is sane */
	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	if (WARN_ON(info->shared &&
		    info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return -EINVAL;

	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	/* Create UDP tunnel state structures */
	if (info->shared) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->dev = dev;
	}

	if (info->shared && info->shared->udp_tunnel_nic_info) {
		utn = info->shared->udp_tunnel_nic_info;
	} else {
		utn = udp_tunnel_nic_alloc(info, n_tables);
		if (!utn) {
			kfree(node);
			return -ENOMEM;
		}
	}

	if (info->shared) {
		if (!info->shared->udp_tunnel_nic_info) {
			INIT_LIST_HEAD(&info->shared->devices);
			info->shared->udp_tunnel_nic_info = utn;
		}

		list_add_tail(&node->list, &info->shared->devices);
	}

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

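	/* Unless the device only accepts ports while open, ask the stack to
	 * replay the ports it already knows about right away.
	 */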
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}

static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* For a shared table, remove this dev from the list of sharing
	 * devices; if other devices remain, just detach.
	 */
	if (info->shared) {
		struct udp_tunnel_nic_shared_node *node, *first;

		list_for_each_entry(node, &info->shared->devices, list)
			if (node->dev == dev)
				break;
		if (node->dev != dev)
			return;

		list_del(&node->list);
		kfree(node);

		first = list_first_entry_or_null(&info->shared->devices,
						 typeof(*first), list);
		if (first) {
			udp_tunnel_drop_rx_info(dev);
			utn->dev = first->dev;
			goto release_dev;
		}

		info->shared->udp_tunnel_nic_info = NULL;
	}

	/* Flush before we check work, so we don't waste time adding entries
	 * from the work which we will boot immediately.
	 */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	udp_tunnel_nic_free(utn);
release_dev:
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}

static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events will need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* All other events only matter if NIC has to be programmed open */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};

static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
late_initcall(udp_tunnel_nic_init_module);

static void __exit udp_tunnel_nic_cleanup_module(void)
{
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");