// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD	= BIT(0),
	UDP_TUNNEL_NIC_ENTRY_DEL	= BIT(1),
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL	= BIT(2),
	UDP_TUNNEL_NIC_ENTRY_FROZEN	= BIT(3),
};

struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 use_cnt;
	u8 flags;
	u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};
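
/* A driver advertises its offload capabilities by pointing
 * dev->udp_tunnel_nic_info at a struct udp_tunnel_nic_info before the
 * netdev is registered.  A minimal sketch (the callback names and table
 * sizes below are made up for illustration; the field names are the ones
 * this file dereferences):
 *
 *	static const struct udp_tunnel_nic_info mydrv_udp_tunnels = {
 *		.set_port	= mydrv_udp_tunnel_set_port,
 *		.unset_port	= mydrv_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{
 *				.n_entries	= 8,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
 *			},
 *			{
 *				.n_entries	= 8,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_GENEVE,
 *			},
 *		},
 *	};
 */
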
/* We ensure all work structs are done using driver state, but not the code.
 * We need a workqueue we can flush before module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		return "vxlan-gpe";
	default:
		return "unknown";
	}
}

static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
	if (!udp_tunnel_nic_entry_is_free(entry))
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
			       UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}

static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return false;
	return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	if (!utn->missed)
		return false;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!test_bit(i, &utn->missed))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return true;
	}

	return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			       unsigned int idx, u8 priv)
{
	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}

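/* Record the result of a hardware op on @entry.  The pending ADD/DEL flag
 * is cleared on success, or when the error indicates the device already
 * reached the requested state after an earlier failed op (-EEXIST for add,
 * -ENOENT for delete).  Any other failure marks the entry
 * UDP_TUNNEL_NIC_ENTRY_OP_FAIL so a later sync retries it.
 */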
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}

static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
				   struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

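/* Sync path for drivers which implement the ->sync_table() callback:
 * each table with queued entries gets a single callback invocation, and
 * its result is applied to every entry queued in that table.
 */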
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

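/* Kick off a sync.  Drivers which set UDP_TUNNEL_NIC_INFO_MAY_SLEEP are
 * serviced only from the workqueue, since this may be called from the
 * tunnel driver's notification path; other drivers are synced here
 * directly and the work is scheduled only if a replay became necessary.
 */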
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
				struct udp_tunnel_info *ti)
{
	return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
			  struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i;

	/* Special case IPv4-only NICs */
	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
	    ti->sa_family != AF_INET)
		return false;

	for (i = 0; i < utn->n_tables; i++)
		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
			return true;
	return false;
}

static int
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
			     struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_table_entry *entry;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			entry = &utn->entries[i][j];

			if (!udp_tunnel_nic_entry_is_free(entry) &&
			    entry->port == ti->port &&
			    entry->type != ti->type) {
				__set_bit(i, &utn->missed);
				return true;
			}
		}
	return false;
}

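/* Adjust the use count of an entry by @use_cnt_adj.  Hardware work is
 * queued only when the entry transitions between used and unused (or when
 * a previous op on it failed); a queued-but-not-yet-synced opposite op is
 * cancelled instead of issuing both.
 */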
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}

/* Try to find existing matching entry and adjust its use count, instead of
 * adding a new one. Returns true if entry was found. In case of delete the
 * entry may have gotten removed in the process, in which case it will be
 * queued for removal.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti, int use_cnt_adj)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
							 use_cnt_adj))
				return true;
	}

	return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* The different table may still fit this port in, but there
		 * are no devices currently which have multiple tables accepting
		 * the same tunnel type, and false positives are okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}

static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	udp_tunnel_nic_del_existing(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

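/* Called (under rtnl) by drivers which lost their port table, e.g. after a
 * device reset.  Drop pending delete/failure state and queue an ADD for
 * every entry still in use, then sync the device.
 */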
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}

static size_t
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int j;
	size_t size;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	size = 0;
	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}

static int
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			    struct sk_buff *skb)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	struct nlattr *nest;
	unsigned int j;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 utn->entries[table][j].port) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(utn->entries[table][j].type)))
			goto err_cancel;

		nla_nest_end(skb, nest);
	}

	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port	= __udp_tunnel_nic_get_port,
	.set_port_priv	= __udp_tunnel_nic_set_port_priv,
	.add_port	= __udp_tunnel_nic_add_port,
	.del_port	= __udp_tunnel_nic_del_port,
	.reset_ntf	= __udp_tunnel_nic_reset_ntf,
	.dump_size	= __udp_tunnel_nic_dump_size,
	.dump_write	= __udp_tunnel_nic_dump_write,
};

static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}

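/* Replay all ports known to the stack into the device.  Entries we are
 * already tracking are frozen first so the replayed add requests don't
 * double up their use counts.
 */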
static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the refcount.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	udp_tunnel_get_rx_info(dev);

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);

	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
		     unsigned int n_tables)
{
	struct udp_tunnel_nic *utn;
	unsigned int i;

	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
	if (!utn)
		return NULL;
	utn->n_tables = n_tables;
	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
	if (!utn->entries)
		goto err_free_utn;

	for (i = 0; i < n_tables; i++) {
		utn->entries[i] = kcalloc(info->tables[i].n_entries,
					  sizeof(*utn->entries[i]), GFP_KERNEL);
		if (!utn->entries[i])
			goto err_free_prev_entries;
	}

	return utn;

err_free_prev_entries:
	while (i--)
		kfree(utn->entries[i]);
	kfree(utn->entries);
err_free_utn:
	kfree(utn);
	return NULL;
}

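/* Validate the driver-provided info and allocate the offload state.
 * Drivers must implement either the set_port/unset_port pair or
 * sync_table (but not both), and tables must be populated contiguously
 * starting from index 0.
 */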
static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);

	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	utn = udp_tunnel_nic_alloc(info, n_tables);
	if (!utn)
		return -ENOMEM;

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}

static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	unsigned int i;

	/* Flush before we check work, so we don't waste time adding entries
	 * from the work which we will boot immediately.
	 */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	for (i = 0; i < utn->n_tables; i++)
		kfree(utn->entries[i]);
	kfree(utn->entries);
	kfree(utn);
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}

static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events will need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* All other events only matter if NIC has to be programmed open */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};

static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
late_initcall(udp_tunnel_nic_init_module);

static void __exit udp_tunnel_nic_cleanup_module(void)
{
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");