xref: /openbmc/linux/net/ipv4/udp_tunnel_nic.c (revision cc4e3835eff474aa274d6e1d18f69d9d296d3b76)
1*cc4e3835SJakub Kicinski // SPDX-License-Identifier: GPL-2.0-only
2*cc4e3835SJakub Kicinski // Copyright (c) 2020 Facebook Inc.
3*cc4e3835SJakub Kicinski 
4*cc4e3835SJakub Kicinski #include <linux/netdevice.h>
5*cc4e3835SJakub Kicinski #include <linux/slab.h>
6*cc4e3835SJakub Kicinski #include <linux/types.h>
7*cc4e3835SJakub Kicinski #include <linux/workqueue.h>
8*cc4e3835SJakub Kicinski #include <net/udp_tunnel.h>
9*cc4e3835SJakub Kicinski 
/* Per-entry state bits kept in udp_tunnel_nic_table_entry::flags */
enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD	= BIT(0),	/* port add queued for the device */
	UDP_TUNNEL_NIC_ENTRY_DEL	= BIT(1),	/* port removal queued for the device */
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL	= BIT(2),	/* last device op on this entry failed */
	UDP_TUNNEL_NIC_ENTRY_FROZEN	= BIT(3),	/* use count adjustments suspended (replay) */
};
16*cc4e3835SJakub Kicinski 
/* State of a single offloaded UDP tunnel port slot.
 * @port:	port number, network byte order
 * @type:	tunnel type (UDP_TUNNEL_TYPE_*)
 * @use_cnt:	number of current users of this port
 * @flags:	UDP_TUNNEL_NIC_ENTRY_* state bits
 * @hw_priv:	driver-private value set via ->set_port_priv
 */
struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 use_cnt;
	u8 flags;
	u8 hw_priv;
};
24*cc4e3835SJakub Kicinski 
/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one entry changed and must be pushed to the device
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};
49*cc4e3835SJakub Kicinski 
/* Drivers guarantee their state outlives any queued work, but this module's
 * code may not; keep a dedicated workqueue we can flush before the module
 * gets removed.
 */
53*cc4e3835SJakub Kicinski static struct workqueue_struct *udp_tunnel_nic_workqueue;
54*cc4e3835SJakub Kicinski 
55*cc4e3835SJakub Kicinski static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
56*cc4e3835SJakub Kicinski {
57*cc4e3835SJakub Kicinski 	switch (type) {
58*cc4e3835SJakub Kicinski 	case UDP_TUNNEL_TYPE_VXLAN:
59*cc4e3835SJakub Kicinski 		return "vxlan";
60*cc4e3835SJakub Kicinski 	case UDP_TUNNEL_TYPE_GENEVE:
61*cc4e3835SJakub Kicinski 		return "geneve";
62*cc4e3835SJakub Kicinski 	case UDP_TUNNEL_TYPE_VXLAN_GPE:
63*cc4e3835SJakub Kicinski 		return "vxlan-gpe";
64*cc4e3835SJakub Kicinski 	default:
65*cc4e3835SJakub Kicinski 		return "unknown";
66*cc4e3835SJakub Kicinski 	}
67*cc4e3835SJakub Kicinski }
68*cc4e3835SJakub Kicinski 
69*cc4e3835SJakub Kicinski static bool
70*cc4e3835SJakub Kicinski udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
71*cc4e3835SJakub Kicinski {
72*cc4e3835SJakub Kicinski 	return entry->use_cnt == 0 && !entry->flags;
73*cc4e3835SJakub Kicinski }
74*cc4e3835SJakub Kicinski 
75*cc4e3835SJakub Kicinski static bool
76*cc4e3835SJakub Kicinski udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
77*cc4e3835SJakub Kicinski {
78*cc4e3835SJakub Kicinski 	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
79*cc4e3835SJakub Kicinski }
80*cc4e3835SJakub Kicinski 
81*cc4e3835SJakub Kicinski static void
82*cc4e3835SJakub Kicinski udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
83*cc4e3835SJakub Kicinski {
84*cc4e3835SJakub Kicinski 	if (!udp_tunnel_nic_entry_is_free(entry))
85*cc4e3835SJakub Kicinski 		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
86*cc4e3835SJakub Kicinski }
87*cc4e3835SJakub Kicinski 
/* Clear the frozen bit again; use count adjustments may resume. */
static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}
93*cc4e3835SJakub Kicinski 
94*cc4e3835SJakub Kicinski static bool
95*cc4e3835SJakub Kicinski udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
96*cc4e3835SJakub Kicinski {
97*cc4e3835SJakub Kicinski 	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
98*cc4e3835SJakub Kicinski 			       UDP_TUNNEL_NIC_ENTRY_DEL);
99*cc4e3835SJakub Kicinski }
100*cc4e3835SJakub Kicinski 
/* Mark an entry with a pending op (@flag is ENTRY_ADD or ENTRY_DEL) and
 * note that the device needs a sync.
 */
static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}
109*cc4e3835SJakub Kicinski 
/* Fill a udp_tunnel_info from a table entry; all other @ti fields
 * (e.g. sa_family) are zeroed.
 */
static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}
119*cc4e3835SJakub Kicinski 
120*cc4e3835SJakub Kicinski static bool
121*cc4e3835SJakub Kicinski udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
122*cc4e3835SJakub Kicinski {
123*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
124*cc4e3835SJakub Kicinski 	unsigned int i, j;
125*cc4e3835SJakub Kicinski 
126*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
127*cc4e3835SJakub Kicinski 		for (j = 0; j < info->tables[i].n_entries; j++)
128*cc4e3835SJakub Kicinski 			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
129*cc4e3835SJakub Kicinski 				return false;
130*cc4e3835SJakub Kicinski 	return true;
131*cc4e3835SJakub Kicinski }
132*cc4e3835SJakub Kicinski 
133*cc4e3835SJakub Kicinski static bool
134*cc4e3835SJakub Kicinski udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
135*cc4e3835SJakub Kicinski {
136*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_table_info *table;
137*cc4e3835SJakub Kicinski 	unsigned int i, j;
138*cc4e3835SJakub Kicinski 
139*cc4e3835SJakub Kicinski 	if (!utn->missed)
140*cc4e3835SJakub Kicinski 		return false;
141*cc4e3835SJakub Kicinski 
142*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++) {
143*cc4e3835SJakub Kicinski 		table = &dev->udp_tunnel_nic_info->tables[i];
144*cc4e3835SJakub Kicinski 		if (!test_bit(i, &utn->missed))
145*cc4e3835SJakub Kicinski 			continue;
146*cc4e3835SJakub Kicinski 
147*cc4e3835SJakub Kicinski 		for (j = 0; j < table->n_entries; j++)
148*cc4e3835SJakub Kicinski 			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
149*cc4e3835SJakub Kicinski 				return true;
150*cc4e3835SJakub Kicinski 	}
151*cc4e3835SJakub Kicinski 
152*cc4e3835SJakub Kicinski 	return false;
153*cc4e3835SJakub Kicinski }
154*cc4e3835SJakub Kicinski 
/* ops->get_port: copy the entry at (@table, @idx) into @ti.
 * @ti is left untouched when the slot is unused (use_cnt == 0).
 */
static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}
168*cc4e3835SJakub Kicinski 
169*cc4e3835SJakub Kicinski static void
170*cc4e3835SJakub Kicinski __udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
171*cc4e3835SJakub Kicinski 			       unsigned int idx, u8 priv)
172*cc4e3835SJakub Kicinski {
173*cc4e3835SJakub Kicinski 	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
174*cc4e3835SJakub Kicinski }
175*cc4e3835SJakub Kicinski 
/* Record the outcome of a device op for @entry.
 * On success the pending ADD/DEL bit is cleared. For "dodgy" entries (a
 * previous op failed, so device state is uncertain) -EEXIST on add and
 * -ENOENT on delete also count as done. OP_FAIL tracks whether the latest
 * op failed.
 */
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	/* An entry should never have both ops queued at once */
	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}
198*cc4e3835SJakub Kicinski 
/* Push one queued entry to the device via the driver's per-port callbacks
 * (->set_port for adds, ->unset_port for deletes) and record the result.
 * No-op if the entry has nothing queued.
 */
static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}
227*cc4e3835SJakub Kicinski 
228*cc4e3835SJakub Kicinski static void
229*cc4e3835SJakub Kicinski udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
230*cc4e3835SJakub Kicinski 				   struct udp_tunnel_nic *utn)
231*cc4e3835SJakub Kicinski {
232*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
233*cc4e3835SJakub Kicinski 	unsigned int i, j;
234*cc4e3835SJakub Kicinski 
235*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
236*cc4e3835SJakub Kicinski 		for (j = 0; j < info->tables[i].n_entries; j++)
237*cc4e3835SJakub Kicinski 			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
238*cc4e3835SJakub Kicinski }
239*cc4e3835SJakub Kicinski 
/* Sync strategy for drivers with a whole-table callback: call ->sync_table
 * once per table that has queued entries, then mark every queued entry in
 * that table done with the single table-wide result.
 */
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		/* One result applies to all queued entries of this table */
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}
270*cc4e3835SJakub Kicinski 
/* Push all queued ops to the device using whichever callback style the
 * driver provides, then decide whether a full replay should follow.
 */
static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}
288*cc4e3835SJakub Kicinski 
/* Entry point for syncing after an add/del: either sync inline (driver
 * callbacks don't sleep) or defer to the workqueue. A needed replay is
 * always deferred.
 */
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}
309*cc4e3835SJakub Kicinski 
310*cc4e3835SJakub Kicinski static bool
311*cc4e3835SJakub Kicinski udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
312*cc4e3835SJakub Kicinski 				struct udp_tunnel_info *ti)
313*cc4e3835SJakub Kicinski {
314*cc4e3835SJakub Kicinski 	return table->tunnel_types & ti->type;
315*cc4e3835SJakub Kicinski }
316*cc4e3835SJakub Kicinski 
317*cc4e3835SJakub Kicinski static bool
318*cc4e3835SJakub Kicinski udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
319*cc4e3835SJakub Kicinski 			  struct udp_tunnel_info *ti)
320*cc4e3835SJakub Kicinski {
321*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
322*cc4e3835SJakub Kicinski 	unsigned int i;
323*cc4e3835SJakub Kicinski 
324*cc4e3835SJakub Kicinski 	/* Special case IPv4-only NICs */
325*cc4e3835SJakub Kicinski 	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
326*cc4e3835SJakub Kicinski 	    ti->sa_family != AF_INET)
327*cc4e3835SJakub Kicinski 		return false;
328*cc4e3835SJakub Kicinski 
329*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
330*cc4e3835SJakub Kicinski 		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
331*cc4e3835SJakub Kicinski 			return true;
332*cc4e3835SJakub Kicinski 	return false;
333*cc4e3835SJakub Kicinski }
334*cc4e3835SJakub Kicinski 
335*cc4e3835SJakub Kicinski static int
336*cc4e3835SJakub Kicinski udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
337*cc4e3835SJakub Kicinski 			     struct udp_tunnel_info *ti)
338*cc4e3835SJakub Kicinski {
339*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
340*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic_table_entry *entry;
341*cc4e3835SJakub Kicinski 	unsigned int i, j;
342*cc4e3835SJakub Kicinski 
343*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
344*cc4e3835SJakub Kicinski 		for (j = 0; j < info->tables[i].n_entries; j++) {
345*cc4e3835SJakub Kicinski 			entry =	&utn->entries[i][j];
346*cc4e3835SJakub Kicinski 
347*cc4e3835SJakub Kicinski 			if (!udp_tunnel_nic_entry_is_free(entry) &&
348*cc4e3835SJakub Kicinski 			    entry->port == ti->port &&
349*cc4e3835SJakub Kicinski 			    entry->type != ti->type) {
350*cc4e3835SJakub Kicinski 				__set_bit(i, &utn->missed);
351*cc4e3835SJakub Kicinski 				return true;
352*cc4e3835SJakub Kicinski 			}
353*cc4e3835SJakub Kicinski 		}
354*cc4e3835SJakub Kicinski 	return false;
355*cc4e3835SJakub Kicinski }
356*cc4e3835SJakub Kicinski 
/* Adjust an entry's use count by @use_cnt_adj and queue any device op this
 * implies. Only transitions between used and unused need hardware work; a
 * pending opposite op may simply be cancelled instead of issuing a new one.
 * "Dodgy" entries (last op failed) are always re-queued to force a retry.
 */
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry =  &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}
392*cc4e3835SJakub Kicinski 
/* If the entry at (@table, @idx) matches @ti, adjust its use count.
 * Returns true when the entry matched (even if it was frozen and therefore
 * left untouched - during replay the refcount must not be doubled up).
 */
static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry =  &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}
411*cc4e3835SJakub Kicinski 
412*cc4e3835SJakub Kicinski /* Try to find existing matching entry and adjust its use count, instead of
413*cc4e3835SJakub Kicinski  * adding a new one. Returns true if entry was found. In case of delete the
414*cc4e3835SJakub Kicinski  * entry may have gotten removed in the process, in which case it will be
415*cc4e3835SJakub Kicinski  * queued for removal.
416*cc4e3835SJakub Kicinski  */
417*cc4e3835SJakub Kicinski static bool
418*cc4e3835SJakub Kicinski udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
419*cc4e3835SJakub Kicinski 			    struct udp_tunnel_info *ti, int use_cnt_adj)
420*cc4e3835SJakub Kicinski {
421*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_table_info *table;
422*cc4e3835SJakub Kicinski 	unsigned int i, j;
423*cc4e3835SJakub Kicinski 
424*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++) {
425*cc4e3835SJakub Kicinski 		table = &dev->udp_tunnel_nic_info->tables[i];
426*cc4e3835SJakub Kicinski 		if (!udp_tunnel_nic_table_is_capable(table, ti))
427*cc4e3835SJakub Kicinski 			continue;
428*cc4e3835SJakub Kicinski 
429*cc4e3835SJakub Kicinski 		for (j = 0; j < table->n_entries; j++)
430*cc4e3835SJakub Kicinski 			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
431*cc4e3835SJakub Kicinski 							 use_cnt_adj))
432*cc4e3835SJakub Kicinski 				return true;
433*cc4e3835SJakub Kicinski 	}
434*cc4e3835SJakub Kicinski 
435*cc4e3835SJakub Kicinski 	return false;
436*cc4e3835SJakub Kicinski }
437*cc4e3835SJakub Kicinski 
/* Take one more reference on an already-offloaded port matching @ti.
 * Returns false if no such entry exists.
 */
static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}
444*cc4e3835SJakub Kicinski 
/* Drop one reference from an offloaded port matching @ti; the entry gets
 * queued for removal when its use count hits zero. Returns false if no
 * such entry exists.
 */
static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}
451*cc4e3835SJakub Kicinski 
/* Claim the first free slot in a capable table for @ti and queue a device
 * add. If every capable table is full the table(s) are marked in @missed
 * so the port can be replayed once space frees up. Returns true if a slot
 * was claimed.
 */
static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* The different table may still fit this port in, but there
		 * are no devices currently which have multiple tables accepting
		 * the same tunnel type, and false positives are okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}
488*cc4e3835SJakub Kicinski 
/* ops->add_port: offload a new tunnel port. Bumps the refcount of an
 * existing matching entry, or claims a new slot, then syncs the device.
 * Silently ignored when offload isn't set up, the device is closed (for
 * OPEN_ONLY devices), the type isn't supported, or the port collides.
 */
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}
516*cc4e3835SJakub Kicinski 
517*cc4e3835SJakub Kicinski static void
518*cc4e3835SJakub Kicinski __udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
519*cc4e3835SJakub Kicinski {
520*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic *utn;
521*cc4e3835SJakub Kicinski 
522*cc4e3835SJakub Kicinski 	utn = dev->udp_tunnel_nic;
523*cc4e3835SJakub Kicinski 	if (!utn)
524*cc4e3835SJakub Kicinski 		return;
525*cc4e3835SJakub Kicinski 
526*cc4e3835SJakub Kicinski 	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
527*cc4e3835SJakub Kicinski 		return;
528*cc4e3835SJakub Kicinski 
529*cc4e3835SJakub Kicinski 	udp_tunnel_nic_del_existing(dev, utn, ti);
530*cc4e3835SJakub Kicinski 
531*cc4e3835SJakub Kicinski 	udp_tunnel_nic_device_sync(dev, utn);
532*cc4e3835SJakub Kicinski }
533*cc4e3835SJakub Kicinski 
/* ops->reset_ntf: the device lost its state (e.g. reset). Forget pending
 * deletes and failures, re-queue an ADD for every port still in use, and
 * sync everything back to the device. Runs under rtnl.
 */
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}
566*cc4e3835SJakub Kicinski 
/* Implementation of the udp_tunnel_nic ops interface provided by this
 * module; presumably installed into the core's ops pointer elsewhere.
 */
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port	= __udp_tunnel_nic_get_port,
	.set_port_priv	= __udp_tunnel_nic_set_port_priv,
	.add_port	= __udp_tunnel_nic_add_port,
	.del_port	= __udp_tunnel_nic_del_port,
	.reset_ntf	= __udp_tunnel_nic_reset_ntf,
};
574*cc4e3835SJakub Kicinski 
/* Drop every port: zero all use counts (queueing the deletes), sync the
 * removals to the device, then wipe the tables clean.
 */
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			/* Negate the use count to bring the entry to zero */
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}
597*cc4e3835SJakub Kicinski 
598*cc4e3835SJakub Kicinski static void
599*cc4e3835SJakub Kicinski udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
600*cc4e3835SJakub Kicinski {
601*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
602*cc4e3835SJakub Kicinski 	unsigned int i, j;
603*cc4e3835SJakub Kicinski 
604*cc4e3835SJakub Kicinski 	/* Freeze all the ports we are already tracking so that the replay
605*cc4e3835SJakub Kicinski 	 * does not double up the refcount.
606*cc4e3835SJakub Kicinski 	 */
607*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
608*cc4e3835SJakub Kicinski 		for (j = 0; j < info->tables[i].n_entries; j++)
609*cc4e3835SJakub Kicinski 			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
610*cc4e3835SJakub Kicinski 	utn->missed = 0;
611*cc4e3835SJakub Kicinski 	utn->need_replay = 0;
612*cc4e3835SJakub Kicinski 
613*cc4e3835SJakub Kicinski 	udp_tunnel_get_rx_info(dev);
614*cc4e3835SJakub Kicinski 
615*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
616*cc4e3835SJakub Kicinski 		for (j = 0; j < info->tables[i].n_entries; j++)
617*cc4e3835SJakub Kicinski 			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
618*cc4e3835SJakub Kicinski }
619*cc4e3835SJakub Kicinski 
620*cc4e3835SJakub Kicinski static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
621*cc4e3835SJakub Kicinski {
622*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic *utn =
623*cc4e3835SJakub Kicinski 		container_of(work, struct udp_tunnel_nic, work);
624*cc4e3835SJakub Kicinski 
625*cc4e3835SJakub Kicinski 	rtnl_lock();
626*cc4e3835SJakub Kicinski 	utn->work_pending = 0;
627*cc4e3835SJakub Kicinski 	__udp_tunnel_nic_device_sync(utn->dev, utn);
628*cc4e3835SJakub Kicinski 
629*cc4e3835SJakub Kicinski 	if (utn->need_replay)
630*cc4e3835SJakub Kicinski 		udp_tunnel_nic_replay(utn->dev, utn);
631*cc4e3835SJakub Kicinski 	rtnl_unlock();
632*cc4e3835SJakub Kicinski }
633*cc4e3835SJakub Kicinski 
634*cc4e3835SJakub Kicinski static struct udp_tunnel_nic *
635*cc4e3835SJakub Kicinski udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
636*cc4e3835SJakub Kicinski 		     unsigned int n_tables)
637*cc4e3835SJakub Kicinski {
638*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic *utn;
639*cc4e3835SJakub Kicinski 	unsigned int i;
640*cc4e3835SJakub Kicinski 
641*cc4e3835SJakub Kicinski 	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
642*cc4e3835SJakub Kicinski 	if (!utn)
643*cc4e3835SJakub Kicinski 		return NULL;
644*cc4e3835SJakub Kicinski 	utn->n_tables = n_tables;
645*cc4e3835SJakub Kicinski 	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
646*cc4e3835SJakub Kicinski 
647*cc4e3835SJakub Kicinski 	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
648*cc4e3835SJakub Kicinski 	if (!utn->entries)
649*cc4e3835SJakub Kicinski 		goto err_free_utn;
650*cc4e3835SJakub Kicinski 
651*cc4e3835SJakub Kicinski 	for (i = 0; i < n_tables; i++) {
652*cc4e3835SJakub Kicinski 		utn->entries[i] = kcalloc(info->tables[i].n_entries,
653*cc4e3835SJakub Kicinski 					  sizeof(*utn->entries[i]), GFP_KERNEL);
654*cc4e3835SJakub Kicinski 		if (!utn->entries[i])
655*cc4e3835SJakub Kicinski 			goto err_free_prev_entries;
656*cc4e3835SJakub Kicinski 	}
657*cc4e3835SJakub Kicinski 
658*cc4e3835SJakub Kicinski 	return utn;
659*cc4e3835SJakub Kicinski 
660*cc4e3835SJakub Kicinski err_free_prev_entries:
661*cc4e3835SJakub Kicinski 	while (i--)
662*cc4e3835SJakub Kicinski 		kfree(utn->entries[i]);
663*cc4e3835SJakub Kicinski 	kfree(utn->entries);
664*cc4e3835SJakub Kicinski err_free_utn:
665*cc4e3835SJakub Kicinski 	kfree(utn);
666*cc4e3835SJakub Kicinski 	return NULL;
667*cc4e3835SJakub Kicinski }
668*cc4e3835SJakub Kicinski 
669*cc4e3835SJakub Kicinski static int udp_tunnel_nic_register(struct net_device *dev)
670*cc4e3835SJakub Kicinski {
671*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
672*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic *utn;
673*cc4e3835SJakub Kicinski 	unsigned int n_tables, i;
674*cc4e3835SJakub Kicinski 
675*cc4e3835SJakub Kicinski 	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
676*cc4e3835SJakub Kicinski 		     UDP_TUNNEL_NIC_MAX_TABLES);
677*cc4e3835SJakub Kicinski 
678*cc4e3835SJakub Kicinski 	if (WARN_ON(!info->set_port != !info->unset_port) ||
679*cc4e3835SJakub Kicinski 	    WARN_ON(!info->set_port == !info->sync_table) ||
680*cc4e3835SJakub Kicinski 	    WARN_ON(!info->tables[0].n_entries))
681*cc4e3835SJakub Kicinski 		return -EINVAL;
682*cc4e3835SJakub Kicinski 
683*cc4e3835SJakub Kicinski 	n_tables = 1;
684*cc4e3835SJakub Kicinski 	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
685*cc4e3835SJakub Kicinski 		if (!info->tables[i].n_entries)
686*cc4e3835SJakub Kicinski 			continue;
687*cc4e3835SJakub Kicinski 
688*cc4e3835SJakub Kicinski 		n_tables++;
689*cc4e3835SJakub Kicinski 		if (WARN_ON(!info->tables[i - 1].n_entries))
690*cc4e3835SJakub Kicinski 			return -EINVAL;
691*cc4e3835SJakub Kicinski 	}
692*cc4e3835SJakub Kicinski 
693*cc4e3835SJakub Kicinski 	utn = udp_tunnel_nic_alloc(info, n_tables);
694*cc4e3835SJakub Kicinski 	if (!utn)
695*cc4e3835SJakub Kicinski 		return -ENOMEM;
696*cc4e3835SJakub Kicinski 
697*cc4e3835SJakub Kicinski 	utn->dev = dev;
698*cc4e3835SJakub Kicinski 	dev_hold(dev);
699*cc4e3835SJakub Kicinski 	dev->udp_tunnel_nic = utn;
700*cc4e3835SJakub Kicinski 
701*cc4e3835SJakub Kicinski 	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
702*cc4e3835SJakub Kicinski 		udp_tunnel_get_rx_info(dev);
703*cc4e3835SJakub Kicinski 
704*cc4e3835SJakub Kicinski 	return 0;
705*cc4e3835SJakub Kicinski }
706*cc4e3835SJakub Kicinski 
707*cc4e3835SJakub Kicinski static void
708*cc4e3835SJakub Kicinski udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
709*cc4e3835SJakub Kicinski {
710*cc4e3835SJakub Kicinski 	unsigned int i;
711*cc4e3835SJakub Kicinski 
712*cc4e3835SJakub Kicinski 	/* Flush before we check work, so we don't waste time adding entries
713*cc4e3835SJakub Kicinski 	 * from the work which we will boot immediately.
714*cc4e3835SJakub Kicinski 	 */
715*cc4e3835SJakub Kicinski 	udp_tunnel_nic_flush(dev, utn);
716*cc4e3835SJakub Kicinski 
717*cc4e3835SJakub Kicinski 	/* Wait for the work to be done using the state, netdev core will
718*cc4e3835SJakub Kicinski 	 * retry unregister until we give up our reference on this device.
719*cc4e3835SJakub Kicinski 	 */
720*cc4e3835SJakub Kicinski 	if (utn->work_pending)
721*cc4e3835SJakub Kicinski 		return;
722*cc4e3835SJakub Kicinski 
723*cc4e3835SJakub Kicinski 	for (i = 0; i < utn->n_tables; i++)
724*cc4e3835SJakub Kicinski 		kfree(utn->entries[i]);
725*cc4e3835SJakub Kicinski 	kfree(utn->entries);
726*cc4e3835SJakub Kicinski 	kfree(utn);
727*cc4e3835SJakub Kicinski 	dev->udp_tunnel_nic = NULL;
728*cc4e3835SJakub Kicinski 	dev_put(dev);
729*cc4e3835SJakub Kicinski }
730*cc4e3835SJakub Kicinski 
731*cc4e3835SJakub Kicinski static int
732*cc4e3835SJakub Kicinski udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
733*cc4e3835SJakub Kicinski 			       unsigned long event, void *ptr)
734*cc4e3835SJakub Kicinski {
735*cc4e3835SJakub Kicinski 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
736*cc4e3835SJakub Kicinski 	const struct udp_tunnel_nic_info *info;
737*cc4e3835SJakub Kicinski 	struct udp_tunnel_nic *utn;
738*cc4e3835SJakub Kicinski 
739*cc4e3835SJakub Kicinski 	info = dev->udp_tunnel_nic_info;
740*cc4e3835SJakub Kicinski 	if (!info)
741*cc4e3835SJakub Kicinski 		return NOTIFY_DONE;
742*cc4e3835SJakub Kicinski 
743*cc4e3835SJakub Kicinski 	if (event == NETDEV_REGISTER) {
744*cc4e3835SJakub Kicinski 		int err;
745*cc4e3835SJakub Kicinski 
746*cc4e3835SJakub Kicinski 		err = udp_tunnel_nic_register(dev);
747*cc4e3835SJakub Kicinski 		if (err)
748*cc4e3835SJakub Kicinski 			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
749*cc4e3835SJakub Kicinski 		return notifier_from_errno(err);
750*cc4e3835SJakub Kicinski 	}
751*cc4e3835SJakub Kicinski 	/* All other events will need the udp_tunnel_nic state */
752*cc4e3835SJakub Kicinski 	utn = dev->udp_tunnel_nic;
753*cc4e3835SJakub Kicinski 	if (!utn)
754*cc4e3835SJakub Kicinski 		return NOTIFY_DONE;
755*cc4e3835SJakub Kicinski 
756*cc4e3835SJakub Kicinski 	if (event == NETDEV_UNREGISTER) {
757*cc4e3835SJakub Kicinski 		udp_tunnel_nic_unregister(dev, utn);
758*cc4e3835SJakub Kicinski 		return NOTIFY_OK;
759*cc4e3835SJakub Kicinski 	}
760*cc4e3835SJakub Kicinski 
761*cc4e3835SJakub Kicinski 	/* All other events only matter if NIC has to be programmed open */
762*cc4e3835SJakub Kicinski 	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
763*cc4e3835SJakub Kicinski 		return NOTIFY_DONE;
764*cc4e3835SJakub Kicinski 
765*cc4e3835SJakub Kicinski 	if (event == NETDEV_UP) {
766*cc4e3835SJakub Kicinski 		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
767*cc4e3835SJakub Kicinski 		udp_tunnel_get_rx_info(dev);
768*cc4e3835SJakub Kicinski 		return NOTIFY_OK;
769*cc4e3835SJakub Kicinski 	}
770*cc4e3835SJakub Kicinski 	if (event == NETDEV_GOING_DOWN) {
771*cc4e3835SJakub Kicinski 		udp_tunnel_nic_flush(dev, utn);
772*cc4e3835SJakub Kicinski 		return NOTIFY_OK;
773*cc4e3835SJakub Kicinski 	}
774*cc4e3835SJakub Kicinski 
775*cc4e3835SJakub Kicinski 	return NOTIFY_DONE;
776*cc4e3835SJakub Kicinski }
777*cc4e3835SJakub Kicinski 
778*cc4e3835SJakub Kicinski static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
779*cc4e3835SJakub Kicinski 	.notifier_call = udp_tunnel_nic_netdevice_event,
780*cc4e3835SJakub Kicinski };
781*cc4e3835SJakub Kicinski 
782*cc4e3835SJakub Kicinski static int __init udp_tunnel_nic_init_module(void)
783*cc4e3835SJakub Kicinski {
784*cc4e3835SJakub Kicinski 	int err;
785*cc4e3835SJakub Kicinski 
786*cc4e3835SJakub Kicinski 	udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
787*cc4e3835SJakub Kicinski 	if (!udp_tunnel_nic_workqueue)
788*cc4e3835SJakub Kicinski 		return -ENOMEM;
789*cc4e3835SJakub Kicinski 
790*cc4e3835SJakub Kicinski 	rtnl_lock();
791*cc4e3835SJakub Kicinski 	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
792*cc4e3835SJakub Kicinski 	rtnl_unlock();
793*cc4e3835SJakub Kicinski 
794*cc4e3835SJakub Kicinski 	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
795*cc4e3835SJakub Kicinski 	if (err)
796*cc4e3835SJakub Kicinski 		goto err_unset_ops;
797*cc4e3835SJakub Kicinski 
798*cc4e3835SJakub Kicinski 	return 0;
799*cc4e3835SJakub Kicinski 
800*cc4e3835SJakub Kicinski err_unset_ops:
801*cc4e3835SJakub Kicinski 	rtnl_lock();
802*cc4e3835SJakub Kicinski 	udp_tunnel_nic_ops = NULL;
803*cc4e3835SJakub Kicinski 	rtnl_unlock();
804*cc4e3835SJakub Kicinski 	destroy_workqueue(udp_tunnel_nic_workqueue);
805*cc4e3835SJakub Kicinski 	return err;
806*cc4e3835SJakub Kicinski }
807*cc4e3835SJakub Kicinski late_initcall(udp_tunnel_nic_init_module);
808*cc4e3835SJakub Kicinski 
809*cc4e3835SJakub Kicinski static void __exit udp_tunnel_nic_cleanup_module(void)
810*cc4e3835SJakub Kicinski {
811*cc4e3835SJakub Kicinski 	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);
812*cc4e3835SJakub Kicinski 
813*cc4e3835SJakub Kicinski 	rtnl_lock();
814*cc4e3835SJakub Kicinski 	udp_tunnel_nic_ops = NULL;
815*cc4e3835SJakub Kicinski 	rtnl_unlock();
816*cc4e3835SJakub Kicinski 
817*cc4e3835SJakub Kicinski 	destroy_workqueue(udp_tunnel_nic_workqueue);
818*cc4e3835SJakub Kicinski }
819*cc4e3835SJakub Kicinski module_exit(udp_tunnel_nic_cleanup_module);
820*cc4e3835SJakub Kicinski 
821*cc4e3835SJakub Kicinski MODULE_LICENSE("GPL");
822