1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/lockdep.h>
6 #include <linux/pci.h>
7 #include <linux/skbuff.h>
8 #include <linux/vmalloc.h>
9 #include <net/devlink.h>
10 #include <net/dst_metadata.h>
11 
12 #include "main.h"
13 #include "../nfpcore/nfp_cpp.h"
14 #include "../nfpcore/nfp_nffw.h"
15 #include "../nfpcore/nfp_nsp.h"
16 #include "../nfp_app.h"
17 #include "../nfp_main.h"
18 #include "../nfp_net.h"
19 #include "../nfp_net_repr.h"
20 #include "../nfp_port.h"
21 #include "./cmsg.h"
22 
23 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
24 
/* Extra capability string reported for flower vNICs in probe logs. */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}
29 
/* Flower firmware always operates with the eswitch in switchdev mode. */
static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
34 
35 static struct nfp_flower_non_repr_priv *
36 nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
37 {
38 	struct nfp_flower_priv *priv = app->priv;
39 	struct nfp_flower_non_repr_priv *entry;
40 
41 	ASSERT_RTNL();
42 
43 	list_for_each_entry(entry, &priv->non_repr_priv, list)
44 		if (entry->netdev == netdev)
45 			return entry;
46 
47 	return NULL;
48 }
49 
/* Take an additional reference on @non_repr_priv.  ref_count is a plain
 * integer — callers rely on external serialization (the lookup paths in
 * this file run under RTNL).
 */
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}
55 
56 struct nfp_flower_non_repr_priv *
57 nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
58 {
59 	struct nfp_flower_priv *priv = app->priv;
60 	struct nfp_flower_non_repr_priv *entry;
61 
62 	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
63 	if (entry)
64 		goto inc_ref;
65 
66 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
67 	if (!entry)
68 		return NULL;
69 
70 	entry->netdev = netdev;
71 	list_add(&entry->list, &priv->non_repr_priv);
72 
73 inc_ref:
74 	__nfp_flower_non_repr_priv_get(entry);
75 	return entry;
76 }
77 
78 void
79 __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
80 {
81 	if (--non_repr_priv->ref_count)
82 		return;
83 
84 	list_del(&non_repr_priv->list);
85 	kfree(non_repr_priv);
86 }
87 
/* Drop a reference on the non-repr private data for @netdev, if any.
 * A missing entry is silently ignored.
 */
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *non_repr_priv;

	non_repr_priv = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (non_repr_priv)
		__nfp_flower_non_repr_priv_put(non_repr_priv);
}
99 
100 static enum nfp_repr_type
101 nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
102 {
103 	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
104 	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
105 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
106 				  port_id);
107 		return NFP_REPR_TYPE_PHYS_PORT;
108 
109 	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
110 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
111 		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
112 		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
113 			return NFP_REPR_TYPE_PF;
114 		else
115 			return NFP_REPR_TYPE_VF;
116 	}
117 
118 	return __NFP_REPR_TYPE_MAX;
119 }
120 
121 static struct net_device *
122 nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
123 {
124 	enum nfp_repr_type repr_type;
125 	struct nfp_reprs *reprs;
126 	u8 port = 0;
127 
128 	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
129 	if (repr_type > NFP_REPR_TYPE_MAX)
130 		return NULL;
131 
132 	reprs = rcu_dereference(app->reprs[repr_type]);
133 	if (!reprs)
134 		return NULL;
135 
136 	if (port >= reprs->num_reprs)
137 		return NULL;
138 
139 	return rcu_dereference(reprs->reprs[port]);
140 }
141 
142 static int
143 nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
144 		       bool exists)
145 {
146 	struct nfp_reprs *reprs;
147 	int i, err, count = 0;
148 
149 	reprs = rcu_dereference_protected(app->reprs[type],
150 					  lockdep_is_held(&app->pf->lock));
151 	if (!reprs)
152 		return 0;
153 
154 	for (i = 0; i < reprs->num_reprs; i++) {
155 		struct net_device *netdev;
156 
157 		netdev = nfp_repr_get_locked(app, reprs, i);
158 		if (netdev) {
159 			struct nfp_repr *repr = netdev_priv(netdev);
160 
161 			err = nfp_flower_cmsg_portreify(repr, exists);
162 			if (err)
163 				return err;
164 			count++;
165 		}
166 	}
167 
168 	return count;
169 }
170 
171 static int
172 nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
173 {
174 	struct nfp_flower_priv *priv = app->priv;
175 
176 	if (!tot_repl)
177 		return 0;
178 
179 	lockdep_assert_held(&app->pf->lock);
180 	if (!wait_event_timeout(priv->reify_wait_queue,
181 				atomic_read(replies) >= tot_repl,
182 				NFP_FL_REPLY_TIMEOUT)) {
183 		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
184 		return -EIO;
185 	}
186 
187 	return 0;
188 }
189 
190 static int
191 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
192 {
193 	int err;
194 
195 	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
196 	if (err)
197 		return err;
198 
199 	netif_tx_wake_all_queues(repr->netdev);
200 
201 	return 0;
202 }
203 
204 static int
205 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
206 {
207 	netif_tx_disable(repr->netdev);
208 
209 	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
210 }
211 
/* Free the flower per-repr private data when a repr netdev is destroyed. */
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}
219 
220 static void
221 nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
222 {
223 	struct nfp_repr *repr = netdev_priv(netdev);
224 	struct nfp_flower_priv *priv = app->priv;
225 	atomic_t *replies = &priv->reify_replies;
226 	int err;
227 
228 	atomic_set(replies, 0);
229 	err = nfp_flower_cmsg_portreify(repr, false);
230 	if (err) {
231 		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
232 		return;
233 	}
234 
235 	nfp_flower_wait_repr_reify(app, replies, 1);
236 }
237 
238 static void nfp_flower_sriov_disable(struct nfp_app *app)
239 {
240 	struct nfp_flower_priv *priv = app->priv;
241 
242 	if (!priv->nn)
243 		return;
244 
245 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
246 }
247 
248 static int
249 nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
250 			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
251 			    enum nfp_repr_type repr_type, unsigned int cnt)
252 {
253 	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
254 	struct nfp_flower_priv *priv = app->priv;
255 	atomic_t *replies = &priv->reify_replies;
256 	struct nfp_flower_repr_priv *repr_priv;
257 	enum nfp_port_type port_type;
258 	struct nfp_repr *nfp_repr;
259 	struct nfp_reprs *reprs;
260 	int i, err, reify_cnt;
261 	const u8 queue = 0;
262 
263 	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
264 						    NFP_PORT_VF_PORT;
265 
266 	reprs = nfp_reprs_alloc(cnt);
267 	if (!reprs)
268 		return -ENOMEM;
269 
270 	for (i = 0; i < cnt; i++) {
271 		struct net_device *repr;
272 		struct nfp_port *port;
273 		u32 port_id;
274 
275 		repr = nfp_repr_alloc(app);
276 		if (!repr) {
277 			err = -ENOMEM;
278 			goto err_reprs_clean;
279 		}
280 
281 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
282 		if (!repr_priv) {
283 			err = -ENOMEM;
284 			goto err_reprs_clean;
285 		}
286 
287 		nfp_repr = netdev_priv(repr);
288 		nfp_repr->app_priv = repr_priv;
289 		repr_priv->nfp_repr = nfp_repr;
290 
291 		/* For now we only support 1 PF */
292 		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
293 
294 		port = nfp_port_alloc(app, port_type, repr);
295 		if (IS_ERR(port)) {
296 			err = PTR_ERR(port);
297 			nfp_repr_free(repr);
298 			goto err_reprs_clean;
299 		}
300 		if (repr_type == NFP_REPR_TYPE_PF) {
301 			port->pf_id = i;
302 			port->vnic = priv->nn->dp.ctrl_bar;
303 		} else {
304 			port->pf_id = 0;
305 			port->vf_id = i;
306 			port->vnic =
307 				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
308 		}
309 
310 		eth_hw_addr_random(repr);
311 
312 		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
313 						    i, queue);
314 		err = nfp_repr_init(app, repr,
315 				    port_id, port, priv->nn->dp.netdev);
316 		if (err) {
317 			nfp_port_free(port);
318 			nfp_repr_free(repr);
319 			goto err_reprs_clean;
320 		}
321 
322 		RCU_INIT_POINTER(reprs->reprs[i], repr);
323 		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
324 			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
325 			 repr->name);
326 	}
327 
328 	nfp_app_reprs_set(app, repr_type, reprs);
329 
330 	atomic_set(replies, 0);
331 	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
332 	if (reify_cnt < 0) {
333 		err = reify_cnt;
334 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
335 		goto err_reprs_remove;
336 	}
337 
338 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
339 	if (err)
340 		goto err_reprs_remove;
341 
342 	return 0;
343 err_reprs_remove:
344 	reprs = nfp_app_reprs_set(app, repr_type, NULL);
345 err_reprs_clean:
346 	nfp_reprs_clean_and_free(app, reprs);
347 	return err;
348 }
349 
350 static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
351 {
352 	struct nfp_flower_priv *priv = app->priv;
353 
354 	if (!priv->nn)
355 		return 0;
356 
357 	return nfp_flower_spawn_vnic_reprs(app,
358 					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
359 					   NFP_REPR_TYPE_VF, num_vfs);
360 }
361 
362 static int
363 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
364 {
365 	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
366 	atomic_t *replies = &priv->reify_replies;
367 	struct nfp_flower_repr_priv *repr_priv;
368 	struct nfp_repr *nfp_repr;
369 	struct sk_buff *ctrl_skb;
370 	struct nfp_reprs *reprs;
371 	int err, reify_cnt;
372 	unsigned int i;
373 
374 	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
375 	if (!ctrl_skb)
376 		return -ENOMEM;
377 
378 	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
379 	if (!reprs) {
380 		err = -ENOMEM;
381 		goto err_free_ctrl_skb;
382 	}
383 
384 	for (i = 0; i < eth_tbl->count; i++) {
385 		unsigned int phys_port = eth_tbl->ports[i].index;
386 		struct net_device *repr;
387 		struct nfp_port *port;
388 		u32 cmsg_port_id;
389 
390 		repr = nfp_repr_alloc(app);
391 		if (!repr) {
392 			err = -ENOMEM;
393 			goto err_reprs_clean;
394 		}
395 
396 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
397 		if (!repr_priv) {
398 			err = -ENOMEM;
399 			goto err_reprs_clean;
400 		}
401 
402 		nfp_repr = netdev_priv(repr);
403 		nfp_repr->app_priv = repr_priv;
404 		repr_priv->nfp_repr = nfp_repr;
405 
406 		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
407 		if (IS_ERR(port)) {
408 			err = PTR_ERR(port);
409 			nfp_repr_free(repr);
410 			goto err_reprs_clean;
411 		}
412 		err = nfp_port_init_phy_port(app->pf, app, port, i);
413 		if (err) {
414 			nfp_port_free(port);
415 			nfp_repr_free(repr);
416 			goto err_reprs_clean;
417 		}
418 
419 		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
420 		nfp_net_get_mac_addr(app->pf, repr, port);
421 
422 		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
423 		err = nfp_repr_init(app, repr,
424 				    cmsg_port_id, port, priv->nn->dp.netdev);
425 		if (err) {
426 			nfp_port_free(port);
427 			nfp_repr_free(repr);
428 			goto err_reprs_clean;
429 		}
430 
431 		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
432 					     eth_tbl->ports[i].nbi,
433 					     eth_tbl->ports[i].base,
434 					     phys_port);
435 
436 		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
437 		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
438 			 phys_port, repr->name);
439 	}
440 
441 	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
442 
443 	/* The REIFY/MAC_REPR control messages should be sent after the MAC
444 	 * representors are registered using nfp_app_reprs_set().  This is
445 	 * because the firmware may respond with control messages for the
446 	 * MAC representors, f.e. to provide the driver with information
447 	 * about their state, and without registration the driver will drop
448 	 * any such messages.
449 	 */
450 	atomic_set(replies, 0);
451 	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
452 	if (reify_cnt < 0) {
453 		err = reify_cnt;
454 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
455 		goto err_reprs_remove;
456 	}
457 
458 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
459 	if (err)
460 		goto err_reprs_remove;
461 
462 	nfp_ctrl_tx(app->ctrl, ctrl_skb);
463 
464 	return 0;
465 err_reprs_remove:
466 	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
467 err_reprs_clean:
468 	nfp_reprs_clean_and_free(app, reprs);
469 err_free_ctrl_skb:
470 	kfree_skb(ctrl_skb);
471 	return err;
472 }
473 
474 static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
475 				 unsigned int id)
476 {
477 	if (id > 0) {
478 		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
479 		goto err_invalid_port;
480 	}
481 
482 	eth_hw_addr_random(nn->dp.netdev);
483 	netif_keep_dst(nn->dp.netdev);
484 	nn->vnic_no_name = true;
485 
486 	return 0;
487 
488 err_invalid_port:
489 	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
490 	return PTR_ERR_OR_ZERO(nn->port);
491 }
492 
/* Tear down all representors when the data vNIC goes away.  VF reprs are
 * only present while SR-IOV VFs are enabled.
 */
static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}
504 
505 static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
506 {
507 	struct nfp_flower_priv *priv = app->priv;
508 	int err;
509 
510 	priv->nn = nn;
511 
512 	err = nfp_flower_spawn_phy_reprs(app, app->priv);
513 	if (err)
514 		goto err_clear_nn;
515 
516 	err = nfp_flower_spawn_vnic_reprs(app,
517 					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
518 					  NFP_REPR_TYPE_PF, 1);
519 	if (err)
520 		goto err_destroy_reprs_phy;
521 
522 	if (app->pf->num_vfs) {
523 		err = nfp_flower_spawn_vnic_reprs(app,
524 						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
525 						  NFP_REPR_TYPE_VF,
526 						  app->pf->num_vfs);
527 		if (err)
528 			goto err_destroy_reprs_pf;
529 	}
530 
531 	return 0;
532 
533 err_destroy_reprs_pf:
534 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
535 err_destroy_reprs_phy:
536 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
537 err_clear_nn:
538 	priv->nn = NULL;
539 	return err;
540 }
541 
/* One-time flower app initialisation: validate required BARs and firmware
 * symbols, allocate and populate the app private data, initialise the
 * flow metadata tables and negotiate optional firmware features (extra
 * features bitmap, LAG).  Returns 0 on success or a negative errno.
 */
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* Host context split symbol is optional — fall back to a single
	 * memory unit when the firmware does not provide it.
	 */
	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	/* Host context count is also optional — default to 2^17 contexts. */
	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		/* Missing symbol just means this firmware has no LAG
		 * support — not a fatal error.
		 */
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}
655 
656 static void nfp_flower_clean(struct nfp_app *app)
657 {
658 	struct nfp_flower_priv *app_priv = app->priv;
659 
660 	skb_queue_purge(&app_priv->cmsg_skbs_high);
661 	skb_queue_purge(&app_priv->cmsg_skbs_low);
662 	flush_work(&app_priv->cmsg_work);
663 
664 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
665 		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
666 
667 	nfp_flower_metadata_cleanup(app);
668 	vfree(app->priv);
669 	app->priv = NULL;
670 }
671 
672 static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
673 {
674 	bool ret;
675 
676 	spin_lock_bh(&app_priv->mtu_conf.lock);
677 	ret = app_priv->mtu_conf.ack;
678 	spin_unlock_bh(&app_priv->mtu_conf.lock);
679 
680 	return ret;
681 }
682 
/* Request a physical port MTU change from firmware and wait for its ack.
 * Non-physical reprs need no firmware involvement and return 0 directly.
 * Returns 0 on success, -EINVAL if firmware lacks the NBI MTU feature,
 * -EIO if the ack times out, or the portmod send error.
 */
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	/* Record the pending request under the lock so the cmsg handler can
	 * match the firmware's ack against it.
	 */
	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		/* Send failed — clear the pending request again. */
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}
728 
729 static int nfp_flower_start(struct nfp_app *app)
730 {
731 	struct nfp_flower_priv *app_priv = app->priv;
732 	int err;
733 
734 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
735 		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
736 		if (err)
737 			return err;
738 	}
739 
740 	return nfp_tunnel_config_start(app);
741 }
742 
/* Stop hook: counterpart of nfp_flower_start(), tears down tunnel state. */
static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}
747 
/* Dispatch netdev notifier events to the flower sub-handlers in order:
 * LAG (only when firmware supports it), indirect TC block registration,
 * then tunnel MAC tracking.  Stops early when a handler consumes the
 * event (NOTIFY_STOP_MASK set in its return value).
 */
static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
767 
/* Callback table registering the flower application with the core NFP
 * driver.
 */
const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};
807