1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/lockdep.h>
6 #include <linux/pci.h>
7 #include <linux/skbuff.h>
8 #include <linux/vmalloc.h>
9 #include <net/devlink.h>
10 #include <net/dst_metadata.h>
11 
12 #include "main.h"
13 #include "../nfpcore/nfp_cpp.h"
14 #include "../nfpcore/nfp_nffw.h"
15 #include "../nfpcore/nfp_nsp.h"
16 #include "../nfp_app.h"
17 #include "../nfp_main.h"
18 #include "../nfp_net.h"
19 #include "../nfp_net_repr.h"
20 #include "../nfp_port.h"
21 #include "./cmsg.h"
22 
23 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
24 
/* Report the extra capability string shown for flower vNICs. */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}
29 
30 static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
31 {
32 	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
33 }
34 
35 static enum nfp_repr_type
36 nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
37 {
38 	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
39 	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
40 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
41 				  port_id);
42 		return NFP_REPR_TYPE_PHYS_PORT;
43 
44 	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
45 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
46 		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
47 		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
48 			return NFP_REPR_TYPE_PF;
49 		else
50 			return NFP_REPR_TYPE_VF;
51 	}
52 
53 	return __NFP_REPR_TYPE_MAX;
54 }
55 
56 static struct net_device *
57 nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
58 {
59 	enum nfp_repr_type repr_type;
60 	struct nfp_reprs *reprs;
61 	u8 port = 0;
62 
63 	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
64 	if (repr_type > NFP_REPR_TYPE_MAX)
65 		return NULL;
66 
67 	reprs = rcu_dereference(app->reprs[repr_type]);
68 	if (!reprs)
69 		return NULL;
70 
71 	if (port >= reprs->num_reprs)
72 		return NULL;
73 
74 	return rcu_dereference(reprs->reprs[port]);
75 }
76 
77 static int
78 nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
79 		       bool exists)
80 {
81 	struct nfp_reprs *reprs;
82 	int i, err, count = 0;
83 
84 	reprs = rcu_dereference_protected(app->reprs[type],
85 					  lockdep_is_held(&app->pf->lock));
86 	if (!reprs)
87 		return 0;
88 
89 	for (i = 0; i < reprs->num_reprs; i++) {
90 		struct net_device *netdev;
91 
92 		netdev = nfp_repr_get_locked(app, reprs, i);
93 		if (netdev) {
94 			struct nfp_repr *repr = netdev_priv(netdev);
95 
96 			err = nfp_flower_cmsg_portreify(repr, exists);
97 			if (err)
98 				return err;
99 			count++;
100 		}
101 	}
102 
103 	return count;
104 }
105 
106 static int
107 nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
108 {
109 	struct nfp_flower_priv *priv = app->priv;
110 	int err;
111 
112 	if (!tot_repl)
113 		return 0;
114 
115 	lockdep_assert_held(&app->pf->lock);
116 	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
117 					       atomic_read(replies) >= tot_repl,
118 					       msecs_to_jiffies(10));
119 	if (err <= 0) {
120 		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
121 		return -EIO;
122 	}
123 
124 	return 0;
125 }
126 
127 static int
128 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
129 {
130 	int err;
131 
132 	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
133 	if (err)
134 		return err;
135 
136 	netif_tx_wake_all_queues(repr->netdev);
137 
138 	return 0;
139 }
140 
141 static int
142 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
143 {
144 	netif_tx_disable(repr->netdev);
145 
146 	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
147 }
148 
149 static int
150 nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
151 {
152 	return tc_setup_cb_egdev_register(netdev,
153 					  nfp_flower_setup_tc_egress_cb,
154 					  netdev_priv(netdev));
155 }
156 
157 static void
158 nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
159 {
160 	struct nfp_repr *repr = netdev_priv(netdev);
161 
162 	kfree(repr->app_priv);
163 
164 	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
165 				     netdev_priv(netdev));
166 }
167 
/* Notify firmware that a representor is about to be destroyed and wait
 * briefly for the acknowledgment.  Best effort: on failure we only warn,
 * since the repr is going away regardless.
 */
static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	/* Reset the reply counter before sending so the firmware's response
	 * is counted against this request only.
	 */
	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	/* Wait for exactly one REIFY reply; timeout already warns. */
	nfp_flower_wait_repr_reify(app, replies, 1);
}
185 
186 static void nfp_flower_sriov_disable(struct nfp_app *app)
187 {
188 	struct nfp_flower_priv *priv = app->priv;
189 
190 	if (!priv->nn)
191 		return;
192 
193 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
194 }
195 
196 static int
197 nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
198 			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
199 			    enum nfp_repr_type repr_type, unsigned int cnt)
200 {
201 	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
202 	struct nfp_flower_priv *priv = app->priv;
203 	atomic_t *replies = &priv->reify_replies;
204 	struct nfp_flower_repr_priv *repr_priv;
205 	enum nfp_port_type port_type;
206 	struct nfp_repr *nfp_repr;
207 	struct nfp_reprs *reprs;
208 	int i, err, reify_cnt;
209 	const u8 queue = 0;
210 
211 	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
212 						    NFP_PORT_VF_PORT;
213 
214 	reprs = nfp_reprs_alloc(cnt);
215 	if (!reprs)
216 		return -ENOMEM;
217 
218 	for (i = 0; i < cnt; i++) {
219 		struct net_device *repr;
220 		struct nfp_port *port;
221 		u32 port_id;
222 
223 		repr = nfp_repr_alloc(app);
224 		if (!repr) {
225 			err = -ENOMEM;
226 			goto err_reprs_clean;
227 		}
228 
229 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
230 		if (!repr_priv) {
231 			err = -ENOMEM;
232 			goto err_reprs_clean;
233 		}
234 
235 		nfp_repr = netdev_priv(repr);
236 		nfp_repr->app_priv = repr_priv;
237 
238 		/* For now we only support 1 PF */
239 		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
240 
241 		port = nfp_port_alloc(app, port_type, repr);
242 		if (IS_ERR(port)) {
243 			err = PTR_ERR(port);
244 			nfp_repr_free(repr);
245 			goto err_reprs_clean;
246 		}
247 		if (repr_type == NFP_REPR_TYPE_PF) {
248 			port->pf_id = i;
249 			port->vnic = priv->nn->dp.ctrl_bar;
250 		} else {
251 			port->pf_id = 0;
252 			port->vf_id = i;
253 			port->vnic =
254 				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
255 		}
256 
257 		eth_hw_addr_random(repr);
258 
259 		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
260 						    i, queue);
261 		err = nfp_repr_init(app, repr,
262 				    port_id, port, priv->nn->dp.netdev);
263 		if (err) {
264 			nfp_port_free(port);
265 			nfp_repr_free(repr);
266 			goto err_reprs_clean;
267 		}
268 
269 		RCU_INIT_POINTER(reprs->reprs[i], repr);
270 		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
271 			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
272 			 repr->name);
273 	}
274 
275 	nfp_app_reprs_set(app, repr_type, reprs);
276 
277 	atomic_set(replies, 0);
278 	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
279 	if (reify_cnt < 0) {
280 		err = reify_cnt;
281 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
282 		goto err_reprs_remove;
283 	}
284 
285 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
286 	if (err)
287 		goto err_reprs_remove;
288 
289 	return 0;
290 err_reprs_remove:
291 	reprs = nfp_app_reprs_set(app, repr_type, NULL);
292 err_reprs_clean:
293 	nfp_reprs_clean_and_free(app, reprs);
294 	return err;
295 }
296 
297 static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
298 {
299 	struct nfp_flower_priv *priv = app->priv;
300 
301 	if (!priv->nn)
302 		return 0;
303 
304 	return nfp_flower_spawn_vnic_reprs(app,
305 					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
306 					   NFP_REPR_TYPE_VF, num_vfs);
307 }
308 
309 static int
310 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
311 {
312 	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
313 	atomic_t *replies = &priv->reify_replies;
314 	struct nfp_flower_repr_priv *repr_priv;
315 	struct nfp_repr *nfp_repr;
316 	struct sk_buff *ctrl_skb;
317 	struct nfp_reprs *reprs;
318 	int err, reify_cnt;
319 	unsigned int i;
320 
321 	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
322 	if (!ctrl_skb)
323 		return -ENOMEM;
324 
325 	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
326 	if (!reprs) {
327 		err = -ENOMEM;
328 		goto err_free_ctrl_skb;
329 	}
330 
331 	for (i = 0; i < eth_tbl->count; i++) {
332 		unsigned int phys_port = eth_tbl->ports[i].index;
333 		struct net_device *repr;
334 		struct nfp_port *port;
335 		u32 cmsg_port_id;
336 
337 		repr = nfp_repr_alloc(app);
338 		if (!repr) {
339 			err = -ENOMEM;
340 			goto err_reprs_clean;
341 		}
342 
343 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
344 		if (!repr_priv) {
345 			err = -ENOMEM;
346 			goto err_reprs_clean;
347 		}
348 
349 		nfp_repr = netdev_priv(repr);
350 		nfp_repr->app_priv = repr_priv;
351 
352 		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
353 		if (IS_ERR(port)) {
354 			err = PTR_ERR(port);
355 			nfp_repr_free(repr);
356 			goto err_reprs_clean;
357 		}
358 		err = nfp_port_init_phy_port(app->pf, app, port, i);
359 		if (err) {
360 			nfp_port_free(port);
361 			nfp_repr_free(repr);
362 			goto err_reprs_clean;
363 		}
364 
365 		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
366 		nfp_net_get_mac_addr(app->pf, repr, port);
367 
368 		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
369 		err = nfp_repr_init(app, repr,
370 				    cmsg_port_id, port, priv->nn->dp.netdev);
371 		if (err) {
372 			nfp_port_free(port);
373 			nfp_repr_free(repr);
374 			goto err_reprs_clean;
375 		}
376 
377 		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
378 					     eth_tbl->ports[i].nbi,
379 					     eth_tbl->ports[i].base,
380 					     phys_port);
381 
382 		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
383 		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
384 			 phys_port, repr->name);
385 	}
386 
387 	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
388 
389 	/* The REIFY/MAC_REPR control messages should be sent after the MAC
390 	 * representors are registered using nfp_app_reprs_set().  This is
391 	 * because the firmware may respond with control messages for the
392 	 * MAC representors, f.e. to provide the driver with information
393 	 * about their state, and without registration the driver will drop
394 	 * any such messages.
395 	 */
396 	atomic_set(replies, 0);
397 	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
398 	if (reify_cnt < 0) {
399 		err = reify_cnt;
400 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
401 		goto err_reprs_remove;
402 	}
403 
404 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
405 	if (err)
406 		goto err_reprs_remove;
407 
408 	nfp_ctrl_tx(app->ctrl, ctrl_skb);
409 
410 	return 0;
411 err_reprs_remove:
412 	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
413 err_reprs_clean:
414 	nfp_reprs_clean_and_free(app, reprs);
415 err_free_ctrl_skb:
416 	kfree_skb(ctrl_skb);
417 	return err;
418 }
419 
420 static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
421 				 unsigned int id)
422 {
423 	if (id > 0) {
424 		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
425 		goto err_invalid_port;
426 	}
427 
428 	eth_hw_addr_random(nn->dp.netdev);
429 	netif_keep_dst(nn->dp.netdev);
430 	nn->vnic_no_name = true;
431 
432 	return 0;
433 
434 err_invalid_port:
435 	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
436 	return PTR_ERR_OR_ZERO(nn->port);
437 }
438 
/* Per-vNIC teardown: destroy all representor groups (VF reprs only if
 * SR-IOV was enabled) before clearing the data vNIC pointer.
 */
static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	/* Other paths (e.g. sriov_disable) use priv->nn as the "app is
	 * active" flag - clear it last.
	 */
	priv->nn = NULL;
}
450 
/* Per-vNIC init hook: record the data vNIC and spawn the physical-port,
 * PF and (if SR-IOV is already enabled) VF representors.  Unwinds each
 * repr group in reverse order on failure.
 */
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	/* Exactly one PF representor is supported. */
	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}
487 
/* App init: validate firmware/hardware prerequisites, allocate the
 * flower private state, set up cmsg queues and flow metadata, and probe
 * optional firmware features (extra features bitmap, LAG offload).
 */
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	u64 version, features, ctx_count;
	struct nfp_flower_priv *app_priv;
	int err;

	/* Flower needs the eth table plus the mac_stats and vf_cfg BARs;
	 * refuse to bind without them.
	 */
	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* Host context count is optional in firmware - fall back to the
	 * historical default of 2^17 contexts if the symbol is missing.
	 */
	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		/* Symbol absent simply means this firmware has no LAG
		 * support - not a fatal error.
		 */
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}
579 
/* App teardown: drop any queued control messages and make sure the cmsg
 * worker has finished before releasing metadata and the private state.
 */
static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	/* LAG state only exists if init successfully enabled the feature. */
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}
595 
596 static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
597 {
598 	bool ret;
599 
600 	spin_lock_bh(&app_priv->mtu_conf.lock);
601 	ret = app_priv->mtu_conf.ack;
602 	spin_unlock_bh(&app_priv->mtu_conf.lock);
603 
604 	return ret;
605 }
606 
/* MTU-change hook for representors.  Physical port MTU changes must be
 * pushed to firmware and confirmed; other repr types need no firmware
 * involvement.  Returns -EIO if firmware does not ack within 10ms.
 */
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	/* Record the pending request under the lock so the cmsg handler can
	 * match the firmware's reply against this port and value.
	 */
	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		/* Clear the pending request so stale replies are ignored. */
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}
654 
655 static int nfp_flower_start(struct nfp_app *app)
656 {
657 	struct nfp_flower_priv *app_priv = app->priv;
658 	int err;
659 
660 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
661 		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
662 		if (err)
663 			return err;
664 
665 		err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
666 		if (err)
667 			return err;
668 	}
669 
670 	return nfp_tunnel_config_start(app);
671 }
672 
673 static void nfp_flower_stop(struct nfp_app *app)
674 {
675 	struct nfp_flower_priv *app_priv = app->priv;
676 
677 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
678 		unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
679 
680 	nfp_tunnel_config_stop(app);
681 }
682 
/* nfp_app ops table binding the flower firmware application to the core
 * driver.  Control messages carry metadata and all control capabilities
 * are accepted.
 */
const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	/* App lifetime */
	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	/* Data vNIC lifetime */
	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	/* Representor lifetime */
	.repr_init	= nfp_flower_repr_netdev_init,
	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};
721