1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/lockdep.h>
6 #include <linux/pci.h>
7 #include <linux/skbuff.h>
8 #include <linux/vmalloc.h>
9 #include <net/devlink.h>
10 #include <net/dst_metadata.h>
11 
12 #include "main.h"
13 #include "../nfpcore/nfp_cpp.h"
14 #include "../nfpcore/nfp_nffw.h"
15 #include "../nfpcore/nfp_nsp.h"
16 #include "../nfp_app.h"
17 #include "../nfp_main.h"
18 #include "../nfp_net.h"
19 #include "../nfp_net_repr.h"
20 #include "../nfp_port.h"
21 #include "./cmsg.h"
22 
23 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
24 
/* Extra capability string reported for vNICs owned by the flower app. */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}
29 
30 static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
31 {
32 	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
33 }
34 
35 static enum nfp_repr_type
36 nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
37 {
38 	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
39 	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
40 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
41 				  port_id);
42 		return NFP_REPR_TYPE_PHYS_PORT;
43 
44 	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
45 		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
46 		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
47 		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
48 			return NFP_REPR_TYPE_PF;
49 		else
50 			return NFP_REPR_TYPE_VF;
51 	}
52 
53 	return __NFP_REPR_TYPE_MAX;
54 }
55 
56 static struct net_device *
57 nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
58 {
59 	enum nfp_repr_type repr_type;
60 	struct nfp_reprs *reprs;
61 	u8 port = 0;
62 
63 	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
64 	if (repr_type > NFP_REPR_TYPE_MAX)
65 		return NULL;
66 
67 	reprs = rcu_dereference(app->reprs[repr_type]);
68 	if (!reprs)
69 		return NULL;
70 
71 	if (port >= reprs->num_reprs)
72 		return NULL;
73 
74 	return rcu_dereference(reprs->reprs[port]);
75 }
76 
77 static int
78 nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
79 		       bool exists)
80 {
81 	struct nfp_reprs *reprs;
82 	int i, err, count = 0;
83 
84 	reprs = rcu_dereference_protected(app->reprs[type],
85 					  lockdep_is_held(&app->pf->lock));
86 	if (!reprs)
87 		return 0;
88 
89 	for (i = 0; i < reprs->num_reprs; i++) {
90 		struct net_device *netdev;
91 
92 		netdev = nfp_repr_get_locked(app, reprs, i);
93 		if (netdev) {
94 			struct nfp_repr *repr = netdev_priv(netdev);
95 
96 			err = nfp_flower_cmsg_portreify(repr, exists);
97 			if (err)
98 				return err;
99 			count++;
100 		}
101 	}
102 
103 	return count;
104 }
105 
106 static int
107 nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
108 {
109 	struct nfp_flower_priv *priv = app->priv;
110 	int err;
111 
112 	if (!tot_repl)
113 		return 0;
114 
115 	lockdep_assert_held(&app->pf->lock);
116 	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
117 					       atomic_read(replies) >= tot_repl,
118 					       msecs_to_jiffies(10));
119 	if (err <= 0) {
120 		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
121 		return -EIO;
122 	}
123 
124 	return 0;
125 }
126 
127 static int
128 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
129 {
130 	int err;
131 
132 	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
133 	if (err)
134 		return err;
135 
136 	netif_tx_wake_all_queues(repr->netdev);
137 
138 	return 0;
139 }
140 
141 static int
142 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
143 {
144 	netif_tx_disable(repr->netdev);
145 
146 	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
147 }
148 
149 static void
150 nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
151 {
152 	struct nfp_repr *repr = netdev_priv(netdev);
153 
154 	kfree(repr->app_priv);
155 }
156 
157 static void
158 nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
159 {
160 	struct nfp_repr *repr = netdev_priv(netdev);
161 	struct nfp_flower_priv *priv = app->priv;
162 	atomic_t *replies = &priv->reify_replies;
163 	int err;
164 
165 	atomic_set(replies, 0);
166 	err = nfp_flower_cmsg_portreify(repr, false);
167 	if (err) {
168 		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
169 		return;
170 	}
171 
172 	nfp_flower_wait_repr_reify(app, replies, 1);
173 }
174 
175 static void nfp_flower_sriov_disable(struct nfp_app *app)
176 {
177 	struct nfp_flower_priv *priv = app->priv;
178 
179 	if (!priv->nn)
180 		return;
181 
182 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
183 }
184 
185 static int
186 nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
187 			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
188 			    enum nfp_repr_type repr_type, unsigned int cnt)
189 {
190 	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
191 	struct nfp_flower_priv *priv = app->priv;
192 	atomic_t *replies = &priv->reify_replies;
193 	struct nfp_flower_repr_priv *repr_priv;
194 	enum nfp_port_type port_type;
195 	struct nfp_repr *nfp_repr;
196 	struct nfp_reprs *reprs;
197 	int i, err, reify_cnt;
198 	const u8 queue = 0;
199 
200 	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
201 						    NFP_PORT_VF_PORT;
202 
203 	reprs = nfp_reprs_alloc(cnt);
204 	if (!reprs)
205 		return -ENOMEM;
206 
207 	for (i = 0; i < cnt; i++) {
208 		struct net_device *repr;
209 		struct nfp_port *port;
210 		u32 port_id;
211 
212 		repr = nfp_repr_alloc(app);
213 		if (!repr) {
214 			err = -ENOMEM;
215 			goto err_reprs_clean;
216 		}
217 
218 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
219 		if (!repr_priv) {
220 			err = -ENOMEM;
221 			goto err_reprs_clean;
222 		}
223 
224 		nfp_repr = netdev_priv(repr);
225 		nfp_repr->app_priv = repr_priv;
226 
227 		/* For now we only support 1 PF */
228 		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
229 
230 		port = nfp_port_alloc(app, port_type, repr);
231 		if (IS_ERR(port)) {
232 			err = PTR_ERR(port);
233 			nfp_repr_free(repr);
234 			goto err_reprs_clean;
235 		}
236 		if (repr_type == NFP_REPR_TYPE_PF) {
237 			port->pf_id = i;
238 			port->vnic = priv->nn->dp.ctrl_bar;
239 		} else {
240 			port->pf_id = 0;
241 			port->vf_id = i;
242 			port->vnic =
243 				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
244 		}
245 
246 		eth_hw_addr_random(repr);
247 
248 		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
249 						    i, queue);
250 		err = nfp_repr_init(app, repr,
251 				    port_id, port, priv->nn->dp.netdev);
252 		if (err) {
253 			nfp_port_free(port);
254 			nfp_repr_free(repr);
255 			goto err_reprs_clean;
256 		}
257 
258 		RCU_INIT_POINTER(reprs->reprs[i], repr);
259 		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
260 			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
261 			 repr->name);
262 	}
263 
264 	nfp_app_reprs_set(app, repr_type, reprs);
265 
266 	atomic_set(replies, 0);
267 	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
268 	if (reify_cnt < 0) {
269 		err = reify_cnt;
270 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
271 		goto err_reprs_remove;
272 	}
273 
274 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
275 	if (err)
276 		goto err_reprs_remove;
277 
278 	return 0;
279 err_reprs_remove:
280 	reprs = nfp_app_reprs_set(app, repr_type, NULL);
281 err_reprs_clean:
282 	nfp_reprs_clean_and_free(app, reprs);
283 	return err;
284 }
285 
286 static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
287 {
288 	struct nfp_flower_priv *priv = app->priv;
289 
290 	if (!priv->nn)
291 		return 0;
292 
293 	return nfp_flower_spawn_vnic_reprs(app,
294 					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
295 					   NFP_REPR_TYPE_VF, num_vfs);
296 }
297 
298 static int
299 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
300 {
301 	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
302 	atomic_t *replies = &priv->reify_replies;
303 	struct nfp_flower_repr_priv *repr_priv;
304 	struct nfp_repr *nfp_repr;
305 	struct sk_buff *ctrl_skb;
306 	struct nfp_reprs *reprs;
307 	int err, reify_cnt;
308 	unsigned int i;
309 
310 	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
311 	if (!ctrl_skb)
312 		return -ENOMEM;
313 
314 	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
315 	if (!reprs) {
316 		err = -ENOMEM;
317 		goto err_free_ctrl_skb;
318 	}
319 
320 	for (i = 0; i < eth_tbl->count; i++) {
321 		unsigned int phys_port = eth_tbl->ports[i].index;
322 		struct net_device *repr;
323 		struct nfp_port *port;
324 		u32 cmsg_port_id;
325 
326 		repr = nfp_repr_alloc(app);
327 		if (!repr) {
328 			err = -ENOMEM;
329 			goto err_reprs_clean;
330 		}
331 
332 		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
333 		if (!repr_priv) {
334 			err = -ENOMEM;
335 			goto err_reprs_clean;
336 		}
337 
338 		nfp_repr = netdev_priv(repr);
339 		nfp_repr->app_priv = repr_priv;
340 
341 		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
342 		if (IS_ERR(port)) {
343 			err = PTR_ERR(port);
344 			nfp_repr_free(repr);
345 			goto err_reprs_clean;
346 		}
347 		err = nfp_port_init_phy_port(app->pf, app, port, i);
348 		if (err) {
349 			nfp_port_free(port);
350 			nfp_repr_free(repr);
351 			goto err_reprs_clean;
352 		}
353 
354 		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
355 		nfp_net_get_mac_addr(app->pf, repr, port);
356 
357 		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
358 		err = nfp_repr_init(app, repr,
359 				    cmsg_port_id, port, priv->nn->dp.netdev);
360 		if (err) {
361 			nfp_port_free(port);
362 			nfp_repr_free(repr);
363 			goto err_reprs_clean;
364 		}
365 
366 		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
367 					     eth_tbl->ports[i].nbi,
368 					     eth_tbl->ports[i].base,
369 					     phys_port);
370 
371 		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
372 		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
373 			 phys_port, repr->name);
374 	}
375 
376 	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
377 
378 	/* The REIFY/MAC_REPR control messages should be sent after the MAC
379 	 * representors are registered using nfp_app_reprs_set().  This is
380 	 * because the firmware may respond with control messages for the
381 	 * MAC representors, f.e. to provide the driver with information
382 	 * about their state, and without registration the driver will drop
383 	 * any such messages.
384 	 */
385 	atomic_set(replies, 0);
386 	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
387 	if (reify_cnt < 0) {
388 		err = reify_cnt;
389 		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
390 		goto err_reprs_remove;
391 	}
392 
393 	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
394 	if (err)
395 		goto err_reprs_remove;
396 
397 	nfp_ctrl_tx(app->ctrl, ctrl_skb);
398 
399 	return 0;
400 err_reprs_remove:
401 	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
402 err_reprs_clean:
403 	nfp_reprs_clean_and_free(app, reprs);
404 err_free_ctrl_skb:
405 	kfree_skb(ctrl_skb);
406 	return err;
407 }
408 
409 static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
410 				 unsigned int id)
411 {
412 	if (id > 0) {
413 		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
414 		goto err_invalid_port;
415 	}
416 
417 	eth_hw_addr_random(nn->dp.netdev);
418 	netif_keep_dst(nn->dp.netdev);
419 	nn->vnic_no_name = true;
420 
421 	return 0;
422 
423 err_invalid_port:
424 	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
425 	return PTR_ERR_OR_ZERO(nn->port);
426 }
427 
/* Tear down all representors when the data vNIC goes away.
 * Teardown runs in reverse order of creation in nfp_flower_vnic_init():
 * VF reprs (only present when SR-IOV is active), then the PF repr,
 * then the physical port reprs.
 */
static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	/* Other callbacks key off priv->nn to tell whether the data
	 * vNIC exists, so clear it last.
	 */
	priv->nn = NULL;
}
439 
/* Data vNIC bring-up: record the vNIC and spawn all representors —
 * physical ports first, then the PF repr, then VF reprs if SR-IOV is
 * already active.  On failure, unwind in reverse order via the goto
 * chain below.
 */
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}
476 
/* App init: validate firmware/hardware prerequisites, allocate the
 * flower private state and initialize the metadata (flow context)
 * tables.  Called once at probe time, before any vNICs exist.
 */
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	u64 version, features, ctx_count;
	struct nfp_flower_priv *app_priv;
	int err;

	/* Flower needs the NSP eth table plus the MAC stats and VF config
	 * BARs; refuse to load without them.
	 */
	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* Host context count is optional in firmware; fall back to the
	 * historical default of 2^17 contexts when the symbol is absent.
	 */
	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		/* Symbol missing simply means this firmware has no LAG
		 * support - not a fatal error.
		 */
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}
570 
/* App teardown, mirror of nfp_flower_init().  Order matters: drop any
 * queued control messages and flush the worker that processes them
 * before freeing the state they reference.
 */
static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	/* LAG state only exists when init detected firmware support. */
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}
586 
587 static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
588 {
589 	bool ret;
590 
591 	spin_lock_bh(&app_priv->mtu_conf.lock);
592 	ret = app_priv->mtu_conf.ack;
593 	spin_unlock_bh(&app_priv->mtu_conf.lock);
594 
595 	return ret;
596 }
597 
/* Handle an MTU change on a representor.  Physical port MTU changes
 * must be configured in firmware: record the requested value, send a
 * PORTMOD message and wait (briefly) for firmware to acknowledge it.
 * Returns 0 on success or when no firmware action is needed.
 */
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	/* Publish the pending request before sending the cmsg so the
	 * handler can match firmware's reply against it.
	 */
	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		/* Clear the pending request so a stale ack is ignored. */
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}
645 
646 static int nfp_flower_start(struct nfp_app *app)
647 {
648 	struct nfp_flower_priv *app_priv = app->priv;
649 	int err;
650 
651 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
652 		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
653 		if (err)
654 			return err;
655 	}
656 
657 	return nfp_tunnel_config_start(app);
658 }
659 
/* App stop hook: undo the tunnel setup done in nfp_flower_start(). */
static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}
664 
665 static int
666 nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
667 			unsigned long event, void *ptr)
668 {
669 	struct nfp_flower_priv *app_priv = app->priv;
670 	int ret;
671 
672 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
673 		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
674 		if (ret & NOTIFY_STOP_MASK)
675 			return ret;
676 	}
677 
678 	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
679 	if (ret & NOTIFY_STOP_MASK)
680 		return ret;
681 
682 	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
683 }
684 
/* Ops table registering the flower app with the NFP core.  All control
 * message types are handled (ctrl_cap_mask ~0U) and control messages
 * carry metadata.
 */
const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};
724