// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

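/* Look up the internal port ID assigned to a netdev by walking the
 * internal port IDR under the RCU read lock.  Returns 0 if the netdev
 * has no ID assigned.
 */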
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

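/* Return the existing internal port ID for a netdev, or allocate a new
 * one from the [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID) range (the
 * idr_alloc() upper bound is exclusive).  Returns a negative errno if
 * the IDR is exhausted or allocation fails.
 */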
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

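/* Convert a netdev to the port ID used in control messages.  Reprs carry
 * their port ID directly; other offloadable netdevs are mapped via the
 * internal port IDR.  Returns 0 if the netdev cannot be offloaded.
 */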
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int ext_port;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	}

	return 0;
}

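/* Release the internal port ID held by a netdev, if any. */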
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

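/* Netdev notifier: drop the internal port ID when an offloadable netdev
 * unregisters.  All other events pass through with NOTIFY_OK.
 */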
static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

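/* Look up the tracking entry for a non-repr netdev, creating it if
 * needed, and take a reference on it.  Returns NULL only on allocation
 * failure.
 */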
struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

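/* Drop a reference to a non-repr priv entry; the entry is unlinked and
 * freed when the last reference goes away.
 */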
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

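/* Decode a control message port ID into a repr type and port/vNIC number.
 * Returns __NFP_REPR_TYPE_MAX for port types the driver does not handle.
 */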
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

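/* Map a control message port ID to its repr netdev.  Runs under the RCU
 * read lock (note the rcu_dereference() calls) and may return NULL if no
 * matching repr exists.
 */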
static struct net_device *
nfp_flower_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

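/* Send a PORT_REIFY message for each repr of the given type, announcing
 * (or, if @exists is false, retracting) its existence to the firmware.
 * Returns the number of messages sent, or a negative errno.
 */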
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

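/* Wait for the firmware to ack @tot_repl REIFY messages, giving up after
 * NFP_FL_REPLY_TIMEOUT.
 */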
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

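/* Create @cnt PF or VF reprs, publish them with nfp_app_reprs_set() and
 * notify the firmware via PORT_REIFY.  On failure, all reprs created so
 * far are torn down again.
 */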
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

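/* Create a repr for every entry in the NSP eth table.  The reprs array is
 * sized by the highest physical port index rather than the port count,
 * since port indexes may be sparse.
 */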
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set().  This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, e.g. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

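/* Probe-time init: validate the firmware resources flower depends on
 * (eth table, stats/config BARs, ABI version), size the stats ring from
 * the host context count, and negotiate optional features such as LAG
 * and flow merging by writing their enable symbols.
 */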
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
		} else {
			goto err_lag_clean;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);

	return 0;

err_lag_clean:
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

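/* MTU changes on physical port reprs must be acked by the firmware.
 * Record the requested value, send a portmod message and wait for the
 * ack before the stack commits the new MTU.
 */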
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};