/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1

#include "cmsg.h"
#include "../nfp_net.h"

#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>

struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;

#define NFP_FL_STAT_ID_MU_NUM		GENMASK(31, 22)
#define NFP_FL_STAT_ID_STAT		GENMASK(21, 0)
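
/* A stats context ID packs the memory unit number and the per-unit stats
 * index into one 32-bit word using the two masks above. A minimal sketch of
 * building and unpacking such an ID with the generic bitfield helpers
 * (illustrative only; 'mu_num' and 'stat_idx' are hypothetical variables,
 * not part of this header):
 *
 *	#include <linux/bitfield.h>
 *
 *	u32 ctx_id = FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, mu_num) |
 *		     FIELD_PREP(NFP_FL_STAT_ID_STAT, stat_idx);
 *	u32 stat_idx_back = FIELD_GET(NFP_FL_STAT_ID_STAT, ctx_id);
 */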

#define NFP_FL_STATS_ELEM_RS		sizeof_field(struct nfp_fl_stats_id, \
						     init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS	256
#define NFP_FLOWER_MASK_ELEMENT_RS	1
#define NFP_FLOWER_MASK_HASH_BITS	10

#define NFP_FLOWER_KEY_MAX_LW		32

#define NFP_FL_META_FLAG_MANAGE_MASK	BIT(7)

#define NFP_FL_MASK_REUSE_TIME_NS	40000
#define NFP_FL_MASK_ID_LOCATION		1

/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE		BIT(0)
#define NFP_FL_NBI_MTU_SETTING		BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT		BIT(2)
#define NFP_FL_FEATS_VLAN_PCP		BIT(3)
#define NFP_FL_FEATS_VF_RLIM		BIT(4)
#define NFP_FL_FEATS_FLOW_MOD		BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES	BIT(6)
#define NFP_FL_FEATS_IPV6_TUN		BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ		BIT(8)
#define NFP_FL_FEATS_HOST_ACK		BIT(31)

#define NFP_FL_ENABLE_FLOW_MERGE	BIT(0)
#define NFP_FL_ENABLE_LAG		BIT(1)

#define NFP_FL_FEATS_HOST \
	(NFP_FL_FEATS_GENEVE | \
	NFP_FL_NBI_MTU_SETTING | \
	NFP_FL_FEATS_GENEVE_OPT | \
	NFP_FL_FEATS_VLAN_PCP | \
	NFP_FL_FEATS_VF_RLIM | \
	NFP_FL_FEATS_FLOW_MOD | \
	NFP_FL_FEATS_PRE_TUN_RULES | \
	NFP_FL_FEATS_IPV6_TUN | \
	NFP_FL_FEATS_VLAN_QINQ)
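
/* NFP_FL_FEATS_* bits are reported by the firmware in flower_ext_feats,
 * while the NFP_FL_ENABLE_* bits live in the separate flower_en_feats
 * bitmap. A minimal sketch of gating an offload on a firmware feature
 * (illustrative only; the surrounding function is hypothetical):
 *
 *	struct nfp_flower_priv *priv = app->priv;
 *
 *	if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ))
 *		return -EOPNOTSUPP;
 */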

struct nfp_fl_mask_id {
	struct circ_buf mask_id_free_list;
	ktime_t *last_used;
	u8 init_unallocated;
};

struct nfp_fl_stats_id {
	struct circ_buf free_list;
	u32 init_unalloc;
	u8 repeated_em_count;
};

/**
 * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
 * @offloaded_macs:	Hashtable of the offloaded MAC addresses
 * @ipv4_off_list:	List of IPv4 addresses to offload
 * @ipv6_off_list:	List of IPv6 addresses to offload
 * @neigh_off_list_v4:	List of IPv4 neighbour offloads
 * @neigh_off_list_v6:	List of IPv6 neighbour offloads
 * @ipv4_off_lock:	Lock for the IPv4 address list
 * @ipv6_off_lock:	Lock for the IPv6 address list
 * @neigh_off_lock_v4:	Lock for the IPv4 neighbour address list
 * @neigh_off_lock_v6:	Lock for the IPv6 neighbour address list
 * @mac_off_ids:	IDA to manage id assignment for offloaded MACs
 * @neigh_nb:		Notifier to monitor neighbour state
 */
struct nfp_fl_tunnel_offloads {
	struct rhashtable offloaded_macs;
	struct list_head ipv4_off_list;
	struct list_head ipv6_off_list;
	struct list_head neigh_off_list_v4;
	struct list_head neigh_off_list_v6;
	struct mutex ipv4_off_lock;
	struct mutex ipv6_off_lock;
	spinlock_t neigh_off_lock_v4;
	spinlock_t neigh_off_lock_v6;
	struct ida mac_off_ids;
	struct notifier_block neigh_nb;
};

/**
 * struct nfp_mtu_conf - manage MTU setting
 * @portnum:		NFP port number of repr with requested MTU change
 * @requested_val:	MTU value requested for repr
 * @ack:		Received ack that MTU has been correctly set
 * @wait_q:		Wait queue for MTU acknowledgements
 * @lock:		Lock for setting/reading MTU variables
 */
struct nfp_mtu_conf {
	u32 portnum;
	unsigned int requested_val;
	bool ack;
	wait_queue_head_t wait_q;
	spinlock_t lock;
};
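
/* The MTU handshake records the request under @lock, sends the config
 * message to the firmware and then sleeps on @wait_q until the ack arrives
 * or a timeout expires. A minimal sketch, assuming a hypothetical
 * nfp_flower_xmit_mtu_cmsg() helper plus 'new_mtu'/'repr_port_id' variables
 * (none of which are defined in this header):
 *
 *	spin_lock_bh(&priv->mtu_conf.lock);
 *	priv->mtu_conf.ack = false;
 *	priv->mtu_conf.requested_val = new_mtu;
 *	priv->mtu_conf.portnum = repr_port_id;
 *	spin_unlock_bh(&priv->mtu_conf.lock);
 *
 *	nfp_flower_xmit_mtu_cmsg(priv, new_mtu);
 *	wait_event_timeout(priv->mtu_conf.wait_q, priv->mtu_conf.ack,
 *			   msecs_to_jiffies(10));
 */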

/**
 * struct nfp_fl_lag - Flower APP priv data for link aggregation
 * @work:		Work queue for writing configs to the HW
 * @lock:		Lock to protect @group_list
 * @group_list:		List of all master/slave groups offloaded
 * @ida_handle:		IDA to handle group ids
 * @pkt_num:		Incremented for each config packet sent
 * @batch_ver:		Incremented for each batch of config packets
 * @global_inst:	Instance allocator for groups
 * @rst_cfg:		Marker to reset HW LAG config
 * @retrans_skbs:	Cmsgs that could not be processed by HW and require
 *			retransmission
 */
struct nfp_fl_lag {
	struct delayed_work work;
	struct mutex lock;
	struct list_head group_list;
	struct ida ida_handle;
	unsigned int pkt_num;
	unsigned int batch_ver;
	u8 global_inst;
	bool rst_cfg;
	struct sk_buff_head retrans_skbs;
};

/**
 * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
 * @port_ids:	Assignment of ids to any additional ports
 * @lock:	Lock for extra ports list
 */
struct nfp_fl_internal_ports {
	struct idr port_ids;
	spinlock_t lock;
};

/**
 * struct nfp_flower_priv - Flower APP per-vNIC priv data
 * @app:		Back pointer to app
 * @nn:			Pointer to vNIC
 * @mask_id_seed:	Seed used for mask hash table
 * @flower_version:	HW version of flower
 * @flower_ext_feats:	Bitmap of extra features the HW supports
 * @flower_en_feats:	Bitmap of features enabled by HW
 * @stats_ids:		List of free stats ids
 * @mask_ids:		List of free mask ids
 * @mask_table:		Hash table used to store masks
 * @stats_ring_size:	Maximum number of allowed stats ids
 * @flow_table:		Hash table used to store flower rules
 * @stats:		Stored stats updates for flower rules
 * @stats_lock:		Lock for flower rule stats updates
 * @stats_ctx_table:	Hash table mapping each stats context to its flow rule
 * @cmsg_work:		Workqueue for control message processing
 * @cmsg_skbs_high:	List of higher priority skbs for control message
 *			processing
 * @cmsg_skbs_low:	List of lower priority skbs for control message
 *			processing
 * @tun:		Tunnel offload data
 * @reify_replies:	atomically stores the number of replies received
 *			from firmware for repr reify
 * @reify_wait_queue:	wait queue for repr reify response counting
 * @mtu_conf:		Configuration of repr MTU value
 * @nfp_lag:		Link aggregation data block
 * @indr_block_cb_priv:	List of priv data passed to indirect block cbs
 * @non_repr_priv:	List of offloaded non-repr ports and their priv data
 * @active_mem_unit:	Current active memory unit for flower rules
 * @total_mem_units:	Total number of available memory units for flower rules
 * @internal_ports:	Internal port ids used in offloaded rules
 * @qos_stats_work:	Workqueue for qos stats processing
 * @qos_rate_limiters:	Current active qos rate limiters
 * @qos_stats_lock:	Lock on qos stats updates
 * @pre_tun_rule_cnt:	Number of pre-tunnel rules offloaded
 */
struct nfp_flower_priv {
	struct nfp_app *app;
	struct nfp_net *nn;
	u32 mask_id_seed;
	u64 flower_version;
	u64 flower_ext_feats;
	u8 flower_en_feats;
	struct nfp_fl_stats_id stats_ids;
	struct nfp_fl_mask_id mask_ids;
	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
	u32 stats_ring_size;
	struct rhashtable flow_table;
	struct nfp_fl_stats *stats;
	spinlock_t stats_lock; /* lock stats */
	struct rhashtable stats_ctx_table;
	struct work_struct cmsg_work;
	struct sk_buff_head cmsg_skbs_high;
	struct sk_buff_head cmsg_skbs_low;
	struct nfp_fl_tunnel_offloads tun;
	atomic_t reify_replies;
	wait_queue_head_t reify_wait_queue;
	struct nfp_mtu_conf mtu_conf;
	struct nfp_fl_lag nfp_lag;
	struct list_head indr_block_cb_priv;
	struct list_head non_repr_priv;
	unsigned int active_mem_unit;
	unsigned int total_mem_units;
	struct nfp_fl_internal_ports internal_ports;
	struct delayed_work qos_stats_work;
	unsigned int qos_rate_limiters;
	spinlock_t qos_stats_lock; /* Protect the qos stats */
	int pre_tun_rule_cnt;
};

/**
 * struct nfp_fl_qos - Flower APP priv data for quality of service
 * @netdev_port_id:	NFP port number of repr with qos info
 * @curr_stats:		Currently stored stats updates for qos info
 * @prev_stats:		Previously stored updates for qos info
 * @last_update:	Stored time when last stats were updated
 */
struct nfp_fl_qos {
	u32 netdev_port_id;
	struct nfp_stat_pair curr_stats;
	struct nfp_stat_pair prev_stats;
	u64 last_update;
};

/**
 * struct nfp_flower_repr_priv - Flower APP per-repr priv data
 * @nfp_repr:		Back pointer to nfp_repr
 * @lag_port_flags:	Extended port flags to record lag state of repr
 * @mac_offloaded:	Flag indicating a MAC address is offloaded for repr
 * @offloaded_mac_addr:	MAC address that has been offloaded for repr
 * @block_shared:	Flag indicating if offload applies to shared blocks
 * @mac_list:		List entry of reprs that share the same offloaded MAC
 * @qos_table:		Stored info on filters implementing qos
 * @on_bridge:		Indicates if the repr is attached to a bridge
 */
struct nfp_flower_repr_priv {
	struct nfp_repr *nfp_repr;
	unsigned long lag_port_flags;
	bool mac_offloaded;
	u8 offloaded_mac_addr[ETH_ALEN];
	bool block_shared;
	struct list_head mac_list;
	struct nfp_fl_qos qos_table;
	bool on_bridge;
};

/**
 * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports
 * @list:		List entry of offloaded non-repr ports
 * @netdev:		Pointer to non-repr net_device
 * @ref_count:		Number of references held for this priv data
 * @mac_offloaded:	Flag indicating a MAC address is offloaded for device
 * @offloaded_mac_addr:	MAC address that has been offloaded for dev
 */
struct nfp_flower_non_repr_priv {
	struct list_head list;
	struct net_device *netdev;
	int ref_count;
	bool mac_offloaded;
	u8 offloaded_mac_addr[ETH_ALEN];
};
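
/* Non-repr priv data is reference counted through the get/put helpers
 * declared at the end of this header. A minimal usage sketch (illustrative
 * only; error handling trimmed):
 *
 *	struct nfp_flower_non_repr_priv *nr_priv;
 *
 *	nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
 *	if (!nr_priv)
 *		return -ENOMEM;
 *	... use nr_priv->offloaded_mac_addr, etc. ...
 *	nfp_flower_non_repr_priv_put(app, netdev);
 */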

struct nfp_fl_key_ls {
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
};

struct nfp_fl_rule_metadata {
	u8 key_len;
	u8 mask_len;
	u8 act_len;
	u8 flags;
	__be32 host_ctx_id;
	__be64 host_cookie __packed;
	__be64 flow_version __packed;
	__be32 shortcut;
};

struct nfp_fl_stats {
	u64 pkts;
	u64 bytes;
	u64 used;
};

/**
 * struct nfp_ipv6_addr_entry - cached IPv6 addresses
 * @ipv6_addr:	IPv6 address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv6_addr_entry {
	struct in6_addr ipv6_addr;
	int ref_count;
	struct list_head list;
};

struct nfp_fl_payload {
	struct nfp_fl_rule_metadata meta;
	unsigned long tc_flower_cookie;
	struct rhash_head fl_node;
	struct rcu_head rcu;
	__be32 nfp_tun_ipv4_addr;
	struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
	struct net_device *ingress_dev;
	char *unmasked_data;
	char *mask_data;
	char *action_data;
	struct list_head linked_flows;
	bool in_hw;
	struct {
		struct net_device *dev;
		__be16 vlan_tci;
		__be16 port_idx;
	} pre_tun_rule;
};

struct nfp_fl_payload_link {
	/* A link contains a pointer to a merge flow and an associated sub_flow.
	 * Each merge flow will feature in 2 links to its underlying sub_flows.
	 * A sub_flow will have at least 1 link to a merge flow, and more if it
	 * has been used to create multiple merge flows.
	 *
	 * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
	 * all links to sub_flows (sub_flow.flow) via merge_flow.list.
	 * For a sub_flow, 'linked_flows' gives all links to the merge flows it
	 * has formed (merge_flow.flow) via sub_flow.list.
	 */
	struct {
		struct list_head list;
		struct nfp_fl_payload *flow;
	} merge_flow, sub_flow;
};
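
/* A minimal sketch of walking these links (illustrative only; 'merge_flow'
 * and 'sub_flow' are hypothetical struct nfp_fl_payload pointers and use()
 * is a placeholder):
 *
 *	struct nfp_fl_payload_link *link;
 *
 *	Every sub_flow behind a merge flow:
 *	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
 *		use(link->sub_flow.flow);
 *
 *	Every merge flow a sub_flow contributes to:
 *	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
 *		use(link->merge_flow.flow);
 */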

extern const struct rhashtable_params nfp_flower_table_params;

struct nfp_fl_stats_frame {
	__be32 stats_con_id;
	__be32 pkt_count;
	__be64 byte_count;
	__be64 stats_cookie;
};
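
/* Stats frames arrive from the firmware in big-endian form and are folded
 * into the host-order counters of struct nfp_fl_stats under stats_lock.
 * A minimal sketch, assuming 'frame' points at one received frame and
 * 'priv' is the struct nfp_flower_priv (illustrative only; bounds checks
 * elided):
 *
 *	u32 ctx_id = be32_to_cpu(frame->stats_con_id);
 *
 *	spin_lock(&priv->stats_lock);
 *	priv->stats[ctx_id].pkts += be32_to_cpu(frame->pkt_count);
 *	priv->stats[ctx_id].bytes += be64_to_cpu(frame->byte_count);
 *	priv->stats[ctx_id].used = jiffies;
 *	spin_unlock(&priv->stats_lock);
 */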

static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_priv *app_priv = app->priv;

	if (!(app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE))
		return false;
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;

	return false;
}
/* The address of the merged flow acts as its cookie.
 * Cookies supplied to us by TC flower are also addresses of allocated
 * memory and thus this scheme should not generate any collisions.
 */
static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
{
	return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}
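
/* In other words, a merge flow is created with its own address as the TC
 * cookie, e.g. (sketch):
 *
 *	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
 *
 * so nfp_flower_is_merge_flow() distinguishes it from flows whose cookies
 * were handed to us by TC.
 */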

static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
{
	return netif_is_ovs_master(netdev);
}

int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data);
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_cls_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack);
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow);

struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);

int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
				 struct net_device *master);
void nfp_flower_qos_init(struct nfp_app *app);
void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
				enum tc_setup_type type, void *type_data,
				void *data,
				void (*cleanup)(struct flow_block_cb *block_cb));
void nfp_flower_setup_indr_tc_release(void *cb_priv);

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev);
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev);
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow);
#endif