/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>

#include "port.h"
#include "core.h"

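/* The FID (Filtering Identifier) space is partitioned as follows: FIDs
 * below MLXSW_SP_VFID_BASE map 1:1 to VLANs of the VLAN-aware bridge,
 * FIDs in [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) are vFIDs backing
 * VLAN devices bridged into VLAN-unaware bridges, and FIDs from
 * MLXSW_SP_RFID_BASE onwards are rFIDs backing router interfaces (see
 * the conversion helpers below).
 */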
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 6656	/* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_INVALID_RIF 0xffff

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_BYTES_PER_CELL 96

#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)
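/* For example, a 1500 byte frame occupies
 * MLXSW_SP_BYTES_TO_CELLS(1500) = DIV_ROUND_UP(1500, 96) = 16 cells.
 */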

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

/* Maximum delay buffer needed in case of PAUSE frames, in cells.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 612

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

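/* Convert a PFC delay allowance, expressed in bit times (e.g. the delay
 * member of struct ieee_pfc), to the number of headroom cells to reserve:
 * the delay itself scaled by the cell factor above, plus one MTU worth of
 * cells for a frame that may already be in transmission when the PFC
 * frame is received.
 */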
static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}

struct mlxsw_sp_port;

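/* An upper device (bridge or LAG) together with the number of local ports
 * referencing it.
 */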
struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

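/* A Filtering Identifier (FID). For vFIDs, 'dev' is the bridge device the
 * FID was created for and 'leave' is called when a vPort leaves the FID.
 * 'r' points to the router interface bound to the FID, if any.
 */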
struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *r;
	u16 fid;
};

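/* A router interface (RIF): the netdev, MAC address and MTU it was
 * configured with. 'rif' is the index into the rifs array of struct
 * mlxsw_sp below.
 */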
struct mlxsw_sp_rif {
	struct net_device *dev;
	unsigned int ref_count;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif;
};

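/* A multicast group entry, keyed by MAC address and VID. 'mid' is the
 * index of the multicast (MID) table entry allocated for the group.
 */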
struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	u16 mid;
	unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
	return fid >= MLXSW_SP_RFID_BASE;
}

static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_sp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_sp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_sp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT	4
#define MLXSW_SP_SB_TC_COUNT	8

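/* Shared buffer state. The two-element arrays are indexed by buffer
 * direction, ingress and egress.
 */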
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct {
		struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
		struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
	} ports[MLXSW_PORT_MAX_PORTS];
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	bool used;
	enum mlxsw_sp_l3proto proto;
	u32 tb_id; /* kernel fib table id */
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
};

enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

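/* A SPAN (mirroring) agent: 'local_port' is the port traffic is mirrored
 * to and 'bound_ports_list' holds the inspected ports bound to the agent.
 */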
struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

struct mlxsw_sp_router {
	struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_group_list;
	struct list_head nexthop_neighs_list;
	bool aborted;
};

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper *lags;
	u8 port_to_module[MLXSW_PORT_MAX_PORTS];
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;

	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
	struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

struct mlxsw_sp_port {
	struct mlxsw_core_port core_port; /* must be first */
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u8 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   bridged:1,
	   lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
	struct {
		#define MLXSW_HW_STATS_UPDATE_TIME HZ
		struct rtnl_link_stats64 *cache;
		struct delayed_work update_dw;
	} hw_stats;
};

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

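/* A vPort is an mlxsw_sp_port instance representing a {port, VID} pair.
 * It is created when a VLAN upper device is configured on top of the port
 * and is distinguished from a regular port by a non-zero vport.vid.
 */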
static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	struct mlxsw_resources *resources;
	int i;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);

	for (i = 0; i < resources->max_rif; i++)
		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
			return mlxsw_sp->rifs[i];

	return NULL;
}

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BM,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
				    struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
				   struct neighbour *n);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);

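/* Allocator for the linear part of the KVD memory. mlxsw_sp_kvdl_alloc()
 * presumably returns the index of the first allocated entry, or a negative
 * errno on failure.
 */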
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

#endif /* _MLXSW_SPECTRUM_H */