/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <net/switchdev.h>

#include "port.h"
#include "core.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_PORT_MAX 512	/* Non-bridged VLAN interfaces */
#define MLXSW_SP_VFID_BR_MAX 8192	/* Bridged VLAN interfaces */
#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
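/*
 * FIDs below MLXSW_SP_VFID_BASE map 1:1 to 802.1Q bridge VLANs. vFIDs sit
 * directly above that range: the first MLXSW_SP_VFID_PORT_MAX entries serve
 * non-bridged VLAN interfaces, the following MLXSW_SP_VFID_BR_MAX entries
 * serve VLAN interfaces enslaved to a bridge.
 */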

#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16

#define MLXSW_SP_MID_MAX 7000

struct mlxsw_sp_port;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

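/*
 * A vFID groups the VLAN interfaces (vPorts) mapped to it: 'nr_vports'
 * counts them, 'vid' identifies the VLAN used by non-bridged vFIDs and
 * 'br_dev' points to the bridge device for bridged vFIDs (NULL otherwise).
 */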
struct mlxsw_sp_vfid {
	struct list_head list;
	u16 nr_vports;
	u16 vfid;	/* Starting at 0 */
	struct net_device *br_dev;
	u16 vid;
};

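/*
 * A MID (Multicast IDentifier) entry describes one offloaded MDB group:
 * the {MAC, VID} pair it matches, the hardware MID index allocated from
 * 'br_mids' and a reference count of its member ports.
 */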
struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	u16 mid;
	unsigned int ref_count;
};

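/*
 * vFIDs are kept 0-based in the driver and offset by MLXSW_SP_VFID_BASE
 * when used as FIDs, e.g. vFID 0 corresponds to FID 4096 (VLAN_N_VID).
 * Any FID at or above the base is therefore a vFID.
 */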
static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE;
}

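/*
 * Per-ASIC private structure. Shared state such as the vFID and MID
 * allocators, the FDB notification work, the master bridge and the LAG
 * table lives here; per-port state lives in struct mlxsw_sp_port below.
 */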
struct mlxsw_sp {
	struct {
		struct list_head list;
		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
	} port_vfids;
	struct {
		struct list_head list;
		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
	} br_vfids;
	struct {
		struct list_head list;
		unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)];
	} br_mids;
	unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

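/*
 * Represents both front-panel ports and the VLAN interfaces (vPorts)
 * stacked on top of them: a vPort is an additional mlxsw_sp_port instance
 * linked into its parent's 'vports_list' with 'vport.vfid' set, while the
 * parent itself keeps 'vport.vfid' NULL (see mlxsw_sp_port_is_vport()).
 */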
struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u8 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   bridged:1,
	   lagged:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_vfid *vfid;
		u16 vid;
	} vport;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
};

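/*
 * Resolve a (LAG ID, port index) pair to its member port via the core LAG
 * mapping; returns NULL if the slot no longer points at a lagged port.
 */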
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

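/*
 * vPort accessors: a port is considered a vPort when a vFID is attached to
 * it; the helpers below fetch its bridge device, VID and vFID.
 */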
static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->vport.vfid;
}

static inline struct net_device *
mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vfid->br_dev;
}

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline u16
mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vfid->vfid;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
				 u16 vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
			return mlxsw_sp_vport;
	}

	return NULL;
}
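/*
 * Example (hypothetical caller): resolve the vPort backing VLAN 10 on a
 * port before operating on its vFID:
 *
 *	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 10);
 *	if (!mlxsw_sp_vport)
 *		return -EINVAL;
 *	vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
 */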

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BM,
};
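/* UC floods unknown unicast; BM floods broadcast and multicast. */

/*
 * mlxsw_sp_buffers_init() and mlxsw_sp_port_buffers_init() are implemented
 * in spectrum_buffers.c; the remaining declarations are entry points shared
 * between spectrum.c and spectrum_switchdev.c.
 */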

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid);
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);

#endif