/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DPAA2 Ethernet Switch declarations
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#ifndef __ETHSW_H
#define __ETHSW_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <uapi/linux/if_bridge.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/fsl/mc.h>
#include <net/pkt_cls.h>
#include <soc/fsl/dpaa2-io.h>

#include "dpsw.h"

/* Number of IRQs supported */
#define DPSW_IRQ_NUM	2

/* Port is member of VLAN */
#define ETHSW_VLAN_MEMBER	1
/* VLAN to be treated as untagged on egress */
#define ETHSW_VLAN_UNTAGGED	2
/* Untagged frames will be assigned to this VLAN */
#define ETHSW_VLAN_PVID		4
/* VLAN configured on the switch */
#define ETHSW_VLAN_GLOBAL	8

/* Maximum Frame Length supported by HW (currently 10k) */
#define DPAA2_MFL		(10 * 1024)
#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
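
/* Worked example, using the standard <linux/if_vlan.h> / <linux/if_ether.h>
 * values (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4): the largest usable MTU is
 * ETHSW_MAX_FRAME_LENGTH = 10 * 1024 - 18 - 4 = 10218 bytes, while a default
 * 1500-byte MTU corresponds to ETHSW_L2_MAX_FRM(1500) = 1522 bytes on the wire.
 */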

#define ETHSW_FEATURE_MAC_ADDR	BIT(0)

/* Number of receive queues (one RX and one TX_CONF) */
#define DPAA2_SWITCH_RX_NUM_FQS	2

/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_SWITCH_RX_BUF_RAW_SIZE	PAGE_SIZE
#define DPAA2_SWITCH_RX_BUF_TAILROOM \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define DPAA2_SWITCH_RX_BUF_SIZE \
	(DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
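
/* Each Rx buffer handed to hardware is a full page, but only
 * DPAA2_SWITCH_RX_BUF_SIZE bytes of it are advertised as usable frame space:
 * the aligned struct skb_shared_info tailroom is kept free so the buffer can
 * later back an skb built in place over the received frame (e.g. build_skb()).
 */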

#define DPAA2_SWITCH_STORE_SIZE 16

/* Buffer management */
#define BUFS_PER_CMD			7
#define DPAA2_ETHSW_NUM_BUFS		(1024 * BUFS_PER_CMD)
#define DPAA2_ETHSW_REFILL_THRESH	(DPAA2_ETHSW_NUM_BUFS * 5 / 6)
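
/* Sketch of the intended buffer accounting, derived from the macros above:
 * buffers are released to the pool in batches of BUFS_PER_CMD (the maximum a
 * single QBMan release command can carry), up to DPAA2_ETHSW_NUM_BUFS (7168)
 * in total. Once the count of buffers still owned by hardware drops below
 * DPAA2_ETHSW_REFILL_THRESH (5973, i.e. 5/6 of the total), the pool is
 * expected to be seeded again.
 */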

/* Number of times to retry DPIO portal operations while waiting
 * for the portal to finish executing the current command and become
 * available. We want to avoid being stuck in a while loop in case
 * hardware becomes unresponsive, but not give up too easily if
 * the portal really is busy for valid reasons.
 */
#define DPAA2_SWITCH_SWP_BUSY_RETRIES		1000

/* Hardware annotation buffer size */
#define DPAA2_SWITCH_HWA_SIZE			64
/* Software annotation buffer size */
#define DPAA2_SWITCH_SWA_SIZE			64

#define DPAA2_SWITCH_TX_BUF_ALIGN		64

#define DPAA2_SWITCH_TX_DATA_OFFSET \
	(DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)

#define DPAA2_SWITCH_NEEDED_HEADROOM \
	(DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
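
/* In other words, frame data on Tx is expected to start
 * DPAA2_SWITCH_TX_DATA_OFFSET (64 + 64 = 128) bytes into the buffer, after
 * the hardware and software annotation areas. DPAA2_SWITCH_NEEDED_HEADROOM
 * adds one more DPAA2_SWITCH_TX_BUF_ALIGN on top of that (192 bytes in total)
 * to cover realigning the buffer start, and is the headroom the driver asks
 * the network stack to reserve for it.
 */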

#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES	16
#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS		1

#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE	256

extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;

struct ethsw_core;

/* One frame queue serviced by the CPU through the control interface,
 * either Rx or Tx confirmation (see DPAA2_SWITCH_RX_NUM_FQS)
 */
struct dpaa2_switch_fq {
	struct ethsw_core *ethsw;
	enum dpsw_queue_type type;
	struct dpaa2_io_store *store;
	struct dpaa2_io_notification_ctx nctx;
	struct napi_struct napi;
	u32 fqid;
};

/* Software bookkeeping for one hardware FDB table */
struct dpaa2_switch_fdb {
	struct net_device	*bridge_dev;
	u16			fdb_id;
	bool			in_use;
};

/* One ACL rule together with its hardware configuration and key */
struct dpaa2_switch_acl_entry {
	struct list_head	list;
	u16			prio;
	unsigned long		cookie;

	struct dpsw_acl_entry_cfg cfg;
	struct dpsw_acl_key	key;
};

/* One ACL table; 'ports' is a bitmap of the switch interfaces bound to it */
struct dpaa2_switch_acl_tbl {
	struct list_head	entries;
	struct ethsw_core	*ethsw;
	u64			ports;

	u16			id;
	u8			num_rules;
	bool			in_use;
};

static inline bool
dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
{
	if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
	    DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
		return true;
	return false;
}
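
/* Callers that add rules (e.g. the tc flower/matchall handlers declared at
 * the bottom of this file) are expected to check
 * dpaa2_switch_acl_tbl_is_full() first: DPAA2_ETHSW_PORT_DEFAULT_TRAPS
 * entries out of the DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES budget are reserved
 * for traps installed by the driver itself rather than through tc.
 */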

/* Per port private data */
struct ethsw_port_priv {
	struct net_device	*netdev;
	u16			idx;
	struct ethsw_core	*ethsw_data;
	u8			link_state;
	u8			stp_state;

	u8			vlans[VLAN_VID_MASK + 1];
	u16			pvid;
	u16			tx_qdid;

	struct dpaa2_switch_fdb	*fdb;
	bool			bcast_flood;
	bool			ucast_flood;
	bool			learn_ena;

	struct dpaa2_switch_acl_tbl *acl_tbl;
};

/* Switch data */
struct ethsw_core {
	struct device			*dev;
	struct fsl_mc_io		*mc_io;
	u16				dpsw_handle;
	struct dpsw_attr		sw_attr;
	u16				major, minor;
	unsigned long			features;
	int				dev_id;
	struct ethsw_port_priv		**ports;
	struct iommu_domain		*iommu_domain;

	u8				vlans[VLAN_VID_MASK + 1];

	struct workqueue_struct		*workqueue;

	struct dpaa2_switch_fq		fq[DPAA2_SWITCH_RX_NUM_FQS];
	struct fsl_mc_device		*dpbp_dev;
	int				buf_count;
	u16				bpid;
	int				napi_users;

	struct dpaa2_switch_fdb		*fdbs;
	struct dpaa2_switch_acl_tbl	*acls;
};

/* Translate a switch port net_device into its interface index within the
 * DPSW object; returns -EINVAL if @netdev is not one of this switch's ports
 */
static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
					 struct net_device *netdev)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (ethsw->ports[i]->netdev == netdev)
			return ethsw->ports[i]->idx;

	return -EINVAL;
}

/* Check, at probe time, that the DPSW object was created with the options
 * this driver needs in order to pass traffic to and from the CPU through
 * the control interface
 */
static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
{
	if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
		dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
		dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
		dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
		return false;
	}

	return true;
}

bool dpaa2_switch_port_dev_check(const struct net_device *netdev);

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
				  struct fdb_dump_entry *fdb_entry,
				  void *data);

/* TC offload */

int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
			       struct dpaa2_switch_acl_entry *entry);
#endif	/* __ETHSW_H */