/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DPAA2 Ethernet Switch declarations
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#ifndef __ETHSW_H
#define __ETHSW_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <uapi/linux/if_bridge.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/fsl/mc.h>
#include <net/pkt_cls.h>
#include <soc/fsl/dpaa2-io.h>

#include "dpsw.h"

/* Number of IRQs supported */
#define DPSW_IRQ_NUM	2

/* Flags kept per VLAN id in the ethsw_core/ethsw_port_priv vlans[] maps */
/* Port is member of VLAN */
#define ETHSW_VLAN_MEMBER	1
/* VLAN to be treated as untagged on egress */
#define ETHSW_VLAN_UNTAGGED	2
/* Untagged frames will be assigned to this VLAN */
#define ETHSW_VLAN_PVID		4
/* VLAN configured on the switch */
#define ETHSW_VLAN_GLOBAL	8

/* Maximum Frame Length supported by HW (currently 10k) */
#define DPAA2_MFL		(10 * 1024)
/* Max MTU: the L2 overhead (VLAN tag + FCS) must still fit under DPAA2_MFL */
#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
/* On-wire frame size for a given MTU (adds back VLAN tag + FCS) */
#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)

/* Bits for ethsw_core::features */
#define ETHSW_FEATURE_MAC_ADDR	BIT(0)

/* Number of receive queues (one RX and one TX_CONF) */
#define DPAA2_SWITCH_RX_NUM_FQS	2

/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_SWITCH_RX_BUF_RAW_SIZE	PAGE_SIZE
/* Tailroom reserved in each RX buffer for the skb_shared_info */
#define DPAA2_SWITCH_RX_BUF_TAILROOM \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
/* Usable data area of an RX buffer once the tailroom is reserved */
#define DPAA2_SWITCH_RX_BUF_SIZE \
	(DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)

/* Number of frame descriptors a dpaa2_io_store can hold per dequeue */
#define DPAA2_SWITCH_STORE_SIZE 16

/* Buffer management */
/* Buffers released to the pool per buffer-management command */
#define BUFS_PER_CMD			7
#define DPAA2_ETHSW_NUM_BUFS		(1024 * BUFS_PER_CMD)
/* Refill the pool once the count drops below 5/6 of the target */
#define DPAA2_ETHSW_REFILL_THRESH	(DPAA2_ETHSW_NUM_BUFS * 5 / 6)

/* Number of times to retry DPIO portal operations while waiting
 * for portal to finish executing current command and become
 * available. We want to avoid being stuck in a while loop in case
 * hardware becomes unresponsive, but not give up too easily if
 * the portal really is busy for valid reasons
 */
#define DPAA2_SWITCH_SWP_BUSY_RETRIES	1000

/* Hardware annotation buffer size */
#define DPAA2_SWITCH_HWA_SIZE		64
/* Software annotation buffer size */
#define DPAA2_SWITCH_SWA_SIZE		64

/* Alignment required for TX buffer addresses */
#define DPAA2_SWITCH_TX_BUF_ALIGN	64

/* Offset of frame data inside a TX buffer: HW + SW annotation areas */
#define DPAA2_SWITCH_TX_DATA_OFFSET \
	(DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)

/* Headroom the driver needs in front of TX frame data (annotations +
 * worst-case alignment adjustment)
 */
#define DPAA2_SWITCH_NEEDED_HEADROOM \
	(DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)

/* Capacity of a per-port ACL table, and how many entries are reserved
 * for the default trap rules (see dpaa2_switch_acl_tbl_is_full())
 */
#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES	16
#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS		1

/* Size of the DMA-able command buffer used for ACL entry add/remove */
#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE	256

extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;

struct ethsw_core;

/**
 * struct dpaa2_switch_fq - Frame queue used for CPU traffic
 * @ethsw: backpointer to the switch instance owning this queue
 * @type: queue type (RX or TX confirmation), see DPAA2_SWITCH_RX_NUM_FQS
 * @store: dequeue result storage used with the DPIO service
 * @nctx: DPIO notification context for data-availability callbacks
 * @napi: NAPI instance servicing this queue
 * @fqid: hardware frame queue id
 */
struct dpaa2_switch_fq {
	struct ethsw_core *ethsw;
	enum dpsw_queue_type type;
	struct dpaa2_io_store *store;
	struct dpaa2_io_notification_ctx nctx;
	struct napi_struct napi;
	u32 fqid;
};

/**
 * struct dpaa2_switch_fdb - Forwarding database instance
 * @bridge_dev: bridge net_device this FDB is associated with
 * @fdb_id: hardware FDB table id
 * @in_use: true while the FDB is allocated to a bridge/port
 */
struct dpaa2_switch_fdb {
	struct net_device *bridge_dev;
	u16 fdb_id;
	bool in_use;
};

/**
 * struct dpaa2_switch_acl_entry - Software state of one ACL rule
 * @list: node in dpaa2_switch_filter_block::acl_entries
 * @prio: rule priority (used for ordering within the table)
 * @cookie: identifier supplied by the TC layer to match rules on removal
 * @cfg: hardware entry configuration passed to the MC firmware
 * @key: match key/mask for the rule
 */
struct dpaa2_switch_acl_entry {
	struct list_head list;
	u16 prio;
	unsigned long cookie;

	struct dpsw_acl_entry_cfg cfg;
	struct dpsw_acl_key key;
};

/**
 * struct dpaa2_switch_mirror_entry - Software state of one mirror rule
 * @list: node in dpaa2_switch_filter_block::mirror_entries
 * @cfg: hardware reflection (mirroring) configuration
 * @cookie: identifier supplied by the TC layer to match rules on removal
 * @if_id: switch interface the rule applies to
 */
struct dpaa2_switch_mirror_entry {
	struct list_head list;
	struct dpsw_reflection_cfg cfg;
	unsigned long cookie;
	u16 if_id;
};

/**
 * struct dpaa2_switch_filter_block - TC filter block shared by ports
 * @ethsw: backpointer to the switch instance
 * @ports: bitmap of switch ports bound to this block
 * @in_use: true while the block is allocated
 * @acl_entries: list of struct dpaa2_switch_acl_entry rules
 * @acl_id: hardware ACL table id
 * @num_acl_rules: number of user rules currently in the ACL table
 *	(default traps are accounted separately, see
 *	DPAA2_ETHSW_PORT_DEFAULT_TRAPS)
 * @mirror_entries: list of struct dpaa2_switch_mirror_entry rules
 */
struct dpaa2_switch_filter_block {
	struct ethsw_core *ethsw;
	u64 ports;
	bool in_use;

	struct list_head acl_entries;
	u16 acl_id;
	u8 num_acl_rules;

	struct list_head mirror_entries;
};

/**
 * dpaa2_switch_acl_tbl_is_full() - Check if an ACL table can take more rules
 * @filter_block: filter block owning the ACL table
 *
 * Return: true when adding another rule would exceed the hardware table
 * capacity, keeping DPAA2_ETHSW_PORT_DEFAULT_TRAPS entries reserved for the
 * default trap rules; false otherwise.
 */
static inline bool
dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
{
	if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
	    DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
		return true;
	return false;
}

/* Per port private data */
struct ethsw_port_priv {
	struct net_device *netdev;	/* net_device backed by this port */
	u16 idx;			/* switch interface index */
	struct ethsw_core *ethsw_data;	/* owning switch instance */
	u8 link_state;			/* last known link state */
	u8 stp_state;			/* spanning tree state of the port */

	/* Per-VLAN ETHSW_VLAN_* flags, indexed by VLAN id */
	u8 vlans[VLAN_VID_MASK + 1];
	u16 pvid;			/* port VLAN id for untagged ingress */
	u16 tx_qdid;			/* TX queuing destination id */

	struct dpaa2_switch_fdb *fdb;	/* FDB currently used by this port */
	bool bcast_flood;		/* broadcast flooding enabled */
	bool ucast_flood;		/* unknown unicast flooding enabled */
	bool learn_ena;			/* hardware address learning enabled */

	struct dpaa2_switch_filter_block *filter_block;
};

/* Switch data */
struct ethsw_core {
	struct device *dev;		/* underlying fsl-mc device */
	struct fsl_mc_io *mc_io;	/* MC firmware command portal */
	u16 dpsw_handle;		/* token for DPSW firmware commands */
	struct dpsw_attr sw_attr;	/* DPSW attributes read from firmware */
	u16 major, minor;		/* DPSW API version */
	unsigned long features;		/* ETHSW_FEATURE_* bitmask */
	int dev_id;			/* DPSW object id */
	struct ethsw_port_priv **ports;	/* array of sw_attr.num_ifs ports */
	struct iommu_domain *iommu_domain;

	/* Switch-global ETHSW_VLAN_* flags, indexed by VLAN id */
	u8 vlans[VLAN_VID_MASK + 1];

	struct workqueue_struct *workqueue;

	/* Control interface queues (RX + TX_CONF) and their buffer pool */
	struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
	struct fsl_mc_device *dpbp_dev;
	int buf_count;			/* buffers currently in the pool */
	u16 bpid;			/* buffer pool id */
	int napi_users;			/* ports with NAPI enabled */

	struct dpaa2_switch_fdb *fdbs;	/* array of available FDB instances */
	struct dpaa2_switch_filter_block *filter_blocks;
	u16 mirror_port;		/* interface acting as mirror
					 * destination; presumably sw_attr
					 * .num_ifs when unset — confirm in
					 * the .c file
					 */
};

/**
 * dpaa2_switch_get_index() - Map a net_device to its switch interface index
 * @ethsw: switch instance to search
 * @netdev: net_device to look for
 *
 * Return: the port index of @netdev on @ethsw, or -EINVAL if @netdev is not
 * one of this switch's ports.
 */
static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
					 struct net_device *netdev)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (ethsw->ports[i]->netdev == netdev)
			return ethsw->ports[i]->idx;

	return -EINVAL;
}

/**
 * dpaa2_switch_supports_cpu_traffic() - Probe-time capability check
 * @ethsw: switch instance whose attributes were read from firmware
 *
 * The driver requires an enabled control interface, per-FDB flooding and
 * broadcast domains, and at least one FDB per port. Logs the failing
 * condition when the check does not pass.
 *
 * Return: true when the DPSW configuration supports CPU traffic, false
 * otherwise (the caller is expected to abort the probe).
 */
static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
{
	if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
		dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
		dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
		dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
		return false;
	}

	return true;
}

bool dpaa2_switch_port_dev_check(const struct net_device *netdev);

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

/* Callback invoked for each entry while walking an FDB dump */
typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
				  struct fdb_dump_entry *fdb_entry,
				  void *data);

/* TC offload */

int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry);

int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv);

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv);
#endif /* __ETHSW_H */