/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#ifndef _MLXSW_PCI_HW_H
#define _MLXSW_PCI_HW_H

#include <linux/bitops.h>

#include "item.h"

#define MLXSW_PCI_BAR0_SIZE		(1024 * 1024) /* 1MB */
#define MLXSW_PCI_PAGE_SIZE		4096

#define MLXSW_PCI_CIR_BASE			0x71000
#define MLXSW_PCI_CIR_IN_PARAM_HI		MLXSW_PCI_CIR_BASE
#define MLXSW_PCI_CIR_IN_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x04)
#define MLXSW_PCI_CIR_IN_MODIFIER		(MLXSW_PCI_CIR_BASE + 0x08)
#define MLXSW_PCI_CIR_OUT_PARAM_HI		(MLXSW_PCI_CIR_BASE + 0x0C)
#define MLXSW_PCI_CIR_OUT_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x10)
#define MLXSW_PCI_CIR_TOKEN			(MLXSW_PCI_CIR_BASE + 0x14)
#define MLXSW_PCI_CIR_CTRL			(MLXSW_PCI_CIR_BASE + 0x18)
#define MLXSW_PCI_CIR_CTRL_GO_BIT		BIT(23)
#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT		BIT(22)
#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT	12
#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT		24
#define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000

#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS		200
#define MLXSW_PCI_FW_READY			0xA1844
#define MLXSW_PCI_FW_READY_MASK			0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC		0x5E

#define MLXSW_PCI_DOORBELL_SDQ_OFFSET		0x000
#define MLXSW_PCI_DOORBELL_RDQ_OFFSET		0x200
#define MLXSW_PCI_DOORBELL_CQ_OFFSET		0x400
#define MLXSW_PCI_DOORBELL_EQ_OFFSET		0x600
#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET	0x800
#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET	0xA00

#define MLXSW_PCI_DOORBELL(offset, type_offset, num)	\
	((offset) + (type_offset) + (num) * 4)

#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset)	(offset)
#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset)	((offset) + 4)

#define MLXSW_PCI_CQS_MAX	96
#define MLXSW_PCI_EQS_COUNT	2
#define MLXSW_PCI_EQ_ASYNC_NUM	0
#define MLXSW_PCI_EQ_COMP_NUM	1

#define MLXSW_PCI_SDQS_MIN	2 /* EMAD and control traffic */
#define MLXSW_PCI_SDQ_EMAD_INDEX	0
#define MLXSW_PCI_SDQ_EMAD_TC	0
#define MLXSW_PCI_SDQ_CTL_TC	3

#define MLXSW_PCI_AQ_PAGES	8
#define MLXSW_PCI_AQ_SIZE	(MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
#define MLXSW_PCI_CQE01_SIZE	16 /* 16 bytes per element */
#define MLXSW_PCI_CQE2_SIZE	32 /* 32 bytes per element */
#define MLXSW_PCI_CQE_SIZE_MAX	MLXSW_PCI_CQE2_SIZE
#define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
#define MLXSW_PCI_CQE01_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
#define MLXSW_PCI_CQE2_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
#define MLXSW_PCI_EQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
#define MLXSW_PCI_EQE_UPDATE_COUNT	0x80

#define MLXSW_PCI_WQE_SG_ENTRIES	3
#define MLXSW_PCI_WQE_TYPE_ETHERNET	0xA
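/* Illustrative sketch only, not part of the hardware interface: the
 * MLXSW_PCI_DOORBELL() macro above places the doorbell of queue 'num' at a
 * 4-byte stride within its per-type area, relative to the doorbell page
 * offset reported by the firmware. For example, with a doorbell page offset
 * 'db_off', the doorbell of SDQ number 3 would live at
 * MLXSW_PCI_DOORBELL(db_off, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3), i.e.
 * db_off + 0x000 + 3 * 4. The helper below is an assumed example.
 */
static inline u32 mlxsw_pci_example_sdq_doorbell_off(u32 db_off, u8 sdq_num)
{
	return MLXSW_PCI_DOORBELL(db_off, MLXSW_PCI_DOORBELL_SDQ_OFFSET,
				  sdq_num);
}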
/* pci_wqe_c
 * If set it indicates that a completion should be reported upon
 * execution of this descriptor.
 */
MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);

/* pci_wqe_lp
 * Local Processing, set if packet should be processed by the local
 * switch hardware:
 * For Ethernet EMAD (Direct Route and non Direct Route) -
 * must be set if packet destination is local device
 * For InfiniBand CTL - must be set if packet destination is local device
 * Otherwise it must be clear
 * Local Process packets must not exceed the size of 2K (including payload
 * and headers).
 */
MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);

/* pci_wqe_type
 * Packet type.
 */
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);

/* pci_wqe_byte_count
 * Size of i-th scatter/gather entry, 0 if entry is unused.
 */
MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);

/* pci_wqe_address
 * Physical address of i-th scatter/gather entry.
 * Gather Entries must be 2-byte aligned.
 */
MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);

enum mlxsw_pci_cqe_v {
	MLXSW_PCI_CQE_V0,
	MLXSW_PCI_CQE_V1,
	MLXSW_PCI_CQE_V2,
};

#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2)			\
static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
{									\
	switch (v) {							\
	default:							\
	case MLXSW_PCI_CQE_V0:						\
		return mlxsw_pci_cqe##v0##_##name##_get(cqe);		\
	case MLXSW_PCI_CQE_V1:						\
		return mlxsw_pci_cqe##v1##_##name##_get(cqe);		\
	case MLXSW_PCI_CQE_V2:						\
		return mlxsw_pci_cqe##v2##_##name##_get(cqe);		\
	}								\
}									\
static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v,	\
					      char *cqe, u32 val)	\
{									\
	switch (v) {							\
	default:							\
	case MLXSW_PCI_CQE_V0:						\
		mlxsw_pci_cqe##v0##_##name##_set(cqe, val);		\
		break;							\
	case MLXSW_PCI_CQE_V1:						\
		mlxsw_pci_cqe##v1##_##name##_set(cqe, val);		\
		break;							\
	case MLXSW_PCI_CQE_V2:						\
		mlxsw_pci_cqe##v2##_##name##_set(cqe, val);		\
		break;							\
	}								\
}

/* pci_cqe_lag
 * Packet arrives from a port which is a LAG
 */
MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);

/* pci_cqe_system_port/lag_id
 * When lag=0: System port on which the packet was received
 * When lag=1:
 * bits [15:4] LAG ID on which the packet was received
 * bits [3:0] sub_port on which the packet was received
 */
MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);

/* pci_cqe_wqe_counter
 * WQE count of the WQEs completed on the associated dqn
 */
MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);

/* pci_cqe_byte_count
 * Byte count of received packets including additional two
 * Reserved Bytes that are appended to the end of the frame.
 * Reserved for Send CQE.
 */
MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);

#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID	0xFFFF

/* pci_cqe_mirror_cong_high
 * Congestion level in units of 8KB of the egress traffic class of the
 * original packet that does mirroring to the CPU. Value of 0xFFFF means
 * that the congestion level is invalid.
 */
MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4);

/* pci_cqe_trap_id
 * Trap ID that captured the packet.
 */
MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10);

/* pci_cqe_crc
 * Length includes CRC. Indicates that the length field includes
 * the packet's CRC.
 */
MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
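/* Illustrative sketch only, not part of the driver: the helpers generated
 * by mlxsw_pci_cqe_item_helpers() let callers read a field without caring
 * which CQE version the device uses. An assumed example of resolving the
 * receive port of a CQE:
 */
static inline u16 mlxsw_pci_example_cqe_rx_port(enum mlxsw_pci_cqe_v v,
						char *cqe)
{
	if (mlxsw_pci_cqe_lag_get(v, cqe))
		/* Received over a LAG; the member port is identified by
		 * mlxsw_pci_cqe_lag_subport_get(v, cqe).
		 */
		return mlxsw_pci_cqe_lag_id_get(v, cqe);
	return mlxsw_pci_cqe_system_port_get(cqe);
}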
/* pci_cqe_e
 * CQE with Error.
 */
MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);

/* pci_cqe_sr
 * 1 - Send Queue
 * 0 - Receive Queue
 */
MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);

/* pci_cqe_dqn
 * Descriptor Queue (DQ) Number.
 */
MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);

#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID	0x1F

/* pci_cqe_mirror_tclass
 * The egress traffic class of the original packet that does mirroring to
 * the CPU. Value of 0x1F means that the traffic class is invalid.
 */
MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5);

/* pci_cqe_tx_lag
 * The Tx port of a packet that is mirrored / sampled to the CPU is a LAG.
 */
MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1);

/* pci_cqe_tx_lag_subport
 * The port index within the LAG of a packet that is mirrored / sampled to
 * the CPU. Reserved when tx_lag is 0.
 */
MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8);

#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT	0xFFFE
#define MLXSW_PCI_CQE2_TX_PORT_INVALID		0xFFFF

/* pci_cqe_tx_lag_id
 * The Tx LAG ID of the original packet that is mirrored / sampled to the
 * CPU. Value of 0xFFFE means multi-port. Value of 0xFFFF means that the
 * Tx LAG ID is invalid. Reserved when tx_lag is 0.
 */
MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16);

/* pci_cqe_tx_system_port
 * The Tx port of the original packet that is mirrored / sampled to the
 * CPU. Value of 0xFFFE means multi-port. Value of 0xFFFF means that the
 * Tx port is invalid. Reserved when tx_lag is 1.
 */
MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16);

/* pci_cqe_mirror_cong_low
 * Congestion level in units of 8KB of the egress traffic class of the
 * original packet that does mirroring to the CPU. Value of 0xFFFF means
 * that the congestion level is invalid.
 */
MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12);

#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT	13 /* Units of 8KB. */

static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe)
{
	u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe);
	u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe);

	return cong_high << 12 | cong_low;
}

/* pci_cqe_user_def_val_orig_pkt_len
 * When trap_id is an ACL: User defined value from policy engine action.
 */
MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);

/* pci_cqe_mirror_reason
 * Mirror reason.
 */
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);

#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID	0xFFFFFF

/* pci_cqe_mirror_latency
 * End-to-end latency of the original packet that does mirroring to the
 * CPU. Value of 0xFFFFFF means that the latency is invalid. Units are
 * according to MOGCR.mirror_latency_units.
 */
MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24);

/* pci_cqe_owner
 * Ownership bit.
 */
MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
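/* Illustrative sketch only, not part of the driver: the congestion level
 * assembled by mlxsw_pci_cqe2_mirror_cong_get() is a 16-bit value in units
 * of 8KB, with 0xFFFF marking an invalid value. An assumed helper for
 * converting it to bytes:
 */
static inline bool mlxsw_pci_example_mirror_cong_bytes(const char *cqe,
						       u64 *p_bytes)
{
	u16 cong = mlxsw_pci_cqe2_mirror_cong_get(cqe);

	if (cong == MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		return false;
	*p_bytes = (u64)cong << MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
	return true;
}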
/* pci_eqe_event_type
 * Event type.
 */
MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
#define MLXSW_PCI_EQE_EVENT_TYPE_COMP	0x00
#define MLXSW_PCI_EQE_EVENT_TYPE_CMD	0x0A

/* pci_eqe_event_sub_type
 * Event sub-type.
 */
MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);

/* pci_eqe_cqn
 * Completion Queue that triggered this EQE.
 */
MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);

/* pci_eqe_owner
 * Ownership bit.
 */
MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);

/* pci_eqe_cmd_token
 * Command completion event - token
 */
MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);

/* pci_eqe_cmd_status
 * Command completion event - status
 */
MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);

/* pci_eqe_cmd_out_param_h
 * Command completion event - output parameter - higher part
 */
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);

/* pci_eqe_cmd_out_param_l
 * Command completion event - output parameter - lower part
 */
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);

#endif