/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

union ice_32byte_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
		struct {
			__le32 reserved;
			__le32 fd_id;
		} qword3;
	} wb; /* writeback */
};

struct ice_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2 = 0,
	ICE_RX_PTYPE_OUTER_IP = 1,
};

enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE = 0,
	ICE_RX_PTYPE_OUTER_IPV4 = 1,
	ICE_RX_PTYPE_OUTER_IPV6 = 2,
};

enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG = 0,
	ICE_RX_PTYPE_FRAG = 1,
};

enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};

enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};

enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE = 0,
	ICE_RX_PTYPE_INNER_PROT_UDP = 1,
	ICE_RX_PTYPE_INNER_PROT_TCP = 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};

enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};

/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile id */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};
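
/* Illustrative sketch, not part of the hardware definition: pulling the
 * 10-bit PTYPE and the 6-bit flex-flags-0 field out of ptype_flex_flags0 in
 * the writeback format, following the "ptype=[9:0], ff0=[15:10]" layout
 * documented above. The 0x3FF mask matches ICE_RX_FLEX_DESC_PTYPE_M defined
 * later in this file. Assumes le16_to_cpu() from the kernel byteorder
 * headers; the helper names are made up for this example.
 */
static inline u16 ice_example_rx_ptype(const union ice_32b_rx_flex_desc *desc)
{
	/* ptype lives in bits [9:0] */
	return le16_to_cpu(desc->wb.ptype_flex_flags0) & 0x3FF;
}

static inline u8 ice_example_rx_flex_flags0(const union ice_32b_rx_flex_desc *desc)
{
	/* flex flags 0 live in bits [15:10] */
	return le16_to_cpu(desc->wb.ptype_flex_flags0) >> 10;
}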

/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 vlan_id;
			__le16 flow_id_ipv6;
		} flex;
		__le32 ts_high;
	} flex_ts;
};

/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW)
 */
enum ice_rxdid {
	ICE_RXDID_LEGACY_0 = 0,
	ICE_RXDID_LEGACY_1 = 1,
	ICE_RXDID_FLEX_NIC = 2,
	ICE_RXDID_FLEX_NIC_2 = 6,
	ICE_RXDID_HW = 7,
	ICE_RXDID_LAST = 63,
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01

/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
	ICE_RX_MDID_FLOW_ID_LOWER = 5,
	ICE_RX_MDID_FLOW_ID_HIGH,
	ICE_RX_MDID_SRC_VSI = 19,
	ICE_RX_MDID_HASH_LOW = 56,
	ICE_RX_MDID_HASH_HIGH,
};

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI = 0,
	ICE_RXFLG_EVLAN_x8100 = 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC = 22,
	ICE_RXFLG_TNL_VLAN,
	ICE_RXFLG_PKT_FRG,
	ICE_RXFLG_FIN = 32,
	ICE_RXFLG_SYN,
	ICE_RXFLG_RST,
	ICE_RXFLG_TNL0 = 38,
	ICE_RXFLG_TNL1,
	ICE_RXFLG_TNL2,
	ICE_RXFLG_UDP_GRE,
	ICE_RXFLG_RSVD = 63
};

/* for ice_32b_rx_flex_desc.ptype_flex_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */

/* for ice_32b_rx_flex_desc.pkt_len member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */

enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
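
/* Illustrative sketch, not part of the hardware definition: a typical
 * cleanup-path check that the descriptor has been written back (DD set in
 * status_error0) before the other writeback fields are trusted, plus
 * extraction of the 14-bit packet length. Assumes le16_to_cpu() from the
 * kernel byteorder headers; the helper names are made up for this example.
 */
static inline bool ice_example_rx_desc_done(const union ice_32b_rx_flex_desc *desc)
{
	u16 stat_err0 = le16_to_cpu(desc->wb.status_error0);

	return !!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S));
}

static inline u16 ice_example_rx_pkt_len(const union ice_32b_rx_flex_desc *desc)
{
	/* bits [15:14] of pkt_len are reserved, mask them off */
	return le16_to_cpu(desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
}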

#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 head;
	u16 cpuid; /* bigger than needed, see above for reason */
#define ICE_RLAN_BASE_S 7
	u64 base;
	u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u8 dtype;
	u8 dsize;
	u8 crcstrip;
	u8 l2tsel;
	u8 hsplit_0;
	u8 hsplit_1;
	u8 showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8 tphrdesc_ena;
	u8 tphwdesc_ena;
	u8 tphdata_ena;
	u8 tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};

struct ice_ctx_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {	\
	.offset = offsetof(struct _struct, _ele),	\
	.size_of = FIELD_SIZEOF(struct _struct, _ele),	\
	.width = _width,				\
	.lsb = _lsb,					\
}

/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};

/* TX Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	__le64 cmd_type_offset_bsz;
};

enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA = 0x0,
	ICE_TX_DESC_DTYPE_CTX = 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};

#define ICE_TXD_QW1_CMD_S 4
#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP = 0x0001,
	ICE_TX_DESC_CMD_RS = 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
};

#define ICE_TXD_QW1_OFFSET_S 16
#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
			     ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
			    ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
			    ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S 34
#define ICE_TXD_QW1_L2TAG1_S 48
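
/* Illustrative sketch, not part of the hardware definition: how the shift
 * values above combine into cmd_type_offset_bsz of a data descriptor. The
 * DTYPE occupies the low 4 bits, the command flags start at bit 4, the
 * MAC/IP/L4 header offsets at bit 16, the buffer size at bit 34 and L2TAG1
 * at bit 48. A caller would pass, e.g., td_cmd = ICE_TX_DESC_CMD_EOP |
 * ICE_TX_DESC_CMD_RS on the last fragment of a frame. Assumes cpu_to_le64()
 * from the kernel byteorder headers; the helper name is made up for this
 * example.
 */
static inline __le64 ice_example_build_ctob(u64 td_cmd, u64 td_offset,
					    unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}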

/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S 30
#define ICE_TXD_CTX_QW1_TSO_LEN_M \
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S 50

enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO = 0x01,
	ICE_TX_CTX_DESC_TSYN = 0x02,
	ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
	ICE_TX_CTX_DESC_RESERVED = 0x40
};

#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
	u64 base; /* base is defined in 128-byte units */
	u8 port_num;
	u16 cgd_num; /* bigger than needed, see above for reason */
	u8 pf_num;
	u16 vmvf_num;
	u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
	u16 src_vsi;
	u8 tsyn_ena;
	u8 alt_vlan;
	u16 cpuid; /* bigger than needed, see above for reason */
	u8 wb_mode;
	u8 tphrd_desc;
	u8 tphrd;
	u8 tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
	u8 itr_notification_mode;
	u8 adjust_prof_id;
	u32 qlen; /* bigger than needed, see above for reason */
	u8 quanta_prof_idx;
	u8 tso_ena;
	u16 tso_qnum;
	u8 legacy_int;
	u8 drop_ena;
	u8 cache_prof_idx;
	u8 pkt_shaper_prof_idx;
	u8 int_q_state; /* width not needed - internal do not write */
};

/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};

static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
	return ice_ptype_lkup[ptype];
}
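
/* Illustrative sketch, not part of the hardware definition: typical use of
 * ice_decode_rx_desc_ptype() above, e.g. to decide whether a received packet
 * carries TCP. The 'known' bit distinguishes real table entries from
 * ICE_PTT_UNUSED_ENTRY() placeholders. The helper name is made up for this
 * example.
 */
static inline bool ice_example_ptype_is_tcp(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	return decoded.known &&
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
}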
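
/* Illustrative sketch, not part of the hardware definition: packing qw1 of a
 * Tx context descriptor for TSO using the ICE_TXD_CTX_QW1_* shifts defined
 * earlier in this file. The DTYPE ICE_TX_DESC_DTYPE_CTX sits in the low
 * 4 bits, the command flags (e.g. ICE_TX_CTX_DESC_TSO) start at bit 4, the
 * total TSO payload length at bit 30 and the MSS at bit 50. Assumes
 * cpu_to_le64() from the kernel byteorder headers; the helper name is made
 * up for this example.
 */
static inline __le64 ice_example_build_ctx_qw1(u64 cmd, u64 tso_len, u64 mss)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_CTX |
			   (cmd << ICE_TXD_CTX_QW1_CMD_S) |
			   (tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			   (mss << ICE_TXD_CTX_QW1_MSS_S));
}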

#define ICE_LINK_SPEED_UNKNOWN 0
#define ICE_LINK_SPEED_10MBPS 10
#define ICE_LINK_SPEED_100MBPS 100
#define ICE_LINK_SPEED_1000MBPS 1000
#define ICE_LINK_SPEED_2500MBPS 2500
#define ICE_LINK_SPEED_5000MBPS 5000
#define ICE_LINK_SPEED_10000MBPS 10000
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000

#endif /* _ICE_LAN_TX_RX_H_ */