/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H

#include <linux/dim.h>
#include <linux/if_vlan.h>

#include "hnae3.h"

enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
	HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
	HNS3_NIC_STATE_MAX
};

#define HNS3_RING_RX_RING_BASEADDR_L_REG	0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG	0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG		0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG		0x0000C
#define HNS3_RING_RX_RING_TAIL_REG		0x00018
#define HNS3_RING_RX_RING_HEAD_REG		0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG		0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG	0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG	0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG		0x00048
#define HNS3_RING_TX_RING_TC_REG		0x00050
#define HNS3_RING_TX_RING_TAIL_REG		0x00058
#define HNS3_RING_TX_RING_HEAD_REG		0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG		0x00060
#define HNS3_RING_TX_RING_OFFSET_REG		0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG		0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG	0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG		0x00074
#define HNS3_RING_EN_REG			0x00090
#define HNS3_RING_RX_EN_REG			0x00098
#define HNS3_RING_TX_EN_REG			0x000D4

#define HNS3_RX_HEAD_SIZE			256

#define HNS3_TX_TIMEOUT			(5 * HZ)
#define HNS3_RING_NAME_LEN		16
#define HNS3_BUFFER_SIZE_2048		2048
#define HNS3_RING_MAX_PENDING		32760
#define HNS3_RING_MIN_PENDING		72
#define HNS3_RING_BD_MULTIPLE		8
/* max frame size of mac */
#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

#define HNS3_BD_SIZE_512_TYPE		0
#define HNS3_BD_SIZE_1024_TYPE		1
#define HNS3_BD_SIZE_2048_TYPE		2
#define HNS3_BD_SIZE_4096_TYPE		3

#define HNS3_RX_FLAG_VLAN_PRESENT	0x1
#define HNS3_RX_FLAG_L3ID_IPV4		0x0
#define HNS3_RX_FLAG_L3ID_IPV6		0x1
#define HNS3_RX_FLAG_L4ID_UDP		0x0
#define HNS3_RX_FLAG_L4ID_TCP		0x1

#define HNS3_RXD_DMAC_S			0
#define HNS3_RXD_DMAC_M			(0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S			2
#define HNS3_RXD_VLAN_M			(0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S			4
#define HNS3_RXD_L3ID_M			(0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S			8
#define HNS3_RXD_L4ID_M			(0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B			12
#define HNS3_RXD_STRP_TAGP_S		13
#define HNS3_RXD_STRP_TAGP_M		(0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B			16
#define HNS3_RXD_L3E_B			17
#define HNS3_RXD_L4E_B			18
#define HNS3_RXD_TRUNCAT_B		19
#define HNS3_RXD_HOI_B			20
#define HNS3_RXD_DOI_B			21
#define HNS3_RXD_OL3E_B			22
#define HNS3_RXD_OL4E_B			23
#define HNS3_RXD_GRO_COUNT_S		24
#define HNS3_RXD_GRO_COUNT_M		(0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B		30
#define HNS3_RXD_GRO_ECN_B		31
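/* Editor's illustrative sketch, not part of the driver: the _S/_M pairs
 * above are plain shift/mask pairs meant to be read with the generic field
 * helpers from hnae3.h (hnae3_get_field()/hnae3_get_bit()), e.g. to check
 * whether the L4 protocol reported in an RX descriptor's l234_info word is
 * TCP. hns3_example_rx_l4_is_tcp() is a hypothetical name used only for
 * this example.
 */
static inline bool hns3_example_rx_l4_is_tcp(u32 l234info)
{
	return hnae3_get_field(l234info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) ==
	       HNS3_RX_FLAG_L4ID_TCP;
}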
#define HNS3_RXD_ODMAC_S		0
#define HNS3_RXD_ODMAC_M		(0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S		2
#define HNS3_RXD_OVLAN_M		(0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S		4
#define HNS3_RXD_OL3ID_M		(0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S		8
#define HNS3_RXD_OL4ID_M		(0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S			12
#define HNS3_RXD_FBHI_M			(0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S			14
#define HNS3_RXD_FBLI_M			(0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_PTYPE_S		4
#define HNS3_RXD_PTYPE_M		GENMASK(11, 4)

#define HNS3_RXD_BDTYPE_S		0
#define HNS3_RXD_BDTYPE_M		(0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B			4
#define HNS3_RXD_UDP0_B			5
#define HNS3_RXD_EXTEND_B		7
#define HNS3_RXD_FE_B			8
#define HNS3_RXD_LUM_B			9
#define HNS3_RXD_CRCP_B			10
#define HNS3_RXD_L3L4P_B		11
#define HNS3_RXD_TSIDX_S		12
#define HNS3_RXD_TSIDX_M		(0x3 << HNS3_RXD_TSIDX_S)
#define HNS3_RXD_TS_VLD_B		14
#define HNS3_RXD_LKBK_B			15
#define HNS3_RXD_GRO_SIZE_S		16
#define HNS3_RXD_GRO_SIZE_M		(0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S			0
#define HNS3_TXD_L3T_M			(0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S			2
#define HNS3_TXD_L4T_M			(0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B			4
#define HNS3_TXD_L4CS_B			5
#define HNS3_TXD_VLAN_B			6
#define HNS3_TXD_TSO_B			7

#define HNS3_TXD_L2LEN_S		8
#define HNS3_TXD_L2LEN_M		(0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S		16
#define HNS3_TXD_L3LEN_M		(0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S		24
#define HNS3_TXD_L4LEN_M		(0xff << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_CSUM_START_S		8
#define HNS3_TXD_CSUM_START_M		(0xffff << HNS3_TXD_CSUM_START_S)

#define HNS3_TXD_OL3T_S			0
#define HNS3_TXD_OL3T_M			(0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B		2
#define HNS3_TXD_MACSEC_B		3
#define HNS3_TXD_TUNTYPE_S		4
#define HNS3_TXD_TUNTYPE_M		(0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_CSUM_OFFSET_S		8
#define HNS3_TXD_CSUM_OFFSET_M		(0xffff << HNS3_TXD_CSUM_OFFSET_S)

#define HNS3_TXD_BDTYPE_S		0
#define HNS3_TXD_BDTYPE_M		(0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B			4
#define HNS3_TXD_SC_S			5
#define HNS3_TXD_SC_M			(0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B		7
#define HNS3_TXD_VLD_B			8
#define HNS3_TXD_RI_B			9
#define HNS3_TXD_RA_B			10
#define HNS3_TXD_TSYN_B			11
#define HNS3_TXD_DECTTL_S		12
#define HNS3_TXD_DECTTL_M		(0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_OL4CS_B		22

#define HNS3_TXD_MSS_S			0
#define HNS3_TXD_MSS_M			(0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TXD_HW_CS_B		14

#define HNS3_VECTOR_TX_IRQ		BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ		BIT_ULL(1)

#define HNS3_VECTOR_NOT_INITED		0
#define HNS3_VECTOR_INITED		1

#define HNS3_MAX_BD_SIZE		65535
#define HNS3_MAX_TSO_BD_NUM		63U
#define HNS3_MAX_TSO_SIZE \
	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)

#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
	(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))

#define HNS3_VECTOR_GL0_OFFSET		0x100
#define HNS3_VECTOR_GL1_OFFSET		0x200
#define HNS3_VECTOR_GL2_OFFSET		0x300
#define HNS3_VECTOR_RL_OFFSET		0x900
#define HNS3_VECTOR_RL_EN_B		6
#define HNS3_VECTOR_TX_QL_OFFSET	0xe00
#define HNS3_VECTOR_RX_QL_OFFSET	0xf00

#define HNS3_RING_EN_B			0
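/* Editor's illustrative sketch, not part of the driver: the HNS3_TXD_* bits
 * above are written into the TX descriptor's offload words with the
 * hnae3_set_bit()/hnae3_set_field() helpers from hnae3.h. For example,
 * requesting hardware L3 and L4 checksum generation for a BD amounts to
 * setting two bits in the type_cs_vlan_tso byte.
 * hns3_example_request_l3l4_csum() is a hypothetical name used only here.
 */
static inline void hns3_example_request_l3l4_csum(u32 *type_cs_vlan_tso)
{
	hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1U);
	hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1U);
}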
enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct __packed hns3_desc {
	union {
		__le64 addr;
		__le16 csum;
		struct {
			__le32 ts_nsec;
			__le32 ts_sec;
		};
	};
	union {
		struct {
			__le16 vlan_tag;
			__le16 send_size;
			union {
				__le32 type_cs_vlan_tso_len;
				struct {
					__u8 type_cs_vlan_tso;
					__u8 l2_len;
					__u8 l3_len;
					__u8 l4_len;
				};
			};
			__le16 outer_vlan_tag;
			__le16 tv;

			union {
				__le32 ol_type_vlan_len_msec;
				struct {
					__u8 ol_type_vlan_msec;
					__u8 ol2_len;
					__u8 ol3_len;
					__u8 ol4_len;
				};
			};

			__le32 paylen_ol4cs;
			__le16 bdtp_fe_sc_vld_ra_ri;
			__le16 mss_hw_csum;
		} tx;

		struct {
			__le32 l234_info;
			__le16 pkt_len;
			__le16 size;

			__le32 rss_hash;
			__le16 fd_id;
			__le16 vlan_tag;

			union {
				__le32 ol_info;
				struct {
					__le16 o_dm_vlan_id_fb;
					__le16 ot_vlan_tag;
				};
			};

			__le32 bd_base_info;
		} rx;
	};
};
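/* Editor's illustrative sketch, not part of the driver: a BD is handed back
 * to software once hardware sets HNS3_RXD_VLD_B in bd_base_info
 * (HNS3_RXD_FE_B then marks the last BD of a packet). The descriptor words
 * are little-endian, so they are converted before the bit helpers from
 * hnae3.h are applied. hns3_example_rx_bd_ready() is a hypothetical name
 * used only for this example.
 */
static inline bool hns3_example_rx_bd_ready(const struct hns3_desc *desc)
{
	u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	return hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B);
}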
enum hns3_desc_type {
	DESC_TYPE_UNKNOWN	= 0,
	DESC_TYPE_SKB		= 1 << 0,
	DESC_TYPE_FRAGLIST_SKB	= 1 << 1,
	DESC_TYPE_PAGE		= 1 << 2,
	DESC_TYPE_BOUNCE_ALL	= 1 << 3,
	DESC_TYPE_BOUNCE_HEAD	= 1 << 4,
	DESC_TYPE_SGL_SKB	= 1 << 5,
};

struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf;	/* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the IP stack */
	void *priv;

	union {
		u32 page_offset;	/* for rx */
		u32 send_bytes;		/* for tx */
	};

	u32 length;	/* length of the buffer */

	u16 reuse_flag;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
	u16 pagecnt_bias;
};

enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE,		/* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL	= 0xf	/* must be last */
};

enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL	= 0xf	/* must be last */
};

enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf	/* must be last */
};

enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};

struct hns3_rx_ptype {
	u32 ptype:8;
	u32 csum_level:2;
	u32 ip_summed:2;
	u32 l3_type:4;
	u32 valid:1;
};

struct ring_stats {
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_more;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
			u64 tx_vlan_err;
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
			u64 over_max_recursion;
			u64 hw_limitation;
			u64 tx_bounce;
			u64 tx_spare_full;
			u64 copy_bits_err;
			u64 tx_sgl;
			u64 skb2sgl_err;
			u64 map_sg_err;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
			u64 csum_complete;
			u64 rx_multicast;
			u64 non_reuse_pg;
			u64 frag_alloc_err;
			u64 frag_alloc;
		};
	};
};
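/* Editor's illustrative sketch, not part of the driver: the counters above
 * are updated per ring under the ring's u64_stats_sync sequence (the syncp
 * member of struct hns3_enet_ring below), following the usual
 * u64_stats_update_begin()/u64_stats_update_end() pattern.
 * hns3_example_count_tx() is a hypothetical name used only here.
 */
static inline void hns3_example_count_tx(struct ring_stats *stats,
					 struct u64_stats_sync *syncp,
					 unsigned int bytes)
{
	u64_stats_update_begin(syncp);
	stats->tx_pkts++;
	stats->tx_bytes += bytes;
	u64_stats_update_end(syncp);
}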
struct hns3_tx_spare {
	dma_addr_t dma;
	void *buf;
	u32 next_to_use;
	u32 next_to_clean;
	u32 last_to_clean;
	u32 len;
};

struct hns3_enet_ring {
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size;	/* size for hnae_desc->addr, preset by AE */
	u16 desc_num;	/* total number of desc */
	int next_to_use;	/* idx of next spare desc */

	/* idx of latest sent desc, the ring is empty when equal to
	 * next_to_use
	 */
	int next_to_clean;
	u32 flag;	/* ring attribute */

	int pending_buf;
	union {
		/* for Tx ring */
		struct {
			u32 fd_qb_tx_sample;
			int last_to_use;	/* last idx used by xmit */
			u32 tx_copybreak;
			struct hns3_tx_spare *tx_spare;
		};

		/* for Rx ring */
		struct {
			u32 pull_len;	/* memcpy len for current rx packet */
			u32 rx_copybreak;
			u32 frag_num;
			/* first buffer address for current packet */
			unsigned char *va;
			struct sk_buff *skb;
			struct sk_buff *tail_skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

#define HNS3_INT_GL_50K			0x0014
#define HNS3_INT_GL_20K			0x0032
#define HNS3_INT_GL_18K			0x0036
#define HNS3_INT_GL_8K			0x007C

#define HNS3_INT_GL_1US			BIT(31)

#define HNS3_INT_RL_MAX			0x00EC
#define HNS3_INT_RL_ENABLE_MASK		0x40

#define HNS3_INT_QL_DEFAULT_CFG		0x20

struct hns3_enet_coalesce {
	u16 int_gl;
	u16 int_ql;
	u16 int_ql_max;
	u8 adapt_enable:1;
	u8 ql_enable:1;
	u8 unit_1us:1;
	enum hns3_flow_level_range flow_level;
};

struct hns3_enet_ring_group {
	/* rings in this group, chained through ring->next */
	struct hns3_enet_ring *ring;
	u64 total_bytes;	/* total bytes processed this group */
	u64 total_packets;	/* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
	struct dim dim;
};

struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;

	u16 idx;		/* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps;	/* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	u64 event_cnt;
} ____cacheline_internodealigned_in_smp;

struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;

	/* the cb for nic to manage the ring buffer: the first half of the
	 * array is for tx_ring and the second half is for rx_ring
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;
	u8 max_non_tso_bd_num;

	u64 tx_timeout_count;

	unsigned long state;

	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
};

union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

static inline int ring_space(struct hns3_enet_ring *ring)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
	 */
	int begin = smp_load_acquire(&ring->next_to_clean);
	int end = READ_ONCE(ring->next_to_use);

	return ((end >= begin) ? (ring->desc_num - end + begin) :
			(begin - end)) - 1;
}

static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, reg)

static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, reg, value)

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)

#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; (pos); pos = (pos)->next)

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
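/* Editor's illustrative sketch, not part of the driver: the rings attached
 * to one vector are chained through ring->next, so hns3_for_each_ring()
 * combined with ring_space() yields the number of free BDs across a whole
 * ring group. hns3_example_group_space() is a hypothetical name used only
 * for this example.
 */
static inline int hns3_example_group_space(struct hns3_enet_ring_group *group)
{
	struct hns3_enet_ring *ring;
	int space = 0;

	hns3_for_each_ring(ring, *group)
		space += ring_space(ring);

	return space;
}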
#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)

void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);
void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);

void hns3_request_update_promisc_mode(struct hnae3_handle *handle);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
#endif