/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true, \
        .msix_irqs = 4U, \
        .irq_mask = ~0U, \
        .vecs = HW_ATL_B0_RSS_MAX, \
        .tcs = HW_ATL_B0_TC_MAX, \
        .rxd_alignment = 1U, \
        .rxd_size = HW_ATL_B0_RXD_SIZE, \
        .rxds = 4U * 1024U, \
        .txd_alignment = 1U, \
        .txd_size = HW_ATL_B0_TXD_SIZE, \
        .txds = 8U * 1024U, \
        .txhwb_alignment = 4096U, \
        .tx_rings = HW_ATL_B0_TX_RINGS, \
        .rx_rings = HW_ATL_B0_RX_RINGS, \
        .hw_features = NETIF_F_HW_CSUM | \
                       NETIF_F_RXCSUM | \
                       NETIF_F_RXHASH | \
                       NETIF_F_SG | \
                       NETIF_F_TSO | \
                       NETIF_F_LRO, \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true, \
        .mtu = HW_ATL_B0_MTU_JUMBO, \
        .mac_regs_count = 88, \
        .hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = HW_ATL_B0_RATE_10G |
                          HW_ATL_B0_RATE_5G |
                          HW_ATL_B0_RATE_2G5 |
                          HW_ATL_B0_RATE_1G |
                          HW_ATL_B0_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = HW_ATL_B0_RATE_10G |
                          HW_ATL_B0_RATE_5G |
                          HW_ATL_B0_RATE_2G5 |
                          HW_ATL_B0_RATE_1G |
                          HW_ATL_B0_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = HW_ATL_B0_RATE_5G |
                          HW_ATL_B0_RATE_2G5 |
                          HW_ATL_B0_RATE_1G |
                          HW_ATL_B0_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = HW_ATL_B0_RATE_2G5 |
                          HW_ATL_B0_RATE_1G |
                          HW_ATL_B0_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        err = hw_atl_utils_soft_reset(self);
        if (err)
                return err;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

        return err;
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;
        bool is_rx_flow_control = false;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

        /* Tx buf size */
        buff_size = HW_ATL_B0_TXBUF_MAX;

        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024 / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024 / 32U) * 50U) /
                                                   100U, tc);

        /* QoS Rx buf size per TC */
        tc = 0;
        is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
        buff_size = HW_ATL_B0_RXBUF_MAX;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                    (1024U / 32U) * 50U) /
                                                   100U, tc);
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
                               1000U, 10U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
                    HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];

        memset(bitary, 0, sizeof(bitary));

        /* Pack the 3-bit queue indices into the 16-bit words of the
         * redirection table image before writing it to hardware.
         */
        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                         ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
                               1000U, 10U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        unsigned int i;

        /* TX checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        /* LRO offloads */
        {
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
        }
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                          0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        if (cfg->vlan_id) {
                hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
                hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
                hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);

                hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
                hw_atl_rpf_vlan_untagged_act_set(self, 1U);

                hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
                hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
                hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
        } else {
                hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
        }

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00005040U,
                        IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        int err = 0;
        unsigned int h = 0U;
        unsigned int l = 0U;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
                { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
                { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
                { 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
        };

        int err = 0;
        u32 val;

        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

        /* TX DMA total request limit. B0 hardware cannot handle
         * more than (8K - MRRS) bytes of incoming DMA data.
         * Value 24 in 256-byte units (6K).
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                   [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                                   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}

static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_txc) {
                        txd->ctl |= (buff->len_l3 << 31) |
                                    (buff->len_l2 << 24) |
                                    HW_ATL_B0_TXD_CTL_CMD_TCP |
                                    HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                     (buff->len_l4 << 8) |
                                     (buff->len_l3 >> 1);

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                     ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw, aq_ring->idx);

        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                              aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
             sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_B0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        struct device *ndev = aq_nic_get_dev(ring->aq_nic);

        for (; ring->hw_head != ring->sw_tail;
             ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;

                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                is_err = (0x0000003CU & rxd_wb->status);

                is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
                is_err &= ~0x20U; /* exclude validity bit */

                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                if (is_rx_check_sum_enabled) {
                        if (0x0U == (pkt_type & 0x3U))
                                buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;

                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;

                        /* Checksum offload workaround for small packets */
                        if (rxd_wb->pkt_len <= 60) {
                                buff->is_ip_cso = 0U;
                                buff->is_cso_err = 0U;
                        }
                }

                is_err &= ~0x18U;

                dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

                if (is_err || rxd_wb->type & 0x1000U) {
                        /* status error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
                                /* last 4 bytes */
                                u16 rss_type = rxd_wb->type & 0xFU;

                                if (rss_type && rss_type < 0x8U) {
                                        buff->is_hash_l4 = (rss_type == 0x4 ||
                                                            rss_type == 0x5);
                                        buff->rss_hash = rxd_wb->rss_hash;
                                }
                        }

                        if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                                buff->len = rxd_wb->pkt_len %
                                        AQ_CFG_RX_FRAME_MAX;
                                buff->len = buff->len ?
                                        buff->len : AQ_CFG_RX_FRAME_MAX;
                                buff->next = 0U;
                                buff->is_eop = 1U;
                        } else {
                                if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                                    rxd_wb->status) {
                                        /* LRO */
                                        buff->next = rxd_wb->next_desc_ptr;
                                        ++ring->stats.rx.lro_packets;
                                } else {
                                        /* jumbo */
                                        buff->next =
                                                aq_ring_next_dx(ring,
                                                                ring->hw_head);
                                        ++ring->stats.rx.jumbo_packets;
                                }
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);
        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        unsigned int i = 0U;

        hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);

        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI));

        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled &&
                                           (i <= self->aq_nic_cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_CFG_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
        for (self->aq_nic_cfg->mc_list_count = 0U;
             self->aq_nic_cfg->mc_list_count < count;
             ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
                                                        l, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
                                                        h, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
                                           HW_ATL_B0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_tx = 2U;
        u32 itr_rx = 2U;

        switch (self->aq_nic_cfg->itr) {
        case AQ_CFG_INTERRUPT_MODERATION_ON:
        case AQ_CFG_INTERRUPT_MODERATION_AUTO:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
                        /* HW timers are in 2us units */
                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
                        int tx_min_timer = tx_max_timer / 2;

                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
                        int rx_min_timer = rx_max_timer / 2;

                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

                        itr_tx |= tx_min_timer << 0x8U;
                        itr_tx |= tx_max_timer << 0x10U;
                        itr_rx |= rx_min_timer << 0x8U;
                        itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
                                {0xfU, 0xffU},  /* 10Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
                                {0xfU, 0x1ffU}, /* 2.5Gbit */
                                {0xfU, 0x1ffU}, /* 1Gbit */
                                {0xfU, 0x1ffU}, /* 100Mbit */
                        };

                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
                                {0x6U, 0x38U},  /* 10Gbit */
                                {0xCU, 0x70U},  /* 5Gbit */
                                {0xCU, 0x70U},  /* 5Gbit 5GS */
                                {0x18U, 0xE0U}, /* 2.5Gbit */
                                {0x30U, 0x80U}, /* 1Gbit */
                                {0x4U, 0x50U},  /* 100Mbit */
                        };

                        unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                        /* Update user visible ITR settings */
                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
                                                        [speed_index][1] * 2;
                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
                                                        [speed_index][1] * 2;

                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][0] << 0x8U;
                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][1] << 0x10U;

                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][0] << 0x8U;
                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][1] << 0x10U;
                }
                break;
        case AQ_CFG_INTERRUPT_MODERATION_OFF:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
                itr_tx = 0U;
                itr_rx = 0U;
                break;
        }

        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
        .hw_init = hw_atl_b0_hw_init,
        .hw_deinit = hw_atl_utils_hw_deinit,
        .hw_set_power = hw_atl_utils_hw_set_power,
        .hw_reset = hw_atl_b0_hw_reset,
        .hw_start = hw_atl_b0_hw_start,
        .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
        .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
        .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
        .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
        .hw_stop = hw_atl_b0_hw_stop,

        .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,

        .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
        .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,

        .hw_irq_enable = hw_atl_b0_hw_irq_enable,
        .hw_irq_disable = hw_atl_b0_hw_irq_disable,
        .hw_irq_read = hw_atl_b0_hw_irq_read,

        .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
        .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
        .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
        .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
        .hw_rss_set = hw_atl_b0_hw_rss_set,
        .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
        .hw_get_regs = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version = hw_atl_utils_get_fw_version,
};