// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ring.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_utils.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_llh_internal.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"
#include "hw_atl2_llh_internal.h"

static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
				       u32 tag, u32 mask, u32 action);

#define DEFAULT_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true,		  \
	.msix_irqs = 8U,		  \
	.irq_mask = ~0U,		  \
	.vecs = HW_ATL2_RSS_MAX,	  \
	.tcs_max = HW_ATL2_TC_MAX,	  \
	.rxd_alignment = 1U,		  \
	.rxd_size = HW_ATL2_RXD_SIZE,	  \
	.rxds_max = HW_ATL2_MAX_RXD,	  \
	.rxds_min = HW_ATL2_MIN_RXD,	  \
	.txd_alignment = 1U,		  \
	.txd_size = HW_ATL2_TXD_SIZE,	  \
	.txds_max = HW_ATL2_MAX_TXD,	  \
	.txds_min = HW_ATL2_MIN_TXD,	  \
	.txhwb_alignment = 4096U,	  \
	.tx_rings = HW_ATL2_TX_RINGS,	  \
	.rx_rings = HW_ATL2_RX_RINGS,	  \
	.hw_features = NETIF_F_HW_CSUM |  \
			NETIF_F_RXCSUM |  \
			NETIF_F_RXHASH |  \
			NETIF_F_SG |	  \
			NETIF_F_TSO |	  \
			NETIF_F_TSO6 |	  \
			NETIF_F_LRO |	  \
			NETIF_F_NTUPLE |  \
			NETIF_F_HW_VLAN_CTAG_FILTER | \
			NETIF_F_HW_VLAN_CTAG_RX |     \
			NETIF_F_HW_VLAN_CTAG_TX |     \
			NETIF_F_GSO_UDP_L4 |	      \
			NETIF_F_GSO_PARTIAL |	      \
			NETIF_F_HW_TC,		      \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true,		  \
	.mtu = HW_ATL2_MTU_JUMBO,	  \
	.mac_regs_count = 72,		  \
	.hw_alive_check_addr = 0x10U,	  \
	.priv_data_len = sizeof(struct hw_atl2_priv)

const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
	DEFAULT_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_1G_HALF |
			  AQ_NIC_RATE_100M |
			  AQ_NIC_RATE_100M_HALF |
			  AQ_NIC_RATE_10M |
			  AQ_NIC_RATE_10M_HALF,
};

static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
{
	return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
}

static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	int err;

	err = hw_atl2_utils_soft_reset(self);
	if (err)
		return err;

	memset(priv, 0, sizeof(*priv));

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}
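
/* Map Rx/Tx queues onto traffic classes. Several per-queue TC fields are
 * packed into each mapping register, so the accumulated map word is only
 * written out once the next queue index crosses a register boundary.
 */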
static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int tcs, q_per_tc;
	unsigned int tc, q;
	u32 rx_map = 0;
	u32 tx_map = 0;

	hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U);

	switch (cfg->tc_mode) {
	case AQ_TC_MODE_8TCS:
		tcs = 8;
		q_per_tc = 4;
		break;
	case AQ_TC_MODE_4TCS:
		tcs = 4;
		q_per_tc = 8;
		break;
	default:
		return -EINVAL;
	}

	for (tc = 0; tc != tcs; tc++) {
		unsigned int tc_q_offset = tc * q_per_tc;

		for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
			rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
			if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
			    HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
				aq_hw_write_reg(self,
						HW_ATL2_RX_Q_TC_MAP_ADR(q),
						rx_map);
				rx_map = 0;
			}

			tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
			if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
			    HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
				aq_hw_write_reg(self,
						HW_ATL2_TX_Q_TC_MAP_ADR(q),
						tx_map);
				tx_map = 0;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
	u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
	unsigned int prio = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	tx_buff_size /= cfg->tcs;
	rx_buff_size /= cfg->tcs;
	for (tc = 0; tc < cfg->tcs; tc++) {
		u32 threshold = 0U;

		/* Tx buf size per TC */
		hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);

		threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
		hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);

		threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
		hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);

		/* QoS Rx buf size per TC */
		hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);

		threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
		hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);

		threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
		hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);

		hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
	}

	/* QoS 802.1p priority -> TC mapping */
	for (prio = 0; prio < 8; ++prio)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
							cfg->prio_tc_map[prio]);

	/* ATL2 Apply ring to TC mapping */
	hw_atl2_hw_queue_to_tc_map_set(self);

	return aq_hw_err_from_flags(self);
}
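
/* Program the per-TC RSS redirection tables: each indirection entry is
 * rebased onto the owning TC's queue range (tc * queues-per-TC), and the
 * second redirection table is selected when more than four TCs are in use.
 */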
static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
			      struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table = rss_params->indirection_table;
	const u32 num_tcs = aq_hw_num_tcs(self);
	u32 rpf_redir2_enable;
	int tc;
	int i;

	rpf_redir2_enable = num_tcs > 4 ? 1 : 0;

	hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable);

	for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) {
		for (tc = 0; tc != num_tcs; tc++) {
			hw_atl2_new_rpf_rss_redir_set(self, tc, i,
						      tc *
						      aq_hw_q_per_tc(self) +
						      indirection_table[i]);
		}
	}

	return aq_hw_err_from_flags(self);
}
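
/* Set up per-TC minimum rates (data arbiter weights) and maximum rates.
 * The max-rate shaper uses a fixed-point rate divisor with integer (X)
 * and fractional (Y) parts computed against the nominal 10G rate;
 * X = 1, Y = 0 marks a descriptor queue as not rate controlled.
 */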
static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
	static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
	/* Scale factor is based on the number of bits in fractional portion */
	static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
	static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
				    HW_ATL_TPS_DESC_RATE_Y_SHIFT;
	const u32 link_speed = self->aq_link_status.mbps;
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
	unsigned long num_min_rated_tcs = 0;
	u32 tc_weight[AQ_CFG_TCS_MAX];
	u32 fixed_max_credit_4b;
	u32 fixed_max_credit;
	u8 min_rate_msk = 0;
	u32 sum_weight = 0;
	int tc;

	/* By default max_credit is based upon MTU (in unit of 64b) */
	fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
	/* in unit of 4b */
	fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4;

	if (link_speed) {
		min_rate_msk = nic_cfg->tc_min_rate_msk &
			       (BIT(nic_cfg->tcs) - 1);
		num_min_rated_tcs = hweight8(min_rate_msk);
	}

	/* First, calculate weights where min_rate is specified */
	if (num_min_rated_tcs) {
		for (tc = 0; tc != nic_cfg->tcs; tc++) {
			if (!nic_cfg->tc_min_rate[tc]) {
				tc_weight[tc] = 0;
				continue;
			}

			tc_weight[tc] = (-1L + link_speed +
					 nic_cfg->tc_min_rate[tc] *
					 max_weight) /
					link_speed;
			tc_weight[tc] = min(tc_weight[tc], max_weight);
			sum_weight += tc_weight[tc];
		}
	}

	/* WSP, if min_rate is set for at least one TC.
	 * RR otherwise.
	 */
	hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
	/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
	 * leave Descriptor TC Arbiter as RR.
	 */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);

	hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);

	for (tc = 0; tc != nic_cfg->tcs; tc++) {
		const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
		u32 weight, max_credit;

		hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
							      fixed_max_credit);
		hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);

		if (num_min_rated_tcs) {
			weight = tc_weight[tc];

			if (!weight && sum_weight < max_weight)
				weight = (max_weight - sum_weight) /
					 (nic_cfg->tcs - num_min_rated_tcs);
			else if (!weight)
				weight = 0x640;

			max_credit = max(2 * weight, fixed_max_credit_4b);
		} else {
			weight = 0x640;
			max_credit = 0xFFF0;
		}

		hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
		hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
							       max_credit);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, en);

		if (en) {
			/* Nominal rate is always 10G */
			const u32 rate = 10000U * scale /
					 nic_cfg->tc_max_rate[tc];
			const u32 rate_int = rate >>
					     HW_ATL_TPS_DESC_RATE_Y_WIDTH;
			const u32 rate_frac = rate & frac_msk;

			hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
		} else {
			/* A value of 1 indicates the queue is not
			 * rate controlled.
			 */
			hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
			hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
		}
	}
	for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
		const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);

		hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
		hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
		hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;

	/* Tx TC/RSS number config */
	hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);

	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
	u16 action;
	u8 index;
	int i;

	/* Action Resolver Table (ART) is used by RPF to decide which action
	 * to take with a packet based upon input tag and tag mask, where:
	 * - input tag is a combination of 3-bit VLAN Prio (PCP) and
	 *   29-bit concatenation of all tags from filter block;
	 * - tag mask is a mask used for matching against input tag.
	 * The input tag is compared with all the requested tags in the
	 * record table to find a match. The Action field of the matched
	 * REC entry is used for further processing. If multiple entries
	 * match, the Action field of the lowest-numbered REC entry is used.
	 */
	hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
	hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
				     HW_ATL2_MAC_UC);
	hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);

	/* FW reserves the beginning of ART, thus all driver entries must
	 * start from the offset specified in FW caps.
	 */
	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    HW_ATL2_ACTION_DROP);

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_ACTION_DROP);

	/* Configure ART to map given VLAN Prio (PCP) to the TC index for
	 * RSS redirection table.
	 */
	for (i = 0; i < 8; i++) {
		action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]);

		index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i;
		hw_atl2_act_rslvr_table_set(self, index,
					    i << HW_ATL2_RPF_TAG_PCP_OFFSET,
					    HW_ATL2_RPF_TAG_PCP_MASK, action);
	}
}
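
/* The promiscuous "off" ART records either actively DROP unmatched
 * traffic or are DISABLEd (letting everything through), depending on the
 * requested promiscuous state and the current L2/VLAN promisc settings.
 */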
static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
						  bool promisc)
{
	u16 off_action = (!promisc &&
			  !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
				HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
}

static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
	u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	bool vlan_promisc_enable;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    off_action);

	/* turn on VLAN promisc mode too */
	vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
					      vlan_promisc_enable);
}
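
/* ART records are arbitrated with firmware: acquire the ACT_RSLVR
 * semaphore (polling until it reads 1), write the record, then release
 * the semaphore again.
 */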
static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
				       u32 tag, u32 mask, u32 action)
{
	u32 val;
	int err;

	err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
					self, val, val == 1,
					1, 10000U);
	if (err)
		return err;

	hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
					 action);

	hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);

	return err;
}

static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);

	/* RSS Ring selection */
	hw_atl_b0_hw_init_rx_rss_ctrl1(self);

	/* Multicast filters */
	for (i = HW_ATL2_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
	hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Always accept untagged packets */
	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	hw_atl2_hw_init_new_rx_filters(self);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	static u32 aq_hw_atl2_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
	};

	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	u8 base_index, count;
	int err;

	err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
							  &count);
	if (err)
		return err;

	priv->art_base_index = 8 * base_index;

	hw_atl2_init_launchtime(self);

	hw_atl2_hw_init_tx_path(self);
	hw_atl2_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl2_hw_qos_set(self);
	hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	hw_atl2_rpf_new_enable_set(self, 1);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						  1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL2_ERR_INT << 0x18) |
				    (1U << 0x1F)) |
				   ((HW_ATL2_ERR_INT << 0x10) |
				    (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
}

static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
}
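
/* A2 promiscuous state is handled via the ART "off" records above;
 * everything else is delegated to the B0 packet filter implementation.
 */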
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
					unsigned int packet_filter)
{
	hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));

	return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
}

#undef IS_FILTER_ENABLED

static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
					 u8 ar_mac
					 [AQ_HW_MULTICAST_ADDRESS_MAX]
					 [ETH_ALEN],
					 u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U;
	     cfg->mc_list_count < count;
	     ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
							HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
							HW_ATL2_MAC_MIN + i);

		hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
					   HW_ATL2_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
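
/* Per-ring moderation control words carry the minimum timer at bit 8 and
 * the maximum timer at bit 16. Hardware timers tick in 2 us units, so
 * values are halved when written and doubled in the user-visible config.
 */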
static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   tx_max_timer);
			tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   tx_min_timer);
			rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   rx_max_timer);
			rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			static unsigned int hw_atl2_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};
			static unsigned int hw_atl2_timers_table_rx_[][2] = {
				{0x6U, 0x38U}, /* 10Gbit */
				{0xCU, 0x70U}, /* 5Gbit */
				{0xCU, 0x70U}, /* 5Gbit 5GS */
				{0x18U, 0xE0U}, /* 2.5Gbit */
				{0x30U, 0x80U}, /* 1Gbit */
				{0x4U, 0x50U}, /* 100Mbit */
			};
			unsigned int mbps = self->aq_link_status.mbps;
			unsigned int speed_index;

			speed_index = hw_atl_utils_mbps_2_speed_index(mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
						   [speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
						   [speed_index][1] * 2;

			itr_tx |= hw_atl2_timers_table_tx_
				  [speed_index][0] << 0x8U;
			itr_tx |= hw_atl2_timers_table_tx_
				  [speed_index][1] << 0x10U;

			itr_rx |= hw_atl2_timers_table_rx_
				  [speed_index][0] << 0x8U;
			itr_rx |= hw_atl2_timers_table_rx_
				  [speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	for (i = HW_ATL2_RINGS_MAX; i--;) {
		hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
	hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);

	return 0;
}

static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}

static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
			       struct aq_rx_filter_vlan *aq_vlans)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u32 queue;
	u8 index;
	int i;

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);

	for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
		queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);

		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
		hw_atl2_act_rslvr_table_set(self, index, 0, 0,
					    HW_ATL2_ACTION_DISABLE);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id, i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);

			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);

				hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);

				index = priv->art_base_index +
					HW_ATL2_RPF_VLAN_USER_INDEX + i;
				hw_atl2_act_rslvr_table_set(self, index,
					(i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
					HW_ATL2_RPF_TAG_VLAN_MASK, queue);
			} else {
				hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* set promisc in case of disabling the VLAN filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);

	return aq_hw_err_from_flags(self);
}
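
/* A2 reuses the B0 datapath callbacks (ring start/stop, xmit, receive,
 * IRQ handling) and overrides init/reset, filtering, RSS and interrupt
 * moderation with ATL2-specific implementations.
 */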
const struct aq_hw_ops hw_atl2_ops = {
	.hw_soft_reset = hw_atl2_utils_soft_reset,
	.hw_prepare = hw_atl2_utils_initfw,
	.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
	.hw_init = hw_atl2_hw_init,
	.hw_reset = hw_atl2_hw_reset,
	.hw_start = hw_atl_b0_hw_start,
	.hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop = hw_atl2_hw_stop,

	.hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable = hw_atl_b0_hw_irq_disable,
	.hw_irq_read = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init = hw_atl2_hw_ring_rx_init,
	.hw_ring_tx_init = hw_atl2_hw_ring_tx_init,
	.hw_packet_filter_set = hw_atl2_hw_packet_filter_set,
	.hw_filter_vlan_set = hw_atl2_hw_vlan_set,
	.hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl,
	.hw_multicast_list_set = hw_atl2_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
	.hw_rss_set = hw_atl2_hw_rss_set,
	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
	.hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit,
	.hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
	.hw_get_fw_version = hw_atl2_utils_get_fw_version,
	.hw_set_offload = hw_atl_b0_hw_offload_set,
	.hw_set_loopback = hw_atl_b0_set_loopback,
	.hw_set_fc = hw_atl_b0_set_fc,
};