1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "debug.h" 7 #include "fw.h" 8 #include "mac.h" 9 #include "ps.h" 10 #include "reg.h" 11 #include "util.h" 12 13 int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, 14 enum rtw89_mac_hwmod_sel sel) 15 { 16 u32 val, r_val; 17 18 if (sel == RTW89_DMAC_SEL) { 19 r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN); 20 val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN); 21 } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) { 22 r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN); 23 val = B_AX_CMAC_EN; 24 } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) { 25 r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND); 26 val = B_AX_CMAC1_FEN; 27 } else { 28 return -EINVAL; 29 } 30 if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD || 31 (val & r_val) != val) 32 return -EFAULT; 33 34 return 0; 35 } 36 37 int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val) 38 { 39 u8 lte_ctrl; 40 int ret; 41 42 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 43 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 44 if (ret) 45 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 46 47 rtw89_write32(rtwdev, R_AX_LTE_WDATA, val); 48 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset); 49 50 return ret; 51 } 52 53 int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val) 54 { 55 u8 lte_ctrl; 56 int ret; 57 58 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 59 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 60 if (ret) 61 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 62 63 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset); 64 *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA); 65 66 return ret; 67 } 68 69 static 70 int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl) 71 { 72 u32 ctrl_reg, data_reg, ctrl_data; 73 u32 val; 74 int ret; 75 76 switch (ctrl->type) { 77 case DLE_CTRL_TYPE_WDE: 78 ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL; 79 data_reg = R_AX_WDE_DBG_FUN_INTF_DATA; 80 ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) | 81 FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) | 82 B_AX_WDE_DFI_ACTIVE; 83 break; 84 case DLE_CTRL_TYPE_PLE: 85 ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL; 86 data_reg = R_AX_PLE_DBG_FUN_INTF_DATA; 87 ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) | 88 FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) | 89 B_AX_PLE_DFI_ACTIVE; 90 break; 91 default: 92 rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type); 93 return -EINVAL; 94 } 95 96 rtw89_write32(rtwdev, ctrl_reg, ctrl_data); 97 98 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE), 99 1, 1000, false, rtwdev, ctrl_reg); 100 if (ret) { 101 rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n", 102 ctrl_reg, ctrl_data); 103 return ret; 104 } 105 106 ctrl->out_data = rtw89_read32(rtwdev, data_reg); 107 return 0; 108 } 109 110 static int dle_dfi_quota(struct rtw89_dev *rtwdev, 111 struct rtw89_mac_dle_dfi_quota *quota) 112 { 113 struct rtw89_mac_dle_dfi_ctrl ctrl; 114 int ret; 115 116 ctrl.type = quota->dle_type; 117 ctrl.target = DLE_DFI_TYPE_QUOTA; 118 ctrl.addr = quota->qtaid; 119 ret = dle_dfi_ctrl(rtwdev, &ctrl); 120 if (ret) { 121 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 122 return ret; 123 } 124 125 quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data); 126 quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data); 127 return 0; 128 } 129 130 static int dle_dfi_qempty(struct rtw89_dev *rtwdev, 131 struct rtw89_mac_dle_dfi_qempty *qempty) 132 { 133 struct rtw89_mac_dle_dfi_ctrl ctrl; 134 u32 ret; 135 136 ctrl.type = qempty->dle_type; 137 ctrl.target = DLE_DFI_TYPE_QEMPTY; 138 ctrl.addr = qempty->grpsel; 139 ret = dle_dfi_ctrl(rtwdev, &ctrl); 140 if (ret) { 141 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 142 return ret; 143 } 144 145 qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data); 146 return 0; 147 } 148 149 static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev) 150 { 151 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ", 152 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); 153 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n", 154 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); 155 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ", 156 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); 157 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n", 158 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); 159 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ", 160 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); 161 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n", 162 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); 163 } 164 165 static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev) 166 { 167 struct rtw89_mac_dle_dfi_qempty qempty; 168 struct rtw89_mac_dle_dfi_quota quota; 169 struct rtw89_mac_dle_dfi_ctrl ctrl; 170 u32 val, not_empty, i; 171 int ret; 172 173 qempty.dle_type = DLE_CTRL_TYPE_PLE; 174 qempty.grpsel = 0; 175 ret = dle_dfi_qempty(rtwdev, &qempty); 176 if (ret) 177 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 178 else 179 rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty); 180 181 for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) { 182 if (!(not_empty & BIT(0))) 183 continue; 184 ctrl.type = DLE_CTRL_TYPE_PLE; 185 ctrl.target = DLE_DFI_TYPE_QLNKTBL; 186 ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) | 187 FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i); 188 ret = dle_dfi_ctrl(rtwdev, &ctrl); 189 if (ret) 190 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 191 else 192 rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i, 193 FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK, 194 ctrl.out_data)); 195 } 196 197 quota.dle_type = DLE_CTRL_TYPE_PLE; 198 quota.qtaid = 6; 199 ret = dle_dfi_quota(rtwdev, "a); 200 if (ret) 201 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 202 else 203 rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n", 204 quota.rsv_pgnum, quota.use_pgnum); 205 206 val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG); 207 rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n", 208 FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val)); 209 rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n", 210 FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val)); 211 212 dump_err_status_dispatcher(rtwdev); 213 } 214 215 static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev, 216 enum mac_ax_err_info err) 217 { 218 u32 dbg, event; 219 220 dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO); 221 event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg); 222 223 switch (event) { 224 case MAC_AX_L0_TO_L1_RX_QTA_LOST: 225 rtw89_info(rtwdev, "quota lost!\n"); 226 rtw89_mac_dump_qta_lost(rtwdev); 227 break; 228 default: 229 break; 230 } 231 } 232 233 static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, 234 enum mac_ax_err_info err) 235 { 236 u32 dmac_err, cmac_err; 237 238 if (err != MAC_AX_ERR_L1_ERR_DMAC && 239 err != MAC_AX_ERR_L0_PROMOTE_TO_L1) 240 return; 241 242 rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); 243 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n", 244 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 245 246 cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR); 247 rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err); 248 dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); 249 rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err); 250 251 if (dmac_err) { 252 rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ", 253 rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG)); 254 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n", 255 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG)); 256 } 257 258 if (dmac_err & B_AX_WDRLS_ERR_FLAG) { 259 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ", 260 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); 261 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n", 262 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); 263 } 264 265 if (dmac_err & B_AX_WSEC_ERR_FLAG) { 266 rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n", 267 rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); 268 rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n", 269 rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); 270 rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n", 271 rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); 272 rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n", 273 rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); 274 rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n", 275 rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); 276 rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n", 277 rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); 278 rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n", 279 rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); 280 rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n", 281 rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); 282 rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n", 283 rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); 284 rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n", 285 rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); 286 } 287 288 if (dmac_err & B_AX_MPDU_ERR_FLAG) { 289 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ", 290 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); 291 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n", 292 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); 293 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ", 294 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); 295 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n", 296 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); 297 } 298 299 if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { 300 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ", 301 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); 302 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n", 303 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); 304 } 305 306 if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { 307 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 308 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 309 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 310 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 311 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 312 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 313 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 314 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 315 dump_err_status_dispatcher(rtwdev); 316 } 317 318 if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { 319 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", 320 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); 321 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", 322 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); 323 } 324 325 if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { 326 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 327 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 328 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 329 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 330 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 331 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 332 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 333 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 334 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n", 335 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); 336 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n", 337 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); 338 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n", 339 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); 340 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", 341 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); 342 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n", 343 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); 344 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n", 345 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); 346 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n", 347 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); 348 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", 349 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); 350 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", 351 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); 352 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", 353 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); 354 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", 355 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); 356 dump_err_status_dispatcher(rtwdev); 357 } 358 359 if (dmac_err & B_AX_PKTIN_ERR_FLAG) { 360 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 361 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 362 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 363 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 364 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 365 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 366 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 367 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 368 } 369 370 if (dmac_err & B_AX_DISPATCH_ERR_FLAG) 371 dump_err_status_dispatcher(rtwdev); 372 373 if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) { 374 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ", 375 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR)); 376 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n", 377 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR)); 378 } 379 380 if (dmac_err & BIT(11)) { 381 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", 382 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); 383 } 384 385 if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { 386 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ", 387 rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR)); 388 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n", 389 rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR)); 390 } 391 392 if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { 393 rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ", 394 rtw89_read32(rtwdev, R_AX_PTCL_IMR0)); 395 rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n", 396 rtw89_read32(rtwdev, R_AX_PTCL_ISR0)); 397 } 398 399 if (cmac_err & B_AX_DMA_TOP_ERR_IND) { 400 rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n", 401 rtw89_read32(rtwdev, R_AX_DLE_CTRL)); 402 } 403 404 if (cmac_err & B_AX_PHYINTF_ERR_IND) { 405 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n", 406 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR)); 407 } 408 409 if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { 410 rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ", 411 rtw89_read32(rtwdev, R_AX_TXPWR_IMR)); 412 rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n", 413 rtw89_read32(rtwdev, R_AX_TXPWR_ISR)); 414 } 415 416 if (cmac_err & B_AX_WMAC_RX_ERR_IND) { 417 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ", 418 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 419 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n", 420 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR)); 421 } 422 423 if (cmac_err & B_AX_WMAC_TX_ERR_IND) { 424 rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ", 425 rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR)); 426 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n", 427 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 428 } 429 430 rtwdev->hci.ops->dump_err_status(rtwdev); 431 432 if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1) 433 rtw89_mac_dump_l0_to_l1(rtwdev, err); 434 435 rtw89_info(rtwdev, "<---\n"); 436 } 437 438 u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) 439 { 440 u32 err; 441 int ret; 442 443 ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000, 444 false, rtwdev, R_AX_HALT_C2H_CTRL); 445 if (ret) { 446 rtw89_warn(rtwdev, "Polling FW err status fail\n"); 447 return ret; 448 } 449 450 err = rtw89_read32(rtwdev, R_AX_HALT_C2H); 451 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 452 453 rtw89_fw_st_dbg_dump(rtwdev); 454 rtw89_mac_dump_err_status(rtwdev, err); 455 456 return err; 457 } 458 EXPORT_SYMBOL(rtw89_mac_get_err_status); 459 460 int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err) 461 { 462 u32 halt; 463 int ret = 0; 464 465 if (err > MAC_AX_SET_ERR_MAX) { 466 rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err); 467 return -EINVAL; 468 } 469 470 ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000, 471 100000, false, rtwdev, R_AX_HALT_H2C_CTRL); 472 if (ret) { 473 rtw89_err(rtwdev, "FW doesn't receive previous msg\n"); 474 return -EFAULT; 475 } 476 477 rtw89_write32(rtwdev, R_AX_HALT_H2C, err); 478 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER); 479 480 return 0; 481 } 482 EXPORT_SYMBOL(rtw89_mac_set_err_status); 483 484 const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = { 485 2, 40, 0, 0, 1, 0, 0, 0 486 }; 487 488 static int hfc_reset_param(struct rtw89_dev *rtwdev) 489 { 490 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 491 struct rtw89_hfc_param_ini param_ini = {NULL}; 492 u8 qta_mode = rtwdev->mac.dle_info.qta_mode; 493 494 switch (rtwdev->hci.type) { 495 case RTW89_HCI_TYPE_PCIE: 496 param_ini = rtwdev->chip->hfc_param_ini[qta_mode]; 497 param->en = 0; 498 break; 499 default: 500 return -EINVAL; 501 } 502 503 if (param_ini.pub_cfg) 504 param->pub_cfg = *param_ini.pub_cfg; 505 506 if (param_ini.prec_cfg) { 507 param->prec_cfg = *param_ini.prec_cfg; 508 rtwdev->hal.sw_amsdu_max_size = 509 param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT; 510 } 511 512 if (param_ini.ch_cfg) 513 param->ch_cfg = param_ini.ch_cfg; 514 515 memset(¶m->ch_info, 0, sizeof(param->ch_info)); 516 memset(¶m->pub_info, 0, sizeof(param->pub_info)); 517 param->mode = param_ini.mode; 518 519 return 0; 520 } 521 522 static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch) 523 { 524 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 525 const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg; 526 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 527 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 528 529 if (ch >= RTW89_DMA_CH_NUM) 530 return -EINVAL; 531 532 if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) || 533 ch_cfg[ch].max > pub_cfg->pub_max) 534 return -EINVAL; 535 if (ch_cfg[ch].grp >= grp_num) 536 return -EINVAL; 537 538 return 0; 539 } 540 541 static int hfc_pub_info_chk(struct rtw89_dev *rtwdev) 542 { 543 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 544 const struct rtw89_hfc_pub_cfg *cfg = ¶m->pub_cfg; 545 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 546 547 if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) { 548 if (rtwdev->chip->chip_id == RTL8852A) 549 return 0; 550 else 551 return -EFAULT; 552 } 553 554 return 0; 555 } 556 557 static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev) 558 { 559 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 560 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 561 562 if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max) 563 return -EFAULT; 564 565 return 0; 566 } 567 568 static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch) 569 { 570 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 571 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 572 int ret = 0; 573 u32 val = 0; 574 575 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 576 if (ret) 577 return ret; 578 579 ret = hfc_ch_cfg_chk(rtwdev, ch); 580 if (ret) 581 return ret; 582 583 if (ch > RTW89_DMA_B1HI) 584 return -EINVAL; 585 586 val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) | 587 u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) | 588 (cfg[ch].grp ? B_AX_GRP : 0); 589 rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val); 590 591 return 0; 592 } 593 594 static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch) 595 { 596 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 597 struct rtw89_hfc_ch_info *info = param->ch_info; 598 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 599 u32 val; 600 u32 ret; 601 602 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 603 if (ret) 604 return ret; 605 606 if (ch > RTW89_DMA_H2C) 607 return -EINVAL; 608 609 val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4); 610 info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK); 611 if (ch < RTW89_DMA_H2C) 612 info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK); 613 else 614 info[ch].used = cfg[ch].min - info[ch].aval; 615 616 return 0; 617 } 618 619 static int hfc_pub_ctrl(struct rtw89_dev *rtwdev) 620 { 621 const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg; 622 u32 val; 623 int ret; 624 625 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 626 if (ret) 627 return ret; 628 629 ret = hfc_pub_cfg_chk(rtwdev); 630 if (ret) 631 return ret; 632 633 val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) | 634 u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK); 635 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val); 636 637 val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK); 638 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val); 639 640 return 0; 641 } 642 643 static int hfc_upd_mix_info(struct rtw89_dev *rtwdev) 644 { 645 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 646 struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 647 struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 648 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 649 u32 val; 650 int ret; 651 652 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 653 if (ret) 654 return ret; 655 656 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1); 657 info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK); 658 info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK); 659 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3); 660 info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK); 661 info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK); 662 info->pub_aval = 663 u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2), 664 B_AX_PUB_AVAL_PG_MASK); 665 info->wp_aval = 666 u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1), 667 B_AX_WP_AVAL_PG_MASK); 668 669 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 670 param->en = val & B_AX_HCI_FC_EN ? 1 : 0; 671 param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0; 672 param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK); 673 prec_cfg->ch011_full_cond = 674 u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK); 675 prec_cfg->h2c_full_cond = 676 u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK); 677 prec_cfg->wp_ch07_full_cond = 678 u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 679 prec_cfg->wp_ch811_full_cond = 680 u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 681 682 val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL); 683 prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK); 684 prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK); 685 686 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2); 687 pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK); 688 689 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1); 690 prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK); 691 prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK); 692 693 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2); 694 pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK); 695 696 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1); 697 pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK); 698 pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK); 699 700 ret = hfc_pub_info_chk(rtwdev); 701 if (param->en && ret) 702 return ret; 703 704 return 0; 705 } 706 707 static void hfc_h2c_cfg(struct rtw89_dev *rtwdev) 708 { 709 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 710 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 711 u32 val; 712 713 val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 714 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 715 716 rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL, 717 B_AX_HCI_FC_CH12_FULL_COND_MASK, 718 prec_cfg->h2c_full_cond); 719 } 720 721 static void hfc_mix_cfg(struct rtw89_dev *rtwdev) 722 { 723 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 724 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 725 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 726 u32 val; 727 728 val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) | 729 u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 730 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 731 732 val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK); 733 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val); 734 735 val = u32_encode_bits(prec_cfg->wp_ch07_prec, 736 B_AX_PREC_PAGE_WP_CH07_MASK) | 737 u32_encode_bits(prec_cfg->wp_ch811_prec, 738 B_AX_PREC_PAGE_WP_CH811_MASK); 739 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val); 740 741 val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL), 742 param->mode, B_AX_HCI_FC_MODE_MASK); 743 val = u32_replace_bits(val, prec_cfg->ch011_full_cond, 744 B_AX_HCI_FC_WD_FULL_COND_MASK); 745 val = u32_replace_bits(val, prec_cfg->h2c_full_cond, 746 B_AX_HCI_FC_CH12_FULL_COND_MASK); 747 val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond, 748 B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 749 val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond, 750 B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 751 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 752 } 753 754 static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en) 755 { 756 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 757 u32 val; 758 759 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 760 param->en = en; 761 param->h2c_en = h2c_en; 762 val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN); 763 val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) : 764 (val & ~B_AX_HCI_FC_CH12_EN); 765 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 766 } 767 768 static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en) 769 { 770 u8 ch; 771 u32 ret = 0; 772 773 if (reset) 774 ret = hfc_reset_param(rtwdev); 775 if (ret) 776 return ret; 777 778 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 779 if (ret) 780 return ret; 781 782 hfc_func_en(rtwdev, false, false); 783 784 if (!en && h2c_en) { 785 hfc_h2c_cfg(rtwdev); 786 hfc_func_en(rtwdev, en, h2c_en); 787 return ret; 788 } 789 790 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 791 ret = hfc_ch_ctrl(rtwdev, ch); 792 if (ret) 793 return ret; 794 } 795 796 ret = hfc_pub_ctrl(rtwdev); 797 if (ret) 798 return ret; 799 800 hfc_mix_cfg(rtwdev); 801 if (en || h2c_en) { 802 hfc_func_en(rtwdev, en, h2c_en); 803 udelay(10); 804 } 805 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 806 ret = hfc_upd_ch_info(rtwdev, ch); 807 if (ret) 808 return ret; 809 } 810 ret = hfc_upd_mix_info(rtwdev); 811 812 return ret; 813 } 814 815 #define PWR_POLL_CNT 2000 816 static int pwr_cmd_poll(struct rtw89_dev *rtwdev, 817 const struct rtw89_pwr_cfg *cfg) 818 { 819 u8 val = 0; 820 int ret; 821 u32 addr = cfg->base == PWR_INTF_MSK_SDIO ? 822 cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr; 823 824 ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk), 825 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr); 826 827 if (!ret) 828 return 0; 829 830 rtw89_warn(rtwdev, "[ERR] Polling timeout\n"); 831 rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr); 832 rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val); 833 834 return -EBUSY; 835 } 836 837 static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk, 838 u8 intf_msk, const struct rtw89_pwr_cfg *cfg) 839 { 840 const struct rtw89_pwr_cfg *cur_cfg; 841 u32 addr; 842 u8 val; 843 844 for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) { 845 if (!(cur_cfg->intf_msk & intf_msk) || 846 !(cur_cfg->cv_msk & cv_msk)) 847 continue; 848 849 switch (cur_cfg->cmd) { 850 case PWR_CMD_WRITE: 851 addr = cur_cfg->addr; 852 853 if (cur_cfg->base == PWR_BASE_SDIO) 854 addr |= SDIO_LOCAL_BASE_ADDR; 855 856 val = rtw89_read8(rtwdev, addr); 857 val &= ~(cur_cfg->msk); 858 val |= (cur_cfg->val & cur_cfg->msk); 859 860 rtw89_write8(rtwdev, addr, val); 861 break; 862 case PWR_CMD_POLL: 863 if (pwr_cmd_poll(rtwdev, cur_cfg)) 864 return -EBUSY; 865 break; 866 case PWR_CMD_DELAY: 867 if (cur_cfg->val == PWR_DELAY_US) 868 udelay(cur_cfg->addr); 869 else 870 fsleep(cur_cfg->addr * 1000); 871 break; 872 default: 873 return -EINVAL; 874 } 875 } 876 877 return 0; 878 } 879 880 static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev, 881 const struct rtw89_pwr_cfg * const *cfg_seq) 882 { 883 int ret; 884 885 for (; *cfg_seq; cfg_seq++) { 886 ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv), 887 PWR_INTF_MSK_PCIE, *cfg_seq); 888 if (ret) 889 return -EBUSY; 890 } 891 892 return 0; 893 } 894 895 static enum rtw89_rpwm_req_pwr_state 896 rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev) 897 { 898 enum rtw89_rpwm_req_pwr_state state; 899 900 switch (rtwdev->ps_mode) { 901 case RTW89_PS_MODE_RFOFF: 902 state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF; 903 break; 904 case RTW89_PS_MODE_CLK_GATED: 905 state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED; 906 break; 907 case RTW89_PS_MODE_PWR_GATED: 908 state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED; 909 break; 910 default: 911 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 912 break; 913 } 914 return state; 915 } 916 917 static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev, 918 enum rtw89_rpwm_req_pwr_state req_pwr_state) 919 { 920 u16 request; 921 922 request = rtw89_read16(rtwdev, R_AX_RPWM); 923 request ^= request | PS_RPWM_TOGGLE; 924 925 rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) & 926 RPWM_SEQ_NUM_MAX; 927 request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num); 928 929 request |= req_pwr_state; 930 931 if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 932 request |= PS_RPWM_ACK; 933 934 rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request); 935 } 936 937 static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, 938 enum rtw89_rpwm_req_pwr_state req_pwr_state) 939 { 940 bool request_deep_mode; 941 bool in_deep_mode; 942 u8 rpwm_req_num; 943 u8 cpwm_rsp_seq; 944 u8 cpwm_seq; 945 u8 cpwm_status; 946 947 if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 948 request_deep_mode = true; 949 else 950 request_deep_mode = false; 951 952 if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K)) 953 in_deep_mode = true; 954 else 955 in_deep_mode = false; 956 957 if (request_deep_mode != in_deep_mode) 958 return -EPERM; 959 960 if (request_deep_mode) 961 return 0; 962 963 rpwm_req_num = rtwdev->mac.rpwm_seq_num; 964 cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, 965 PS_CPWM_RSP_SEQ_NUM); 966 967 if (rpwm_req_num != cpwm_rsp_seq) 968 return -EPERM; 969 970 rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & 971 CPWM_SEQ_NUM_MAX; 972 973 cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM); 974 if (cpwm_seq != rtwdev->mac.cpwm_seq_num) 975 return -EPERM; 976 977 cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE); 978 if (cpwm_status != req_pwr_state) 979 return -EPERM; 980 981 return 0; 982 } 983 984 void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) 985 { 986 enum rtw89_rpwm_req_pwr_state state; 987 int ret; 988 989 if (enter) 990 state = rtw89_mac_get_req_pwr_state(rtwdev); 991 else 992 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 993 994 rtw89_mac_send_rpwm(rtwdev, state); 995 ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret, 996 1000, 15000, false, rtwdev, state); 997 if (ret) 998 rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", 999 enter ? "entering" : "leaving"); 1000 } 1001 1002 static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) 1003 { 1004 #define PWR_ACT 1 1005 const struct rtw89_chip_info *chip = rtwdev->chip; 1006 const struct rtw89_pwr_cfg * const *cfg_seq; 1007 struct rtw89_hal *hal = &rtwdev->hal; 1008 int ret; 1009 u8 val; 1010 1011 if (on) 1012 cfg_seq = chip->pwr_on_seq; 1013 else 1014 cfg_seq = chip->pwr_off_seq; 1015 1016 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 1017 __rtw89_leave_ps_mode(rtwdev); 1018 1019 val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK); 1020 if (on && val == PWR_ACT) { 1021 rtw89_err(rtwdev, "MAC has already powered on\n"); 1022 return -EBUSY; 1023 } 1024 1025 ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq); 1026 if (ret) 1027 return ret; 1028 1029 if (on) { 1030 set_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1031 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR); 1032 } else { 1033 clear_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1034 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 1035 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR); 1036 hal->current_channel = 0; 1037 } 1038 1039 return 0; 1040 #undef PWR_ACT 1041 } 1042 1043 void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) 1044 { 1045 rtw89_mac_power_switch(rtwdev, false); 1046 } 1047 1048 static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 1049 { 1050 u32 func_en = 0; 1051 u32 ck_en = 0; 1052 u32 c1pc_en = 0; 1053 u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1}; 1054 u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1}; 1055 1056 func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | 1057 B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | 1058 B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN; 1059 ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN | 1060 B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN | 1061 B_AX_RMAC_CKEN; 1062 c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN | 1063 B_AX_R_SYM_WLCMAC1_P1_PC_EN | 1064 B_AX_R_SYM_WLCMAC1_P2_PC_EN | 1065 B_AX_R_SYM_WLCMAC1_P3_PC_EN | 1066 B_AX_R_SYM_WLCMAC1_P4_PC_EN; 1067 1068 if (en) { 1069 if (mac_idx == RTW89_MAC_1) { 1070 rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1071 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1072 B_AX_R_SYM_ISO_CMAC12PP); 1073 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1074 B_AX_CMAC1_FEN); 1075 } 1076 rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en); 1077 rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en); 1078 } else { 1079 rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en); 1080 rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en); 1081 if (mac_idx == RTW89_MAC_1) { 1082 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1083 B_AX_CMAC1_FEN); 1084 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1085 B_AX_R_SYM_ISO_CMAC12PP); 1086 rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1087 } 1088 } 1089 1090 return 0; 1091 } 1092 1093 static int dmac_func_en(struct rtw89_dev *rtwdev) 1094 { 1095 u32 val32; 1096 1097 val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN | 1098 B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | 1099 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN | 1100 B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN); 1101 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32); 1102 1103 val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN | 1104 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN | 1105 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN | 1106 B_AX_WD_RLS_CLK_EN); 1107 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); 1108 1109 return 0; 1110 } 1111 1112 static int chip_func_en(struct rtw89_dev *rtwdev) 1113 { 1114 rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK); 1115 1116 return 0; 1117 } 1118 1119 static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev) 1120 { 1121 int ret; 1122 1123 ret = dmac_func_en(rtwdev); 1124 if (ret) 1125 return ret; 1126 1127 ret = cmac_func_en(rtwdev, 0, true); 1128 if (ret) 1129 return ret; 1130 1131 ret = chip_func_en(rtwdev); 1132 if (ret) 1133 return ret; 1134 1135 return ret; 1136 } 1137 1138 /* PCIE 64 */ 1139 const struct rtw89_dle_size wde_size0 = { 1140 RTW89_WDE_PG_64, 4095, 1, 1141 }; 1142 1143 /* DLFW */ 1144 const struct rtw89_dle_size wde_size4 = { 1145 RTW89_WDE_PG_64, 0, 4096, 1146 }; 1147 1148 /* PCIE */ 1149 const struct rtw89_dle_size ple_size0 = { 1150 RTW89_PLE_PG_128, 1520, 16, 1151 }; 1152 1153 /* DLFW */ 1154 const struct rtw89_dle_size ple_size4 = { 1155 RTW89_PLE_PG_128, 64, 1472, 1156 }; 1157 1158 /* PCIE 64 */ 1159 const struct rtw89_wde_quota wde_qt0 = { 1160 3792, 196, 0, 107, 1161 }; 1162 1163 /* DLFW */ 1164 const struct rtw89_wde_quota wde_qt4 = { 1165 0, 0, 0, 0, 1166 }; 1167 1168 /* PCIE SCC */ 1169 const struct rtw89_ple_quota ple_qt4 = { 1170 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8, 1171 }; 1172 1173 /* PCIE SCC */ 1174 const struct rtw89_ple_quota ple_qt5 = { 1175 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120, 1176 }; 1177 1178 /* DLFW */ 1179 const struct rtw89_ple_quota ple_qt13 = { 1180 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0 1181 }; 1182 1183 static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, 1184 enum rtw89_qta_mode mode) 1185 { 1186 struct rtw89_mac_info *mac = &rtwdev->mac; 1187 const struct rtw89_dle_mem *cfg; 1188 1189 cfg = &rtwdev->chip->dle_mem[mode]; 1190 if (!cfg) 1191 return NULL; 1192 1193 if (cfg->mode != mode) { 1194 rtw89_warn(rtwdev, "qta mode unmatch!\n"); 1195 return NULL; 1196 } 1197 1198 mac->dle_info.wde_pg_size = cfg->wde_size->pge_size; 1199 mac->dle_info.ple_pg_size = cfg->ple_size->pge_size; 1200 mac->dle_info.qta_mode = mode; 1201 mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma; 1202 mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma; 1203 1204 return cfg; 1205 } 1206 1207 static inline u32 dle_used_size(const struct rtw89_dle_size *wde, 1208 const struct rtw89_dle_size *ple) 1209 { 1210 return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) + 1211 ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num); 1212 } 1213 1214 static void dle_func_en(struct rtw89_dev *rtwdev, bool enable) 1215 { 1216 if (enable) 1217 rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, 1218 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1219 else 1220 rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN, 1221 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1222 } 1223 1224 static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable) 1225 { 1226 if (enable) 1227 rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN, 1228 B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); 1229 else 1230 rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN, 1231 B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); 1232 } 1233 1234 static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg) 1235 { 1236 const struct rtw89_dle_size *size_cfg; 1237 u32 val; 1238 u8 bound = 0; 1239 1240 val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG); 1241 size_cfg = cfg->wde_size; 1242 1243 switch (size_cfg->pge_size) { 1244 default: 1245 case RTW89_WDE_PG_64: 1246 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64, 1247 B_AX_WDE_PAGE_SEL_MASK); 1248 break; 1249 case RTW89_WDE_PG_128: 1250 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128, 1251 B_AX_WDE_PAGE_SEL_MASK); 1252 break; 1253 case RTW89_WDE_PG_256: 1254 rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n"); 1255 return -EINVAL; 1256 } 1257 1258 val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK); 1259 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1260 B_AX_WDE_FREE_PAGE_NUM_MASK); 1261 rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val); 1262 1263 val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG); 1264 bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num) 1265 * size_cfg->pge_size / DLE_BOUND_UNIT; 1266 size_cfg = cfg->ple_size; 1267 1268 switch (size_cfg->pge_size) { 1269 default: 1270 case RTW89_PLE_PG_64: 1271 rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n"); 1272 return -EINVAL; 1273 case RTW89_PLE_PG_128: 1274 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128, 1275 B_AX_PLE_PAGE_SEL_MASK); 1276 break; 1277 case RTW89_PLE_PG_256: 1278 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256, 1279 B_AX_PLE_PAGE_SEL_MASK); 1280 break; 1281 } 1282 1283 val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK); 1284 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1285 B_AX_PLE_FREE_PAGE_NUM_MASK); 1286 rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val); 1287 1288 return 0; 1289 } 1290 1291 #define INVALID_QT_WCPU U16_MAX 1292 #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \ 1293 do { \ 1294 val = ((_min_x) & \ 1295 B_AX_ ## _module ## _MIN_SIZE_MASK) | \ 1296 (((_max_x) << 16) & \ 1297 B_AX_ ## _module ## _MAX_SIZE_MASK); \ 1298 rtw89_write32(rtwdev, \ 1299 R_AX_ ## _module ## _QTA ## _idx ## _CFG, \ 1300 val); \ 1301 } while (0) 1302 #define SET_QUOTA(_x, _module, _idx) \ 1303 SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx) 1304 1305 static void wde_quota_cfg(struct rtw89_dev *rtwdev, 1306 const struct rtw89_wde_quota *min_cfg, 1307 const struct rtw89_wde_quota *max_cfg, 1308 u16 ext_wde_min_qt_wcpu) 1309 { 1310 u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ? 1311 ext_wde_min_qt_wcpu : min_cfg->wcpu; 1312 u32 val; 1313 1314 SET_QUOTA(hif, WDE, 0); 1315 SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1); 1316 SET_QUOTA(pkt_in, WDE, 3); 1317 SET_QUOTA(cpu_io, WDE, 4); 1318 } 1319 1320 static void ple_quota_cfg(struct rtw89_dev *rtwdev, 1321 const struct rtw89_ple_quota *min_cfg, 1322 const struct rtw89_ple_quota *max_cfg) 1323 { 1324 u32 val; 1325 1326 SET_QUOTA(cma0_tx, PLE, 0); 1327 SET_QUOTA(cma1_tx, PLE, 1); 1328 SET_QUOTA(c2h, PLE, 2); 1329 SET_QUOTA(h2c, PLE, 3); 1330 SET_QUOTA(wcpu, PLE, 4); 1331 SET_QUOTA(mpdu_proc, PLE, 5); 1332 SET_QUOTA(cma0_dma, PLE, 6); 1333 SET_QUOTA(cma1_dma, PLE, 7); 1334 SET_QUOTA(bb_rpt, PLE, 8); 1335 SET_QUOTA(wd_rel, PLE, 9); 1336 SET_QUOTA(cpu_io, PLE, 10); 1337 } 1338 1339 #undef SET_QUOTA 1340 1341 static void dle_quota_cfg(struct rtw89_dev *rtwdev, 1342 const struct rtw89_dle_mem *cfg, 1343 u16 ext_wde_min_qt_wcpu) 1344 { 1345 wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu); 1346 ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt); 1347 } 1348 1349 static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, 1350 enum rtw89_qta_mode ext_mode) 1351 { 1352 const struct rtw89_dle_mem *cfg, *ext_cfg; 1353 u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU; 1354 int ret = 0; 1355 u32 ini; 1356 1357 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1358 if (ret) 1359 return ret; 1360 1361 cfg = get_dle_mem_cfg(rtwdev, mode); 1362 if (!cfg) { 1363 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1364 ret = -EINVAL; 1365 goto error; 1366 } 1367 1368 if (mode == RTW89_QTA_DLFW) { 1369 ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode); 1370 if (!ext_cfg) { 1371 rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n", 1372 ext_mode); 1373 ret = -EINVAL; 1374 goto error; 1375 } 1376 ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu; 1377 } 1378 1379 if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { 1380 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 1381 ret = -EINVAL; 1382 goto error; 1383 } 1384 1385 dle_func_en(rtwdev, false); 1386 dle_clk_en(rtwdev, true); 1387 1388 ret = dle_mix_cfg(rtwdev, cfg); 1389 if (ret) { 1390 rtw89_err(rtwdev, "[ERR] dle mix cfg\n"); 1391 goto error; 1392 } 1393 dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu); 1394 1395 dle_func_en(rtwdev, true); 1396 1397 ret = read_poll_timeout(rtw89_read32, ini, 1398 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1399 2000, false, rtwdev, R_AX_WDE_INI_STATUS); 1400 if (ret) { 1401 rtw89_err(rtwdev, "[ERR]WDE cfg ready\n"); 1402 return ret; 1403 } 1404 1405 ret = read_poll_timeout(rtw89_read32, ini, 1406 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1407 2000, false, rtwdev, R_AX_PLE_INI_STATUS); 1408 if (ret) { 1409 rtw89_err(rtwdev, "[ERR]PLE cfg ready\n"); 1410 return ret; 1411 } 1412 1413 return 0; 1414 error: 1415 dle_func_en(rtwdev, false); 1416 rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n", 1417 rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS)); 1418 rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n", 1419 rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS)); 1420 1421 return ret; 1422 } 1423 1424 static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) 1425 { 1426 u32 msk32; 1427 u32 val32; 1428 1429 msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH | 1430 B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 | 1431 B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS | 1432 B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 1433 B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | 1434 B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | 1435 B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | 1436 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | 1437 B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 1438 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX | 1439 B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 1440 B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 1441 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU; 1442 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 1443 1444 if ((val32 & msk32) == msk32) 1445 return true; 1446 1447 return false; 1448 } 1449 1450 static int sta_sch_init(struct rtw89_dev *rtwdev) 1451 { 1452 u32 p_val; 1453 u8 val; 1454 int ret; 1455 1456 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1457 if (ret) 1458 return ret; 1459 1460 val = rtw89_read8(rtwdev, R_AX_SS_CTRL); 1461 val |= B_AX_SS_EN; 1462 rtw89_write8(rtwdev, R_AX_SS_CTRL, val); 1463 1464 ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1, 1465 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL); 1466 if (ret) { 1467 rtw89_err(rtwdev, "[ERR]STA scheduler init\n"); 1468 return ret; 1469 } 1470 1471 rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); 1472 1473 return 0; 1474 } 1475 1476 static int mpdu_proc_init(struct rtw89_dev *rtwdev) 1477 { 1478 int ret; 1479 1480 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1481 if (ret) 1482 return ret; 1483 1484 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); 1485 rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); 1486 rtw89_write32_set(rtwdev, R_AX_MPDU_PROC, 1487 B_AX_APPEND_FCS | B_AX_A_ICV_ERR); 1488 rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL); 1489 1490 return 0; 1491 } 1492 1493 static int sec_eng_init(struct rtw89_dev *rtwdev) 1494 { 1495 u32 val = 0; 1496 int ret; 1497 1498 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1499 if (ret) 1500 return ret; 1501 1502 val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL); 1503 /* init clock */ 1504 val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP); 1505 /* init TX encryption */ 1506 val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); 1507 val |= (B_AX_MC_DEC | B_AX_BC_DEC); 1508 val &= ~B_AX_TX_PARTIAL_MODE; 1509 rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); 1510 1511 /* init MIC ICV append */ 1512 val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); 1513 val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); 1514 1515 /* option init */ 1516 rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); 1517 1518 return 0; 1519 } 1520 1521 static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1522 { 1523 int ret; 1524 1525 ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); 1526 if (ret) { 1527 rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); 1528 return ret; 1529 } 1530 1531 ret = hfc_init(rtwdev, true, true, true); 1532 if (ret) { 1533 rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); 1534 return ret; 1535 } 1536 1537 ret = sta_sch_init(rtwdev); 1538 if (ret) { 1539 rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); 1540 return ret; 1541 } 1542 1543 ret = mpdu_proc_init(rtwdev); 1544 if (ret) { 1545 rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); 1546 return ret; 1547 } 1548 1549 ret = sec_eng_init(rtwdev); 1550 if (ret) { 1551 rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); 1552 return ret; 1553 } 1554 1555 return ret; 1556 } 1557 1558 static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1559 { 1560 u32 val, reg; 1561 u16 p_val; 1562 int ret; 1563 1564 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1565 if (ret) 1566 return ret; 1567 1568 reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); 1569 1570 val = rtw89_read32(rtwdev, reg); 1571 val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | 1572 B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; 1573 rtw89_write32(rtwdev, reg, val); 1574 1575 ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), 1576 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR); 1577 if (ret) { 1578 rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); 1579 return ret; 1580 } 1581 1582 return 0; 1583 } 1584 1585 static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1586 { 1587 u32 ret; 1588 u32 reg; 1589 1590 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1591 if (ret) 1592 return ret; 1593 1594 reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); 1595 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US); 1596 1597 return 0; 1598 } 1599 1600 static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, 1601 enum rtw89_machdr_frame_type type, 1602 enum rtw89_mac_fwd_target fwd_target, 1603 u8 mac_idx) 1604 { 1605 u32 reg; 1606 u32 val; 1607 1608 switch (fwd_target) { 1609 case RTW89_FWD_DONT_CARE: 1610 val = RX_FLTR_FRAME_DROP; 1611 break; 1612 case RTW89_FWD_TO_HOST: 1613 val = RX_FLTR_FRAME_TO_HOST; 1614 break; 1615 case RTW89_FWD_TO_WLAN_CPU: 1616 val = RX_FLTR_FRAME_TO_WLCPU; 1617 break; 1618 default: 1619 rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); 1620 return -EINVAL; 1621 } 1622 1623 switch (type) { 1624 case RTW89_MGNT: 1625 reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); 1626 break; 1627 case RTW89_CTRL: 1628 reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); 1629 break; 1630 case RTW89_DATA: 1631 reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); 1632 break; 1633 default: 1634 rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); 1635 return -EINVAL; 1636 } 1637 rtw89_write32(rtwdev, reg, val); 1638 1639 return 0; 1640 } 1641 1642 static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1643 { 1644 int ret, i; 1645 u32 mac_ftlr, plcp_ftlr; 1646 1647 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1648 if (ret) 1649 return ret; 1650 1651 for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { 1652 ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST, 1653 mac_idx); 1654 if (ret) 1655 return ret; 1656 } 1657 mac_ftlr = rtwdev->hal.rx_fltr; 1658 plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | 1659 B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | 1660 B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | 1661 B_AX_HE_SIGB_CRC_CHK; 1662 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx), 1663 mac_ftlr); 1664 rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx), 1665 plcp_ftlr); 1666 1667 return 0; 1668 } 1669 1670 static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) 1671 { 1672 u32 reg, val32; 1673 u32 b_rsp_chk_nav, b_rsp_chk_cca; 1674 1675 b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV | 1676 B_AX_RSP_CHK_BASIC_NAV; 1677 b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 | 1678 B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA | 1679 B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA; 1680 1681 switch (rtwdev->chip->chip_id) { 1682 case RTL8852A: 1683 case RTL8852B: 1684 reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); 1685 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; 1686 rtw89_write32(rtwdev, reg, val32); 1687 1688 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1689 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; 1690 rtw89_write32(rtwdev, reg, val32); 1691 break; 1692 default: 1693 reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); 1694 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; 1695 rtw89_write32(rtwdev, reg, val32); 1696 1697 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1698 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; 1699 rtw89_write32(rtwdev, reg, val32); 1700 break; 1701 } 1702 } 1703 1704 static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1705 { 1706 u32 val, reg; 1707 int ret; 1708 1709 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1710 if (ret) 1711 return ret; 1712 1713 reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx); 1714 val = rtw89_read32(rtwdev, reg); 1715 val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | 1716 B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | 1717 B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 | 1718 B_AX_CTN_CHK_INTRA_NAV | 1719 B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | 1720 B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | 1721 B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | 1722 B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA); 1723 val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | 1724 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | 1725 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | 1726 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV); 1727 1728 rtw89_write32(rtwdev, reg, val); 1729 1730 _patch_dis_resp_chk(rtwdev, mac_idx); 1731 1732 return 0; 1733 } 1734 1735 static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1736 { 1737 u32 reg; 1738 int ret; 1739 1740 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1741 if (ret) 1742 return ret; 1743 reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx); 1744 rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); 1745 1746 return 0; 1747 } 1748 1749 static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1750 { 1751 u32 reg; 1752 int ret; 1753 1754 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1755 if (ret) 1756 return ret; 1757 1758 reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); 1759 rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); 1760 1761 return 0; 1762 } 1763 1764 static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1765 { 1766 u32 reg, val, sifs; 1767 int ret; 1768 1769 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1770 if (ret) 1771 return ret; 1772 1773 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1774 val = rtw89_read32(rtwdev, reg); 1775 val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; 1776 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); 1777 1778 switch (rtwdev->chip->chip_id) { 1779 case RTL8852A: 1780 sifs = WMAC_SPEC_SIFS_OFDM_52A; 1781 break; 1782 case RTL8852B: 1783 sifs = WMAC_SPEC_SIFS_OFDM_52B; 1784 break; 1785 default: 1786 sifs = WMAC_SPEC_SIFS_OFDM_52C; 1787 break; 1788 } 1789 val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK; 1790 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); 1791 rtw89_write32(rtwdev, reg, val); 1792 1793 reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx); 1794 rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); 1795 1796 return 0; 1797 } 1798 1799 static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1800 { 1801 #define TRXCFG_RMAC_CCA_TO 32 1802 #define TRXCFG_RMAC_DATA_TO 15 1803 #define RX_MAX_LEN_UNIT 512 1804 #define PLD_RLS_MAX_PG 127 1805 int ret; 1806 u32 reg, rx_max_len, rx_qta; 1807 u16 val; 1808 1809 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1810 if (ret) 1811 return ret; 1812 1813 reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); 1814 rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); 1815 1816 reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx); 1817 val = rtw89_read16(rtwdev, reg); 1818 val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, 1819 B_AX_RX_DLK_DATA_TIME_MASK); 1820 val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO, 1821 B_AX_RX_DLK_CCA_TIME_MASK); 1822 rtw89_write16(rtwdev, reg, val); 1823 1824 reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx); 1825 rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); 1826 1827 reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx); 1828 if (mac_idx == RTW89_MAC_0) 1829 rx_qta = rtwdev->mac.dle_info.c0_rx_qta; 1830 else 1831 rx_qta = rtwdev->mac.dle_info.c1_rx_qta; 1832 rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta; 1833 rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size / 1834 RX_MAX_LEN_UNIT; 1835 rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ? 1836 B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len; 1837 rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); 1838 1839 if (rtwdev->chip->chip_id == RTL8852A && 1840 rtwdev->hal.cv == CHIP_CBV) { 1841 rtw89_write16_mask(rtwdev, 1842 rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx), 1843 B_AX_RX_DLK_CCA_TIME_MASK, 0); 1844 rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx), 1845 BIT(12)); 1846 } 1847 1848 reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx); 1849 rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); 1850 1851 return ret; 1852 } 1853 1854 static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1855 { 1856 u32 val, reg; 1857 int ret; 1858 1859 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1860 if (ret) 1861 return ret; 1862 1863 reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); 1864 val = rtw89_read32(rtwdev, reg); 1865 val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); 1866 val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); 1867 val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); 1868 rtw89_write32(rtwdev, reg, val); 1869 1870 return 0; 1871 } 1872 1873 static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 1874 { 1875 const struct rtw89_dle_mem *cfg; 1876 1877 cfg = get_dle_mem_cfg(rtwdev, mode); 1878 if (!cfg) { 1879 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1880 return false; 1881 } 1882 1883 return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma); 1884 } 1885 1886 static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1887 { 1888 u32 val, reg; 1889 int ret; 1890 1891 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1892 if (ret) 1893 return ret; 1894 1895 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 1896 reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); 1897 val = rtw89_read32(rtwdev, reg); 1898 val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, 1899 B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); 1900 val |= B_AX_HW_CTS2SELF_EN; 1901 rtw89_write32(rtwdev, reg, val); 1902 1903 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); 1904 val = rtw89_read32(rtwdev, reg); 1905 val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); 1906 val &= ~B_AX_PTCL_TX_ARB_TO_MODE; 1907 rtw89_write32(rtwdev, reg, val); 1908 } 1909 1910 reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); 1911 val = rtw89_read32(rtwdev, reg); 1912 val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); 1913 val |= B_AX_HW_CTS2SELF_EN; 1914 rtw89_write32(rtwdev, reg, val); 1915 1916 return 0; 1917 } 1918 1919 static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1920 { 1921 int ret; 1922 1923 ret = scheduler_init(rtwdev, mac_idx); 1924 if (ret) { 1925 rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); 1926 return ret; 1927 } 1928 1929 ret = addr_cam_init(rtwdev, mac_idx); 1930 if (ret) { 1931 rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, 1932 ret); 1933 return ret; 1934 } 1935 1936 ret = rx_fltr_init(rtwdev, mac_idx); 1937 if (ret) { 1938 rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, 1939 ret); 1940 return ret; 1941 } 1942 1943 ret = cca_ctrl_init(rtwdev, mac_idx); 1944 if (ret) { 1945 rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, 1946 ret); 1947 return ret; 1948 } 1949 1950 ret = spatial_reuse_init(rtwdev, mac_idx); 1951 if (ret) { 1952 rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", 1953 mac_idx, ret); 1954 return ret; 1955 } 1956 1957 ret = tmac_init(rtwdev, mac_idx); 1958 if (ret) { 1959 rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); 1960 return ret; 1961 } 1962 1963 ret = trxptcl_init(rtwdev, mac_idx); 1964 if (ret) { 1965 rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); 1966 return ret; 1967 } 1968 1969 ret = rmac_init(rtwdev, mac_idx); 1970 if (ret) { 1971 rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); 1972 return ret; 1973 } 1974 1975 ret = cmac_com_init(rtwdev, mac_idx); 1976 if (ret) { 1977 rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); 1978 return ret; 1979 } 1980 1981 ret = ptcl_init(rtwdev, mac_idx); 1982 if (ret) { 1983 rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); 1984 return ret; 1985 } 1986 1987 return ret; 1988 } 1989 1990 static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, 1991 struct rtw89_mac_c2h_info *c2h_info) 1992 { 1993 struct rtw89_mac_h2c_info h2c_info = {0}; 1994 u32 ret; 1995 1996 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; 1997 h2c_info.content_len = 0; 1998 1999 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); 2000 if (ret) 2001 return ret; 2002 2003 if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP) 2004 return -EINVAL; 2005 2006 return 0; 2007 } 2008 2009 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) 2010 { 2011 struct rtw89_hal *hal = &rtwdev->hal; 2012 const struct rtw89_chip_info *chip = rtwdev->chip; 2013 struct rtw89_mac_c2h_info c2h_info = {0}; 2014 struct rtw89_c2h_phy_cap *cap = 2015 (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0]; 2016 u32 ret; 2017 2018 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info); 2019 if (ret) 2020 return ret; 2021 2022 hal->tx_nss = cap->tx_nss ? 2023 min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss; 2024 hal->rx_nss = cap->rx_nss ? 2025 min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss; 2026 2027 rtw89_debug(rtwdev, RTW89_DBG_FW, 2028 "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", 2029 hal->tx_nss, cap->tx_nss, chip->tx_nss, 2030 hal->rx_nss, cap->rx_nss, chip->rx_nss); 2031 2032 return 0; 2033 } 2034 2035 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 2036 u16 tx_en_u16, u16 mask_u16) 2037 { 2038 u32 ret; 2039 struct rtw89_mac_c2h_info c2h_info = {0}; 2040 struct rtw89_mac_h2c_info h2c_info = {0}; 2041 struct rtw89_h2creg_sch_tx_en *h2creg = 2042 (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg; 2043 2044 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 2045 h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN; 2046 h2creg->tx_en = tx_en_u16; 2047 h2creg->mask = mask_u16; 2048 h2creg->band = band; 2049 2050 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 2051 if (ret) 2052 return ret; 2053 2054 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) 2055 return -EINVAL; 2056 2057 return 0; 2058 } 2059 2060 static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, 2061 u16 tx_en, u16 tx_en_mask) 2062 { 2063 u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); 2064 u16 val; 2065 int ret; 2066 2067 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2068 if (ret) 2069 return ret; 2070 2071 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 2072 return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, 2073 tx_en, tx_en_mask); 2074 2075 val = rtw89_read16(rtwdev, reg); 2076 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 2077 rtw89_write16(rtwdev, reg, val); 2078 2079 return 0; 2080 } 2081 2082 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, 2083 u16 *tx_en, enum rtw89_sch_tx_sel sel) 2084 { 2085 int ret; 2086 2087 *tx_en = rtw89_read16(rtwdev, 2088 rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); 2089 2090 switch (sel) { 2091 case RTW89_SCH_TX_SEL_ALL: 2092 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); 2093 if (ret) 2094 return ret; 2095 break; 2096 case RTW89_SCH_TX_SEL_HIQ: 2097 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2098 0, B_AX_CTN_TXEN_HGQ); 2099 if (ret) 2100 return ret; 2101 break; 2102 case RTW89_SCH_TX_SEL_MG0: 2103 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2104 0, B_AX_CTN_TXEN_MGQ); 2105 if (ret) 2106 return ret; 2107 break; 2108 case RTW89_SCH_TX_SEL_MACID: 2109 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); 2110 if (ret) 2111 return ret; 2112 break; 2113 default: 2114 return 0; 2115 } 2116 2117 return 0; 2118 } 2119 2120 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en) 2121 { 2122 int ret; 2123 2124 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff); 2125 if (ret) 2126 return ret; 2127 2128 return 0; 2129 } 2130 2131 static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, 2132 bool wd) 2133 { 2134 u32 val, reg; 2135 int ret; 2136 2137 reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; 2138 val = buf_len; 2139 val |= B_AX_WD_BUF_REQ_EXEC; 2140 rtw89_write32(rtwdev, reg, val); 2141 2142 reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; 2143 2144 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, 2145 1, 2000, false, rtwdev, reg); 2146 if (ret) 2147 return 0xffff; 2148 2149 return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); 2150 } 2151 2152 static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev, 2153 struct rtw89_cpuio_ctrl *ctrl_para, 2154 bool wd) 2155 { 2156 u32 val, cmd_type, reg; 2157 int ret; 2158 2159 cmd_type = ctrl_para->cmd_type; 2160 2161 reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; 2162 val = 0; 2163 val = u32_replace_bits(val, ctrl_para->start_pktid, 2164 B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); 2165 val = u32_replace_bits(val, ctrl_para->end_pktid, 2166 B_AX_WD_CPUQ_OP_END_PKTID_MASK); 2167 rtw89_write32(rtwdev, reg, val); 2168 2169 reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; 2170 val = 0; 2171 val = u32_replace_bits(val, ctrl_para->src_pid, 2172 B_AX_CPUQ_OP_SRC_PID_MASK); 2173 val = u32_replace_bits(val, ctrl_para->src_qid, 2174 B_AX_CPUQ_OP_SRC_QID_MASK); 2175 val = u32_replace_bits(val, ctrl_para->dst_pid, 2176 B_AX_CPUQ_OP_DST_PID_MASK); 2177 val = u32_replace_bits(val, ctrl_para->dst_qid, 2178 B_AX_CPUQ_OP_DST_QID_MASK); 2179 rtw89_write32(rtwdev, reg, val); 2180 2181 reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; 2182 val = 0; 2183 val = u32_replace_bits(val, cmd_type, 2184 B_AX_CPUQ_OP_CMD_TYPE_MASK); 2185 val = u32_replace_bits(val, ctrl_para->macid, 2186 B_AX_CPUQ_OP_MACID_MASK); 2187 val = u32_replace_bits(val, ctrl_para->pkt_num, 2188 B_AX_CPUQ_OP_PKTNUM_MASK); 2189 val |= B_AX_WD_CPUQ_OP_EXEC; 2190 rtw89_write32(rtwdev, reg, val); 2191 2192 reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; 2193 2194 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, 2195 1, 2000, false, rtwdev, reg); 2196 if (ret) 2197 return ret; 2198 2199 if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || 2200 cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) 2201 ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); 2202 2203 return 0; 2204 } 2205 2206 static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 2207 { 2208 const struct rtw89_dle_mem *cfg; 2209 struct rtw89_cpuio_ctrl ctrl_para = {0}; 2210 u16 pkt_id; 2211 int ret; 2212 2213 cfg = get_dle_mem_cfg(rtwdev, mode); 2214 if (!cfg) { 2215 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2216 return -EINVAL; 2217 } 2218 2219 if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { 2220 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2221 return -EINVAL; 2222 } 2223 2224 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); 2225 2226 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true); 2227 if (pkt_id == 0xffff) { 2228 rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); 2229 return -ENOMEM; 2230 } 2231 2232 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2233 ctrl_para.start_pktid = pkt_id; 2234 ctrl_para.end_pktid = pkt_id; 2235 ctrl_para.pkt_num = 0; 2236 ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; 2237 ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; 2238 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true); 2239 if (ret) { 2240 rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); 2241 return -EFAULT; 2242 } 2243 2244 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false); 2245 if (pkt_id == 0xffff) { 2246 rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); 2247 return -ENOMEM; 2248 } 2249 2250 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2251 ctrl_para.start_pktid = pkt_id; 2252 ctrl_para.end_pktid = pkt_id; 2253 ctrl_para.pkt_num = 0; 2254 ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS; 2255 ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; 2256 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false); 2257 if (ret) { 2258 rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); 2259 return -EFAULT; 2260 } 2261 2262 return 0; 2263 } 2264 2265 static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) 2266 { 2267 int ret; 2268 u32 reg; 2269 u8 val; 2270 2271 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2272 if (ret) 2273 return ret; 2274 2275 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); 2276 2277 ret = read_poll_timeout(rtw89_read8, val, 2278 (val & B_AX_PTCL_TX_ON_STAT) == 0, 2279 SW_CVR_DUR_US, 2280 SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, 2281 false, rtwdev, reg); 2282 if (ret) 2283 return ret; 2284 2285 return 0; 2286 } 2287 2288 static int band1_enable(struct rtw89_dev *rtwdev) 2289 { 2290 int ret, i; 2291 u32 sleep_bak[4] = {0}; 2292 u32 pause_bak[4] = {0}; 2293 u16 tx_en; 2294 2295 ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); 2296 if (ret) { 2297 rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); 2298 return ret; 2299 } 2300 2301 for (i = 0; i < 4; i++) { 2302 sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); 2303 pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); 2304 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); 2305 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); 2306 } 2307 2308 ret = band_idle_ck_b(rtwdev, 0); 2309 if (ret) { 2310 rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); 2311 return ret; 2312 } 2313 2314 ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode); 2315 if (ret) { 2316 rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); 2317 return ret; 2318 } 2319 2320 for (i = 0; i < 4; i++) { 2321 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); 2322 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); 2323 } 2324 2325 ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en); 2326 if (ret) { 2327 rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret); 2328 return ret; 2329 } 2330 2331 ret = cmac_func_en(rtwdev, 1, true); 2332 if (ret) { 2333 rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); 2334 return ret; 2335 } 2336 2337 ret = cmac_init(rtwdev, 1); 2338 if (ret) { 2339 rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); 2340 return ret; 2341 } 2342 2343 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 2344 B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); 2345 2346 return 0; 2347 } 2348 2349 static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, 2350 enum rtw89_mac_hwmod_sel sel) 2351 { 2352 u32 reg, val; 2353 int ret; 2354 2355 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); 2356 if (ret) { 2357 rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", 2358 sel, mac_idx); 2359 return ret; 2360 } 2361 2362 if (sel == RTW89_DMAC_SEL) { 2363 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, 2364 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2365 B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | 2366 B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN); 2367 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1, 2368 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2369 B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN); 2370 rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 2371 B_AX_HDT_PKT_FAIL_DBG_INT_EN | 2372 B_AX_HDT_OFFSET_UNMATCH_INT_EN); 2373 rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 2374 B_AX_CPU_SHIFT_EN_ERR_INT_EN); 2375 rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, 2376 B_AX_PLE_GETNPG_STRPG_ERR_INT_EN); 2377 rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, 2378 B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN); 2379 rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN); 2380 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, 2381 B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN); 2382 } else if (sel == RTW89_CMAC_SEL) { 2383 reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); 2384 rtw89_write32_clr(rtwdev, reg, 2385 B_AX_SORT_NON_IDLE_ERR_INT_EN); 2386 2387 reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx); 2388 rtw89_write32_clr(rtwdev, reg, 2389 B_AX_NO_RESERVE_PAGE_ERR_IMR | 2390 B_AX_RXDATA_FSM_HANG_ERROR_IMR); 2391 2392 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); 2393 val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | 2394 B_AX_TX_RECORD_PKTID_ERR_INT_EN | 2395 B_AX_FSM_TIMEOUT_ERR_INT_EN; 2396 rtw89_write32(rtwdev, reg, val); 2397 2398 reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx); 2399 rtw89_write32_set(rtwdev, reg, 2400 B_AX_PHY_TXON_TIMEOUT_INT_EN | 2401 B_AX_CCK_CCA_TIMEOUT_INT_EN | 2402 B_AX_OFDM_CCA_TIMEOUT_INT_EN | 2403 B_AX_DATA_ON_TIMEOUT_INT_EN | 2404 B_AX_STS_ON_TIMEOUT_INT_EN | 2405 B_AX_CSI_ON_TIMEOUT_INT_EN); 2406 2407 reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx); 2408 val = rtw89_read32(rtwdev, reg); 2409 val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN | 2410 B_AX_RMAC_RX_TIMEOUT_INT_EN | 2411 B_AX_RMAC_CSI_TIMEOUT_INT_EN); 2412 val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | 2413 B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | 2414 B_AX_RMAC_CCA_TIMEOUT_INT_EN | 2415 B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN); 2416 rtw89_write32(rtwdev, reg, val); 2417 } else { 2418 return -EINVAL; 2419 } 2420 2421 return 0; 2422 } 2423 2424 static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable) 2425 { 2426 int ret = 0; 2427 2428 if (enable) { 2429 ret = band1_enable(rtwdev); 2430 if (ret) { 2431 rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret); 2432 return ret; 2433 } 2434 2435 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL); 2436 if (ret) { 2437 rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret); 2438 return ret; 2439 } 2440 } else { 2441 rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented not\n"); 2442 return -EINVAL; 2443 } 2444 2445 return 0; 2446 } 2447 2448 static int set_host_rpr(struct rtw89_dev *rtwdev) 2449 { 2450 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 2451 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2452 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH); 2453 rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0, 2454 B_AX_RLSRPT0_FLTR_MAP_MASK); 2455 } else { 2456 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2457 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF); 2458 rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0, 2459 B_AX_RLSRPT0_FLTR_MAP_MASK); 2460 } 2461 2462 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30); 2463 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255); 2464 2465 return 0; 2466 } 2467 2468 static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) 2469 { 2470 enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; 2471 int ret; 2472 2473 ret = dmac_init(rtwdev, 0); 2474 if (ret) { 2475 rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret); 2476 return ret; 2477 } 2478 2479 ret = cmac_init(rtwdev, 0); 2480 if (ret) { 2481 rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret); 2482 return ret; 2483 } 2484 2485 if (is_qta_dbcc(rtwdev, qta_mode)) { 2486 ret = rtw89_mac_dbcc_enable(rtwdev, true); 2487 if (ret) { 2488 rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret); 2489 return ret; 2490 } 2491 } 2492 2493 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2494 if (ret) { 2495 rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret); 2496 return ret; 2497 } 2498 2499 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 2500 if (ret) { 2501 rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret); 2502 return ret; 2503 } 2504 2505 ret = set_host_rpr(rtwdev); 2506 if (ret) { 2507 rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret); 2508 return ret; 2509 } 2510 2511 return 0; 2512 } 2513 2514 static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) 2515 { 2516 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 2517 2518 rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); 2519 rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 2520 } 2521 2522 static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, 2523 bool dlfw) 2524 { 2525 u32 val; 2526 int ret; 2527 2528 if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN) 2529 return -EFAULT; 2530 2531 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 2532 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 2533 2534 rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 2535 2536 val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 2537 val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY); 2538 val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE, 2539 B_AX_WCPU_FWDL_STS_MASK); 2540 2541 if (dlfw) 2542 val |= B_AX_WCPU_FWDL_EN; 2543 2544 rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val); 2545 rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK, 2546 boot_reason); 2547 rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); 2548 2549 if (!dlfw) { 2550 mdelay(5); 2551 2552 ret = rtw89_fw_check_rdy(rtwdev); 2553 if (ret) 2554 return ret; 2555 } 2556 2557 return 0; 2558 } 2559 2560 static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev) 2561 { 2562 u32 val; 2563 int ret; 2564 2565 val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | 2566 B_AX_PKT_BUF_EN; 2567 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val); 2568 2569 val = B_AX_DISPATCHER_CLK_EN; 2570 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); 2571 2572 ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); 2573 if (ret) { 2574 rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); 2575 return ret; 2576 } 2577 2578 ret = hfc_init(rtwdev, true, false, true); 2579 if (ret) { 2580 rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret); 2581 return ret; 2582 } 2583 2584 return ret; 2585 } 2586 2587 static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev) 2588 { 2589 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, 2590 B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN); 2591 } 2592 2593 void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) 2594 { 2595 rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, 2596 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2597 rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, 2598 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2599 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2600 rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2601 } 2602 2603 void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) 2604 { 2605 rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, 2606 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2607 rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, 2608 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2609 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2610 rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2611 } 2612 2613 int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) 2614 { 2615 int ret; 2616 2617 ret = rtw89_mac_power_switch(rtwdev, true); 2618 if (ret) { 2619 rtw89_mac_power_switch(rtwdev, false); 2620 ret = rtw89_mac_power_switch(rtwdev, true); 2621 if (ret) 2622 return ret; 2623 } 2624 2625 rtw89_mac_hci_func_en(rtwdev); 2626 2627 if (rtwdev->hci.ops->mac_pre_init) { 2628 ret = rtwdev->hci.ops->mac_pre_init(rtwdev); 2629 if (ret) 2630 return ret; 2631 } 2632 2633 ret = rtw89_mac_fw_dl_pre_init(rtwdev); 2634 if (ret) 2635 return ret; 2636 2637 rtw89_mac_disable_cpu(rtwdev); 2638 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 2639 if (ret) 2640 return ret; 2641 2642 ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); 2643 if (ret) 2644 return ret; 2645 2646 return 0; 2647 } 2648 2649 int rtw89_mac_init(struct rtw89_dev *rtwdev) 2650 { 2651 int ret; 2652 2653 ret = rtw89_mac_partial_init(rtwdev); 2654 if (ret) 2655 goto fail; 2656 2657 rtw89_mac_enable_bb_rf(rtwdev); 2658 2659 ret = rtw89_mac_sys_init(rtwdev); 2660 if (ret) 2661 goto fail; 2662 2663 ret = rtw89_mac_trx_init(rtwdev); 2664 if (ret) 2665 goto fail; 2666 2667 if (rtwdev->hci.ops->mac_post_init) { 2668 ret = rtwdev->hci.ops->mac_post_init(rtwdev); 2669 if (ret) 2670 goto fail; 2671 } 2672 2673 rtw89_fw_send_all_early_h2c(rtwdev); 2674 rtw89_fw_h2c_set_ofld_cfg(rtwdev); 2675 2676 return ret; 2677 fail: 2678 rtw89_mac_power_switch(rtwdev, false); 2679 2680 return ret; 2681 } 2682 2683 static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2684 { 2685 u8 i; 2686 2687 for (i = 0; i < 4; i++) { 2688 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2689 DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); 2690 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); 2691 } 2692 } 2693 2694 static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2695 { 2696 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2697 CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); 2698 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); 2699 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); 2700 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); 2701 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); 2702 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); 2703 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); 2704 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); 2705 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); 2706 } 2707 2708 static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) 2709 { 2710 u8 sh = FIELD_GET(GENMASK(4, 0), macid); 2711 u8 grp = macid >> 5; 2712 int ret; 2713 2714 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 2715 if (ret) 2716 return ret; 2717 2718 rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); 2719 2720 return 0; 2721 } 2722 2723 static const struct rtw89_port_reg rtw_port_base = { 2724 .port_cfg = R_AX_PORT_CFG_P0, 2725 .tbtt_prohib = R_AX_TBTT_PROHIB_P0, 2726 .bcn_area = R_AX_BCN_AREA_P0, 2727 .bcn_early = R_AX_BCNERLYINT_CFG_P0, 2728 .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, 2729 .tbtt_agg = R_AX_TBTT_AGG_P0, 2730 .bcn_space = R_AX_BCN_SPACE_CFG_P0, 2731 .bcn_forcetx = R_AX_BCN_FORCETX_P0, 2732 .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, 2733 .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, 2734 .dtim_ctrl = R_AX_DTIM_CTRL_P0, 2735 .tbtt_shift = R_AX_TBTT_SHIFT_P0, 2736 .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, 2737 .tsftr_l = R_AX_TSFTR_LOW_P0, 2738 .tsftr_h = R_AX_TSFTR_HIGH_P0 2739 }; 2740 2741 #define BCN_INTERVAL 100 2742 #define BCN_ERLY_DEF 160 2743 #define BCN_SETUP_DEF 2 2744 #define BCN_HOLD_DEF 200 2745 #define BCN_MASK_DEF 0 2746 #define TBTT_ERLY_DEF 5 2747 #define BCN_SET_UNIT 32 2748 #define BCN_ERLY_SET_DLY (10 * 2) 2749 2750 static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, 2751 struct rtw89_vif *rtwvif) 2752 { 2753 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2754 const struct rtw89_port_reg *p = &rtw_port_base; 2755 2756 if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) 2757 return; 2758 2759 rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); 2760 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); 2761 rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); 2762 rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); 2763 2764 msleep(vif->bss_conf.beacon_int + 1); 2765 2766 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | 2767 B_AX_BRK_SETUP); 2768 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST); 2769 rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); 2770 } 2771 2772 static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, 2773 struct rtw89_vif *rtwvif, bool en) 2774 { 2775 const struct rtw89_port_reg *p = &rtw_port_base; 2776 2777 if (en) 2778 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2779 else 2780 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2781 } 2782 2783 static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, 2784 struct rtw89_vif *rtwvif, bool en) 2785 { 2786 const struct rtw89_port_reg *p = &rtw_port_base; 2787 2788 if (en) 2789 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2790 else 2791 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2792 } 2793 2794 static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, 2795 struct rtw89_vif *rtwvif) 2796 { 2797 const struct rtw89_port_reg *p = &rtw_port_base; 2798 2799 rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, 2800 rtwvif->net_type); 2801 } 2802 2803 static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, 2804 struct rtw89_vif *rtwvif) 2805 { 2806 const struct rtw89_port_reg *p = &rtw_port_base; 2807 bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 2808 u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; 2809 2810 if (en) 2811 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits); 2812 else 2813 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits); 2814 } 2815 2816 static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, 2817 struct rtw89_vif *rtwvif) 2818 { 2819 const struct rtw89_port_reg *p = &rtw_port_base; 2820 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2821 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2822 u32 bit = B_AX_RX_BSSID_FIT_EN; 2823 2824 if (en) 2825 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit); 2826 else 2827 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit); 2828 } 2829 2830 static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, 2831 struct rtw89_vif *rtwvif) 2832 { 2833 const struct rtw89_port_reg *p = &rtw_port_base; 2834 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2835 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2836 2837 if (en) 2838 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2839 else 2840 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2841 } 2842 2843 static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, 2844 struct rtw89_vif *rtwvif) 2845 { 2846 const struct rtw89_port_reg *p = &rtw_port_base; 2847 bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || 2848 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2849 2850 if (en) 2851 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2852 else 2853 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2854 } 2855 2856 static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, 2857 struct rtw89_vif *rtwvif) 2858 { 2859 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2860 const struct rtw89_port_reg *p = &rtw_port_base; 2861 u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; 2862 2863 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 2864 bcn_int); 2865 } 2866 2867 static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, 2868 struct rtw89_vif *rtwvif) 2869 { 2870 const struct rtw89_port_reg *p = &rtw_port_base; 2871 2872 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2873 B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); 2874 } 2875 2876 static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, 2877 struct rtw89_vif *rtwvif) 2878 { 2879 const struct rtw89_port_reg *p = &rtw_port_base; 2880 2881 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2882 B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); 2883 } 2884 2885 static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, 2886 struct rtw89_vif *rtwvif) 2887 { 2888 const struct rtw89_port_reg *p = &rtw_port_base; 2889 2890 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, 2891 B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); 2892 } 2893 2894 static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, 2895 struct rtw89_vif *rtwvif) 2896 { 2897 const struct rtw89_port_reg *p = &rtw_port_base; 2898 2899 rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, 2900 B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); 2901 } 2902 2903 static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, 2904 struct rtw89_vif *rtwvif) 2905 { 2906 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2907 static const u32 masks[RTW89_PORT_NUM] = { 2908 B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, 2909 B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, 2910 B_AX_BSS_COLOB_AX_PORT_4_MASK, 2911 }; 2912 u8 port = rtwvif->port; 2913 u32 reg_base; 2914 u32 reg; 2915 u8 bss_color; 2916 2917 bss_color = vif->bss_conf.he_bss_color.color; 2918 reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; 2919 reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); 2920 rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); 2921 } 2922 2923 static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, 2924 struct rtw89_vif *rtwvif) 2925 { 2926 u8 port = rtwvif->port; 2927 u32 reg; 2928 2929 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2930 return; 2931 2932 if (port == 0) { 2933 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); 2934 rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); 2935 } 2936 } 2937 2938 static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, 2939 struct rtw89_vif *rtwvif) 2940 { 2941 u8 port = rtwvif->port; 2942 u32 reg; 2943 u32 val; 2944 2945 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); 2946 val = rtw89_read32(rtwdev, reg); 2947 val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); 2948 if (port == 0) 2949 val &= ~BIT(0); 2950 rtw89_write32(rtwdev, reg, val); 2951 } 2952 2953 static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, 2954 struct rtw89_vif *rtwvif) 2955 { 2956 const struct rtw89_port_reg *p = &rtw_port_base; 2957 2958 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN); 2959 } 2960 2961 static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, 2962 struct rtw89_vif *rtwvif) 2963 { 2964 const struct rtw89_port_reg *p = &rtw_port_base; 2965 2966 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 2967 BCN_ERLY_DEF); 2968 } 2969 2970 int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2971 { 2972 int ret; 2973 2974 ret = rtw89_mac_port_update(rtwdev, rtwvif); 2975 if (ret) 2976 return ret; 2977 2978 rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id); 2979 rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id); 2980 2981 ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false); 2982 if (ret) 2983 return ret; 2984 2985 ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE); 2986 if (ret) 2987 return ret; 2988 2989 ret = rtw89_cam_init(rtwdev, rtwvif); 2990 if (ret) 2991 return ret; 2992 2993 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 2994 if (ret) 2995 return ret; 2996 2997 ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id); 2998 if (ret) 2999 return ret; 3000 3001 return 0; 3002 } 3003 3004 int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3005 { 3006 int ret; 3007 3008 ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE); 3009 if (ret) 3010 return ret; 3011 3012 rtw89_cam_deinit(rtwdev, rtwvif); 3013 3014 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 3015 if (ret) 3016 return ret; 3017 3018 return 0; 3019 } 3020 3021 int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3022 { 3023 u8 port = rtwvif->port; 3024 3025 if (port >= RTW89_PORT_NUM) 3026 return -EINVAL; 3027 3028 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); 3029 rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false); 3030 rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false); 3031 rtw89_mac_port_cfg_net_type(rtwdev, rtwvif); 3032 rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); 3033 rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); 3034 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); 3035 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); 3036 rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); 3037 rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif); 3038 rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif); 3039 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif); 3040 rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif); 3041 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif); 3042 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif); 3043 rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif); 3044 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif); 3045 fsleep(BCN_ERLY_SET_DLY); 3046 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif); 3047 3048 return 0; 3049 } 3050 3051 int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3052 { 3053 int ret; 3054 3055 rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, 3056 RTW89_MAX_MAC_ID_NUM); 3057 if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) 3058 return -ENOSPC; 3059 3060 ret = rtw89_mac_vif_init(rtwdev, rtwvif); 3061 if (ret) 3062 goto release_mac_id; 3063 3064 return 0; 3065 3066 release_mac_id: 3067 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3068 3069 return ret; 3070 } 3071 3072 int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3073 { 3074 int ret; 3075 3076 ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); 3077 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3078 3079 return ret; 3080 } 3081 3082 static void 3083 rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3084 { 3085 } 3086 3087 static void 3088 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3089 { 3090 rtw89_debug(rtwdev, RTW89_DBG_FW, 3091 "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", 3092 RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), 3093 RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), 3094 RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), 3095 RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); 3096 } 3097 3098 static void 3099 rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3100 { 3101 rtw89_debug(rtwdev, RTW89_DBG_FW, 3102 "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", 3103 RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data), 3104 RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data), 3105 RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data), 3106 RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data), 3107 RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data)); 3108 } 3109 3110 static void 3111 rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3112 { 3113 rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len), 3114 RTW89_GET_C2H_LOG_SRT_PRT(c2h->data)); 3115 } 3116 3117 static 3118 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 3119 struct sk_buff *c2h, u32 len) = { 3120 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 3121 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 3122 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL, 3123 [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 3124 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 3125 }; 3126 3127 static 3128 void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, 3129 struct sk_buff *c2h, u32 len) = { 3130 [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, 3131 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 3132 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 3133 }; 3134 3135 void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3136 u32 len, u8 class, u8 func) 3137 { 3138 void (*handler)(struct rtw89_dev *rtwdev, 3139 struct sk_buff *c2h, u32 len) = NULL; 3140 3141 switch (class) { 3142 case RTW89_MAC_C2H_CLASS_INFO: 3143 if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) 3144 handler = rtw89_mac_c2h_info_handler[func]; 3145 break; 3146 case RTW89_MAC_C2H_CLASS_OFLD: 3147 if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) 3148 handler = rtw89_mac_c2h_ofld_handler[func]; 3149 break; 3150 case RTW89_MAC_C2H_CLASS_FWDBG: 3151 return; 3152 default: 3153 rtw89_info(rtwdev, "c2h class %d not support\n", class); 3154 return; 3155 } 3156 if (!handler) { 3157 rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 3158 func); 3159 return; 3160 } 3161 handler(rtwdev, skb, len); 3162 } 3163 3164 bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, 3165 enum rtw89_phy_idx phy_idx, 3166 u32 reg_base, u32 *cr) 3167 { 3168 const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; 3169 enum rtw89_qta_mode mode = dle_mem->mode; 3170 u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); 3171 3172 if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { 3173 rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", 3174 addr); 3175 goto error; 3176 } 3177 3178 if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR) 3179 if (mode == RTW89_QTA_SCC) { 3180 rtw89_err(rtwdev, 3181 "[TXPWR] addr=0x%x but hw not enable\n", 3182 addr); 3183 goto error; 3184 } 3185 3186 *cr = addr; 3187 return true; 3188 3189 error: 3190 rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", 3191 addr, phy_idx); 3192 3193 return false; 3194 } 3195 3196 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 3197 { 3198 u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); 3199 int ret = 0; 3200 3201 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3202 if (ret) 3203 return ret; 3204 3205 if (!enable) { 3206 rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); 3207 return ret; 3208 } 3209 3210 rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | 3211 B_AX_APP_MAC_INFO_RPT | 3212 B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | 3213 B_AX_PPDU_STAT_RPT_CRC32); 3214 rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, 3215 RTW89_PRPT_DEST_HOST); 3216 3217 return ret; 3218 } 3219 3220 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) 3221 { 3222 #define MAC_AX_TIME_TH_SH 5 3223 #define MAC_AX_LEN_TH_SH 4 3224 #define MAC_AX_TIME_TH_MAX 255 3225 #define MAC_AX_LEN_TH_MAX 255 3226 #define MAC_AX_TIME_TH_DEF 88 3227 #define MAC_AX_LEN_TH_DEF 4080 3228 struct ieee80211_hw *hw = rtwdev->hw; 3229 u32 rts_threshold = hw->wiphy->rts_threshold; 3230 u32 time_th, len_th; 3231 u32 reg; 3232 3233 if (rts_threshold == (u32)-1) { 3234 time_th = MAC_AX_TIME_TH_DEF; 3235 len_th = MAC_AX_LEN_TH_DEF; 3236 } else { 3237 time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; 3238 len_th = rts_threshold; 3239 } 3240 3241 time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); 3242 len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); 3243 3244 reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); 3245 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); 3246 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); 3247 } 3248 3249 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) 3250 { 3251 bool empty; 3252 int ret; 3253 3254 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3255 return; 3256 3257 ret = read_poll_timeout(dle_is_txq_empty, empty, empty, 3258 10000, 200000, false, rtwdev); 3259 if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) 3260 rtw89_info(rtwdev, "timed out to flush queues\n"); 3261 } 3262 3263 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) 3264 { 3265 u8 val; 3266 u16 val16; 3267 u32 val32; 3268 int ret; 3269 3270 rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); 3271 rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); 3272 rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); 3273 rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); 3274 rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); 3275 rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); 3276 3277 val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); 3278 val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN; 3279 rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); 3280 3281 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); 3282 if (ret) { 3283 rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n"); 3284 return ret; 3285 } 3286 val32 = val32 & B_AX_WL_RX_CTRL; 3287 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); 3288 if (ret) { 3289 rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n"); 3290 return ret; 3291 } 3292 3293 switch (coex->pta_mode) { 3294 case RTW89_MAC_AX_COEX_RTK_MODE: 3295 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3296 val &= ~B_AX_BTMODE_MASK; 3297 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); 3298 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3299 3300 val = rtw89_read8(rtwdev, R_AX_TDMA_MODE); 3301 rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); 3302 3303 val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); 3304 val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; 3305 val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); 3306 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); 3307 break; 3308 case RTW89_MAC_AX_COEX_CSR_MODE: 3309 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3310 val &= ~B_AX_BTMODE_MASK; 3311 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); 3312 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3313 3314 val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); 3315 val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; 3316 val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); 3317 val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; 3318 val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); 3319 val16 &= ~B_AX_BT_STAT_DELAY_MASK; 3320 val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); 3321 val16 |= B_AX_ENHANCED_BT; 3322 rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); 3323 3324 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); 3325 break; 3326 default: 3327 return -EINVAL; 3328 } 3329 3330 switch (coex->direction) { 3331 case RTW89_MAC_AX_COEX_INNER: 3332 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3333 val = (val & ~BIT(2)) | BIT(1); 3334 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3335 break; 3336 case RTW89_MAC_AX_COEX_OUTPUT: 3337 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3338 val = val | BIT(1) | BIT(0); 3339 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3340 break; 3341 case RTW89_MAC_AX_COEX_INPUT: 3342 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3343 val = val & ~(BIT(2) | BIT(1)); 3344 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3345 break; 3346 default: 3347 return -EINVAL; 3348 } 3349 3350 return 0; 3351 } 3352 3353 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, 3354 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 3355 { 3356 u32 val, ret; 3357 3358 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); 3359 if (ret) { 3360 rtw89_err(rtwdev, "Read LTE fail!\n"); 3361 return ret; 3362 } 3363 val = (gnt_cfg->band[0].gnt_bt ? 3364 B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) | 3365 (gnt_cfg->band[0].gnt_bt_sw_en ? 3366 B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) | 3367 (gnt_cfg->band[0].gnt_wl ? 3368 B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) | 3369 (gnt_cfg->band[0].gnt_wl_sw_en ? 3370 B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) | 3371 (gnt_cfg->band[1].gnt_bt ? 3372 B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) | 3373 (gnt_cfg->band[1].gnt_bt_sw_en ? 3374 B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) | 3375 (gnt_cfg->band[1].gnt_wl ? 3376 B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) | 3377 (gnt_cfg->band[1].gnt_wl_sw_en ? 3378 B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0); 3379 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); 3380 if (ret) { 3381 rtw89_err(rtwdev, "Write LTE fail!\n"); 3382 return ret; 3383 } 3384 3385 return 0; 3386 } 3387 3388 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) 3389 { 3390 u32 reg; 3391 u8 val; 3392 int ret; 3393 3394 ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); 3395 if (ret) 3396 return ret; 3397 3398 reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); 3399 val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | 3400 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | 3401 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | 3402 (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | 3403 (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | 3404 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) | 3405 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | 3406 (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0); 3407 rtw89_write8(rtwdev, reg, val); 3408 3409 return 0; 3410 } 3411 3412 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) 3413 { 3414 u32 fw_sb; 3415 3416 fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3417 fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); 3418 fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; 3419 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3420 fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; 3421 else 3422 fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; 3423 val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); 3424 val = B_AX_TOGGLE | 3425 FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | 3426 FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); 3427 rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); 3428 fsleep(1000); /* avoid BT FW loss information */ 3429 } 3430 3431 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) 3432 { 3433 return rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3434 } 3435 3436 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) 3437 { 3438 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3439 3440 val = wl ? val | BIT(2) : val & ~BIT(2); 3441 rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); 3442 3443 return 0; 3444 } 3445 3446 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) 3447 { 3448 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3449 3450 return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val); 3451 } 3452 3453 u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) 3454 { 3455 u32 reg; 3456 u16 cnt; 3457 3458 reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band); 3459 cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); 3460 rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); 3461 3462 return cnt; 3463 } 3464 3465 static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 3466 { 3467 u32 reg; 3468 u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | 3469 B_AX_BFMEE_HE_NDPA_EN; 3470 3471 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); 3472 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3473 if (en) { 3474 set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3475 rtw89_write32_set(rtwdev, reg, mask); 3476 } else { 3477 clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3478 rtw89_write32_clr(rtwdev, reg, mask); 3479 } 3480 } 3481 3482 static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) 3483 { 3484 u32 reg; 3485 u32 val32; 3486 int ret; 3487 3488 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3489 if (ret) 3490 return ret; 3491 3492 /* AP mode set tx gid to 63 */ 3493 /* STA mode set tx gid to 0(default) */ 3494 reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); 3495 rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); 3496 3497 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); 3498 rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); 3499 3500 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3501 val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER); 3502 val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); 3503 rtw89_write32(rtwdev, reg, val32); 3504 rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); 3505 3506 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3507 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | 3508 B_AX_BFMEE_USE_NSTS | 3509 B_AX_BFMEE_CSI_GID_SEL | 3510 B_AX_BFMEE_CSI_FORCE_RETE_EN); 3511 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); 3512 rtw89_write32(rtwdev, reg, 3513 u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | 3514 u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | 3515 u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); 3516 3517 return 0; 3518 } 3519 3520 static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, 3521 struct ieee80211_vif *vif, 3522 struct ieee80211_sta *sta) 3523 { 3524 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3525 u8 mac_idx = rtwvif->mac_idx; 3526 u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; 3527 u8 port_sel = rtwvif->port; 3528 u8 sound_dim = 3, t; 3529 u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info; 3530 u32 reg; 3531 u16 val; 3532 int ret; 3533 3534 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3535 if (ret) 3536 return ret; 3537 3538 if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || 3539 (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { 3540 ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); 3541 stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); 3542 t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 3543 phy_cap[5]); 3544 sound_dim = min(sound_dim, t); 3545 } 3546 if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || 3547 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 3548 ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); 3549 stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); 3550 t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 3551 sta->vht_cap.cap); 3552 sound_dim = min(sound_dim, t); 3553 } 3554 nc = min(nc, sound_dim); 3555 nr = min(nr, sound_dim); 3556 3557 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3558 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 3559 3560 val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | 3561 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) | 3562 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) | 3563 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) | 3564 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) | 3565 FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) | 3566 FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); 3567 3568 if (port_sel == 0) 3569 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3570 else 3571 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); 3572 3573 rtw89_write16(rtwdev, reg, val); 3574 3575 return 0; 3576 } 3577 3578 static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, 3579 struct ieee80211_vif *vif, 3580 struct ieee80211_sta *sta) 3581 { 3582 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3583 u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); 3584 u32 reg; 3585 u8 mac_idx = rtwvif->mac_idx; 3586 int ret; 3587 3588 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3589 if (ret) 3590 return ret; 3591 3592 if (sta->he_cap.has_he) { 3593 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | 3594 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | 3595 BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); 3596 } 3597 if (sta->vht_cap.vht_supported) { 3598 rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | 3599 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | 3600 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); 3601 } 3602 if (sta->ht_cap.ht_supported) { 3603 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | 3604 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | 3605 BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); 3606 } 3607 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3608 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 3609 rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); 3610 rtw89_write32(rtwdev, 3611 rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), 3612 rrsc); 3613 3614 return 0; 3615 } 3616 3617 void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3618 struct ieee80211_sta *sta) 3619 { 3620 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3621 3622 if (rtw89_sta_has_beamformer_cap(sta)) { 3623 rtw89_debug(rtwdev, RTW89_DBG_BF, 3624 "initialize bfee for new association\n"); 3625 rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); 3626 rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); 3627 rtw89_mac_csi_rrsc(rtwdev, vif, sta); 3628 } 3629 } 3630 3631 void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3632 struct ieee80211_sta *sta) 3633 { 3634 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3635 3636 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false); 3637 } 3638 3639 void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3640 struct ieee80211_bss_conf *conf) 3641 { 3642 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3643 u8 mac_idx = rtwvif->mac_idx; 3644 __le32 *p; 3645 3646 rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); 3647 3648 p = (__le32 *)conf->mu_group.membership; 3649 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), 3650 le32_to_cpu(p[0])); 3651 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), 3652 le32_to_cpu(p[1])); 3653 3654 p = (__le32 *)conf->mu_group.position; 3655 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), 3656 le32_to_cpu(p[0])); 3657 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), 3658 le32_to_cpu(p[1])); 3659 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), 3660 le32_to_cpu(p[2])); 3661 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx), 3662 le32_to_cpu(p[3])); 3663 } 3664 3665 struct rtw89_mac_bf_monitor_iter_data { 3666 struct rtw89_dev *rtwdev; 3667 struct ieee80211_sta *down_sta; 3668 int count; 3669 }; 3670 3671 static 3672 void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta) 3673 { 3674 struct rtw89_mac_bf_monitor_iter_data *iter_data = 3675 (struct rtw89_mac_bf_monitor_iter_data *)data; 3676 struct ieee80211_sta *down_sta = iter_data->down_sta; 3677 int *count = &iter_data->count; 3678 3679 if (down_sta == sta) 3680 return; 3681 3682 if (rtw89_sta_has_beamformer_cap(sta)) 3683 (*count)++; 3684 } 3685 3686 void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, 3687 struct ieee80211_sta *sta, bool disconnect) 3688 { 3689 struct rtw89_mac_bf_monitor_iter_data data; 3690 3691 data.rtwdev = rtwdev; 3692 data.down_sta = disconnect ? sta : NULL; 3693 data.count = 0; 3694 ieee80211_iterate_stations_atomic(rtwdev->hw, 3695 rtw89_mac_bf_monitor_calc_iter, 3696 &data); 3697 3698 rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count); 3699 if (data.count) 3700 set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3701 else 3702 clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3703 } 3704 3705 void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) 3706 { 3707 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3708 struct rtw89_vif *rtwvif; 3709 bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv; 3710 bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3711 3712 if (en == old) 3713 return; 3714 3715 rtw89_for_each_rtwvif(rtwdev, rtwvif) 3716 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en); 3717 } 3718 3719 static int 3720 __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3721 u32 tx_time) 3722 { 3723 #define MAC_AX_DFLT_TX_TIME 5280 3724 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3725 u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time; 3726 u32 reg; 3727 int ret = 0; 3728 3729 if (rtwsta->cctl_tx_time) { 3730 rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9; 3731 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3732 } else { 3733 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3734 if (ret) { 3735 rtw89_warn(rtwdev, "failed to check cmac in set txtime\n"); 3736 return ret; 3737 } 3738 3739 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3740 rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, 3741 max_tx_time >> 5); 3742 } 3743 3744 return ret; 3745 } 3746 3747 int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3748 bool resume, u32 tx_time) 3749 { 3750 int ret = 0; 3751 3752 if (!resume) { 3753 rtwsta->cctl_tx_time = true; 3754 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3755 } else { 3756 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3757 rtwsta->cctl_tx_time = false; 3758 } 3759 3760 return ret; 3761 } 3762 3763 int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3764 u32 *tx_time) 3765 { 3766 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3767 u32 reg; 3768 int ret = 0; 3769 3770 if (rtwsta->cctl_tx_time) { 3771 *tx_time = (rtwsta->ampdu_max_time + 1) << 9; 3772 } else { 3773 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3774 if (ret) { 3775 rtw89_warn(rtwdev, "failed to check cmac in tx_time\n"); 3776 return ret; 3777 } 3778 3779 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3780 *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; 3781 } 3782 3783 return ret; 3784 } 3785 3786 int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, 3787 struct rtw89_sta *rtwsta, 3788 bool resume, u8 tx_retry) 3789 { 3790 int ret = 0; 3791 3792 rtwsta->data_tx_cnt_lmt = tx_retry; 3793 3794 if (!resume) { 3795 rtwsta->cctl_tx_retry_limit = true; 3796 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3797 } else { 3798 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3799 rtwsta->cctl_tx_retry_limit = false; 3800 } 3801 3802 return ret; 3803 } 3804 3805 int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, 3806 struct rtw89_sta *rtwsta, u8 *tx_retry) 3807 { 3808 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3809 u32 reg; 3810 int ret = 0; 3811 3812 if (rtwsta->cctl_tx_retry_limit) { 3813 *tx_retry = rtwsta->data_tx_cnt_lmt; 3814 } else { 3815 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3816 if (ret) { 3817 rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n"); 3818 return ret; 3819 } 3820 3821 reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); 3822 *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); 3823 } 3824 3825 return ret; 3826 } 3827 3828 int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, 3829 struct rtw89_vif *rtwvif, bool en) 3830 { 3831 u8 mac_idx = rtwvif->mac_idx; 3832 u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0; 3833 u32 reg; 3834 u32 ret; 3835 3836 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3837 if (ret) 3838 return ret; 3839 3840 reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); 3841 if (en) 3842 rtw89_write16_set(rtwdev, reg, set); 3843 else 3844 rtw89_write16_clr(rtwdev, reg, set); 3845 3846 return 0; 3847 } 3848