1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "debug.h" 7 #include "fw.h" 8 #include "mac.h" 9 #include "ps.h" 10 #include "reg.h" 11 #include "util.h" 12 13 int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, 14 enum rtw89_mac_hwmod_sel sel) 15 { 16 u32 val, r_val; 17 18 if (sel == RTW89_DMAC_SEL) { 19 r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN); 20 val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN); 21 } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) { 22 r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN); 23 val = B_AX_CMAC_EN; 24 } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) { 25 r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND); 26 val = B_AX_CMAC1_FEN; 27 } else { 28 return -EINVAL; 29 } 30 if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD || 31 (val & r_val) != val) 32 return -EFAULT; 33 34 return 0; 35 } 36 37 int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val) 38 { 39 u8 lte_ctrl; 40 int ret; 41 42 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 43 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 44 if (ret) 45 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 46 47 rtw89_write32(rtwdev, R_AX_LTE_WDATA, val); 48 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset); 49 50 return ret; 51 } 52 53 int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val) 54 { 55 u8 lte_ctrl; 56 int ret; 57 58 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 59 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 60 if (ret) 61 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 62 63 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset); 64 *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA); 65 66 return ret; 67 } 68 69 static 70 int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl) 71 { 72 u32 ctrl_reg, data_reg, ctrl_data; 73 u32 val; 74 int ret; 75 76 switch (ctrl->type) { 77 case DLE_CTRL_TYPE_WDE: 78 ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL; 79 data_reg = R_AX_WDE_DBG_FUN_INTF_DATA; 80 ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) | 81 FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) | 82 B_AX_WDE_DFI_ACTIVE; 83 break; 84 case DLE_CTRL_TYPE_PLE: 85 ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL; 86 data_reg = R_AX_PLE_DBG_FUN_INTF_DATA; 87 ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) | 88 FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) | 89 B_AX_PLE_DFI_ACTIVE; 90 break; 91 default: 92 rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type); 93 return -EINVAL; 94 } 95 96 rtw89_write32(rtwdev, ctrl_reg, ctrl_data); 97 98 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE), 99 1, 1000, false, rtwdev, ctrl_reg); 100 if (ret) { 101 rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n", 102 ctrl_reg, ctrl_data); 103 return ret; 104 } 105 106 ctrl->out_data = rtw89_read32(rtwdev, data_reg); 107 return 0; 108 } 109 110 static int dle_dfi_quota(struct rtw89_dev *rtwdev, 111 struct rtw89_mac_dle_dfi_quota *quota) 112 { 113 struct rtw89_mac_dle_dfi_ctrl ctrl; 114 int ret; 115 116 ctrl.type = quota->dle_type; 117 ctrl.target = DLE_DFI_TYPE_QUOTA; 118 ctrl.addr = quota->qtaid; 119 ret = dle_dfi_ctrl(rtwdev, &ctrl); 120 if (ret) { 121 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 122 return ret; 123 } 124 125 quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data); 126 quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, 
ctrl.out_data); 127 return 0; 128 } 129 130 static int dle_dfi_qempty(struct rtw89_dev *rtwdev, 131 struct rtw89_mac_dle_dfi_qempty *qempty) 132 { 133 struct rtw89_mac_dle_dfi_ctrl ctrl; 134 u32 ret; 135 136 ctrl.type = qempty->dle_type; 137 ctrl.target = DLE_DFI_TYPE_QEMPTY; 138 ctrl.addr = qempty->grpsel; 139 ret = dle_dfi_ctrl(rtwdev, &ctrl); 140 if (ret) { 141 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 142 return ret; 143 } 144 145 qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data); 146 return 0; 147 } 148 149 static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev) 150 { 151 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ", 152 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); 153 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n", 154 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); 155 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ", 156 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); 157 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n", 158 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); 159 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ", 160 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); 161 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n", 162 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); 163 } 164 165 static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev) 166 { 167 struct rtw89_mac_dle_dfi_qempty qempty; 168 struct rtw89_mac_dle_dfi_quota quota; 169 struct rtw89_mac_dle_dfi_ctrl ctrl; 170 u32 val, not_empty, i; 171 int ret; 172 173 qempty.dle_type = DLE_CTRL_TYPE_PLE; 174 qempty.grpsel = 0; 175 ret = dle_dfi_qempty(rtwdev, &qempty); 176 if (ret) 177 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 178 else 179 rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty); 180 181 for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) { 182 if (!(not_empty & BIT(0))) 183 continue; 184 ctrl.type = DLE_CTRL_TYPE_PLE; 185 ctrl.target = DLE_DFI_TYPE_QLNKTBL; 186 ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? 
QLNKTBL_ADDR_INFO_SEL : 0) | 187 FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i); 188 ret = dle_dfi_ctrl(rtwdev, &ctrl); 189 if (ret) 190 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 191 else 192 rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i, 193 FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK, 194 ctrl.out_data)); 195 } 196 197 quota.dle_type = DLE_CTRL_TYPE_PLE; 198 quota.qtaid = 6; 199 ret = dle_dfi_quota(rtwdev, "a); 200 if (ret) 201 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 202 else 203 rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n", 204 quota.rsv_pgnum, quota.use_pgnum); 205 206 val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG); 207 rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n", 208 FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val)); 209 rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n", 210 FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val)); 211 212 dump_err_status_dispatcher(rtwdev); 213 } 214 215 static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev, 216 enum mac_ax_err_info err) 217 { 218 u32 dbg, event; 219 220 dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO); 221 event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg); 222 223 switch (event) { 224 case MAC_AX_L0_TO_L1_RX_QTA_LOST: 225 rtw89_info(rtwdev, "quota lost!\n"); 226 rtw89_mac_dump_qta_lost(rtwdev); 227 break; 228 default: 229 break; 230 } 231 } 232 233 static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, 234 enum mac_ax_err_info err) 235 { 236 u32 dmac_err, cmac_err; 237 238 if (err != MAC_AX_ERR_L1_ERR_DMAC && 239 err != MAC_AX_ERR_L0_PROMOTE_TO_L1) 240 return; 241 242 rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); 243 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n", 244 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 245 246 cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR); 247 rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err); 248 dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); 249 rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err); 250 251 if (dmac_err) { 252 rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ", 253 rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG)); 254 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n", 255 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG)); 256 } 257 258 if (dmac_err & B_AX_WDRLS_ERR_FLAG) { 259 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ", 260 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); 261 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n", 262 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); 263 } 264 265 if (dmac_err & B_AX_WSEC_ERR_FLAG) { 266 rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n", 267 rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); 268 rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n", 269 rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); 270 rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n", 271 rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); 272 rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n", 273 rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); 274 rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n", 275 rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); 276 rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n", 277 rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); 278 rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n", 279 rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); 280 rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n", 281 rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); 282 rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n", 283 rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); 284 rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n", 285 
rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); 286 } 287 288 if (dmac_err & B_AX_MPDU_ERR_FLAG) { 289 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ", 290 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); 291 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n", 292 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); 293 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ", 294 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); 295 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n", 296 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); 297 } 298 299 if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { 300 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ", 301 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); 302 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n", 303 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); 304 } 305 306 if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { 307 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 308 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 309 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 310 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 311 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 312 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 313 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 314 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 315 dump_err_status_dispatcher(rtwdev); 316 } 317 318 if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { 319 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", 320 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); 321 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", 322 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); 323 } 324 325 if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { 326 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 327 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 328 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 329 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 330 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 331 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 332 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 333 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 334 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n", 335 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); 336 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n", 337 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); 338 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n", 339 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); 340 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", 341 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); 342 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n", 343 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); 344 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n", 345 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); 346 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n", 347 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); 348 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", 349 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); 350 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", 351 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); 352 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", 353 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); 354 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", 355 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); 356 dump_err_status_dispatcher(rtwdev); 357 } 358 359 if (dmac_err & B_AX_PKTIN_ERR_FLAG) { 360 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 361 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 362 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 363 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 364 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 365 rtw89_read32(rtwdev, 
R_AX_PKTIN_ERR_IMR)); 366 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 367 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 368 } 369 370 if (dmac_err & B_AX_DISPATCH_ERR_FLAG) 371 dump_err_status_dispatcher(rtwdev); 372 373 if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) { 374 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ", 375 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR)); 376 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n", 377 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR)); 378 } 379 380 if (dmac_err & BIT(11)) { 381 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", 382 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); 383 } 384 385 if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { 386 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ", 387 rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR)); 388 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n", 389 rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR)); 390 } 391 392 if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { 393 rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ", 394 rtw89_read32(rtwdev, R_AX_PTCL_IMR0)); 395 rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n", 396 rtw89_read32(rtwdev, R_AX_PTCL_ISR0)); 397 } 398 399 if (cmac_err & B_AX_DMA_TOP_ERR_IND) { 400 rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n", 401 rtw89_read32(rtwdev, R_AX_DLE_CTRL)); 402 } 403 404 if (cmac_err & B_AX_PHYINTF_ERR_IND) { 405 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n", 406 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR)); 407 } 408 409 if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { 410 rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ", 411 rtw89_read32(rtwdev, R_AX_TXPWR_IMR)); 412 rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n", 413 rtw89_read32(rtwdev, R_AX_TXPWR_ISR)); 414 } 415 416 if (cmac_err & B_AX_WMAC_RX_ERR_IND) { 417 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ", 418 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 419 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n", 420 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR)); 421 } 422 423 if (cmac_err & B_AX_WMAC_TX_ERR_IND) { 424 rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ", 425 rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR)); 426 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n", 427 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 428 } 429 430 rtwdev->hci.ops->dump_err_status(rtwdev); 431 432 if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1) 433 rtw89_mac_dump_l0_to_l1(rtwdev, err); 434 435 rtw89_info(rtwdev, "<---\n"); 436 } 437 438 u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) 439 { 440 u32 err; 441 int ret; 442 443 ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000, 444 false, rtwdev, R_AX_HALT_C2H_CTRL); 445 if (ret) { 446 rtw89_warn(rtwdev, "Polling FW err status fail\n"); 447 return ret; 448 } 449 450 err = rtw89_read32(rtwdev, R_AX_HALT_C2H); 451 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 452 453 rtw89_fw_st_dbg_dump(rtwdev); 454 rtw89_mac_dump_err_status(rtwdev, err); 455 456 return err; 457 } 458 EXPORT_SYMBOL(rtw89_mac_get_err_status); 459 460 int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err) 461 { 462 u32 halt; 463 int ret = 0; 464 465 if (err > MAC_AX_SET_ERR_MAX) { 466 rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err); 467 return -EINVAL; 468 } 469 470 ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000, 471 100000, false, rtwdev, R_AX_HALT_H2C_CTRL); 472 if (ret) { 473 rtw89_err(rtwdev, "FW doesn't receive previous msg\n"); 474 return -EFAULT; 475 } 476 477 rtw89_write32(rtwdev, R_AX_HALT_H2C, err); 478 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER); 479 480 return 0; 481 
} 482 EXPORT_SYMBOL(rtw89_mac_set_err_status); 483 484 const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = { 485 2, 40, 0, 0, 1, 0, 0, 0 486 }; 487 488 static int hfc_reset_param(struct rtw89_dev *rtwdev) 489 { 490 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 491 struct rtw89_hfc_param_ini param_ini = {NULL}; 492 u8 qta_mode = rtwdev->mac.dle_info.qta_mode; 493 494 switch (rtwdev->hci.type) { 495 case RTW89_HCI_TYPE_PCIE: 496 param_ini = rtwdev->chip->hfc_param_ini[qta_mode]; 497 param->en = 0; 498 break; 499 default: 500 return -EINVAL; 501 } 502 503 if (param_ini.pub_cfg) 504 param->pub_cfg = *param_ini.pub_cfg; 505 506 if (param_ini.prec_cfg) { 507 param->prec_cfg = *param_ini.prec_cfg; 508 rtwdev->hal.sw_amsdu_max_size = 509 param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT; 510 } 511 512 if (param_ini.ch_cfg) 513 param->ch_cfg = param_ini.ch_cfg; 514 515 memset(¶m->ch_info, 0, sizeof(param->ch_info)); 516 memset(¶m->pub_info, 0, sizeof(param->pub_info)); 517 param->mode = param_ini.mode; 518 519 return 0; 520 } 521 522 static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch) 523 { 524 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 525 const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg; 526 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 527 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 528 529 if (ch >= RTW89_DMA_CH_NUM) 530 return -EINVAL; 531 532 if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) || 533 ch_cfg[ch].max > pub_cfg->pub_max) 534 return -EINVAL; 535 if (ch_cfg[ch].grp >= grp_num) 536 return -EINVAL; 537 538 return 0; 539 } 540 541 static int hfc_pub_info_chk(struct rtw89_dev *rtwdev) 542 { 543 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 544 const struct rtw89_hfc_pub_cfg *cfg = ¶m->pub_cfg; 545 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 546 547 if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) { 548 if (rtwdev->chip->chip_id == RTL8852A) 549 return 0; 550 else 551 return -EFAULT; 552 } 553 554 return 0; 555 } 556 557 static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev) 558 { 559 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 560 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 561 562 if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max) 563 return -EFAULT; 564 565 return 0; 566 } 567 568 static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch) 569 { 570 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 571 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 572 int ret = 0; 573 u32 val = 0; 574 575 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 576 if (ret) 577 return ret; 578 579 ret = hfc_ch_cfg_chk(rtwdev, ch); 580 if (ret) 581 return ret; 582 583 if (ch > RTW89_DMA_B1HI) 584 return -EINVAL; 585 586 val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) | 587 u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) | 588 (cfg[ch].grp ? 
B_AX_GRP : 0); 589 rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val); 590 591 return 0; 592 } 593 594 static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch) 595 { 596 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 597 struct rtw89_hfc_ch_info *info = param->ch_info; 598 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 599 u32 val; 600 u32 ret; 601 602 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 603 if (ret) 604 return ret; 605 606 if (ch > RTW89_DMA_H2C) 607 return -EINVAL; 608 609 val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4); 610 info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK); 611 if (ch < RTW89_DMA_H2C) 612 info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK); 613 else 614 info[ch].used = cfg[ch].min - info[ch].aval; 615 616 return 0; 617 } 618 619 static int hfc_pub_ctrl(struct rtw89_dev *rtwdev) 620 { 621 const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg; 622 u32 val; 623 int ret; 624 625 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 626 if (ret) 627 return ret; 628 629 ret = hfc_pub_cfg_chk(rtwdev); 630 if (ret) 631 return ret; 632 633 val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) | 634 u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK); 635 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val); 636 637 val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK); 638 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val); 639 640 return 0; 641 } 642 643 static int hfc_upd_mix_info(struct rtw89_dev *rtwdev) 644 { 645 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 646 struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 647 struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 648 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 649 u32 val; 650 int ret; 651 652 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 653 if (ret) 654 return ret; 655 656 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1); 657 info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK); 658 info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK); 659 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3); 660 info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK); 661 info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK); 662 info->pub_aval = 663 u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2), 664 B_AX_PUB_AVAL_PG_MASK); 665 info->wp_aval = 666 u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1), 667 B_AX_WP_AVAL_PG_MASK); 668 669 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 670 param->en = val & B_AX_HCI_FC_EN ? 1 : 0; 671 param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 
1 : 0; 672 param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK); 673 prec_cfg->ch011_full_cond = 674 u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK); 675 prec_cfg->h2c_full_cond = 676 u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK); 677 prec_cfg->wp_ch07_full_cond = 678 u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 679 prec_cfg->wp_ch811_full_cond = 680 u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 681 682 val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL); 683 prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK); 684 prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK); 685 686 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2); 687 pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK); 688 689 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1); 690 prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK); 691 prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK); 692 693 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2); 694 pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK); 695 696 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1); 697 pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK); 698 pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK); 699 700 ret = hfc_pub_info_chk(rtwdev); 701 if (param->en && ret) 702 return ret; 703 704 return 0; 705 } 706 707 static void hfc_h2c_cfg(struct rtw89_dev *rtwdev) 708 { 709 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 710 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 711 u32 val; 712 713 val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 714 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 715 716 rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL, 717 B_AX_HCI_FC_CH12_FULL_COND_MASK, 718 prec_cfg->h2c_full_cond); 719 } 720 721 static void hfc_mix_cfg(struct rtw89_dev *rtwdev) 722 { 723 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 724 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 725 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 726 u32 val; 727 728 val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) | 729 u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 730 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 731 732 val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK); 733 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val); 734 735 val = u32_encode_bits(prec_cfg->wp_ch07_prec, 736 B_AX_PREC_PAGE_WP_CH07_MASK) | 737 u32_encode_bits(prec_cfg->wp_ch811_prec, 738 B_AX_PREC_PAGE_WP_CH811_MASK); 739 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val); 740 741 val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL), 742 param->mode, B_AX_HCI_FC_MODE_MASK); 743 val = u32_replace_bits(val, prec_cfg->ch011_full_cond, 744 B_AX_HCI_FC_WD_FULL_COND_MASK); 745 val = u32_replace_bits(val, prec_cfg->h2c_full_cond, 746 B_AX_HCI_FC_CH12_FULL_COND_MASK); 747 val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond, 748 B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 749 val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond, 750 B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 751 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 752 } 753 754 static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en) 755 { 756 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 757 u32 val; 758 759 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 760 param->en = en; 761 param->h2c_en = h2c_en; 762 val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN); 763 val = h2c_en ? 
(val | B_AX_HCI_FC_CH12_EN) : 764 (val & ~B_AX_HCI_FC_CH12_EN); 765 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 766 } 767 768 static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en) 769 { 770 u8 ch; 771 u32 ret = 0; 772 773 if (reset) 774 ret = hfc_reset_param(rtwdev); 775 if (ret) 776 return ret; 777 778 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 779 if (ret) 780 return ret; 781 782 hfc_func_en(rtwdev, false, false); 783 784 if (!en && h2c_en) { 785 hfc_h2c_cfg(rtwdev); 786 hfc_func_en(rtwdev, en, h2c_en); 787 return ret; 788 } 789 790 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 791 ret = hfc_ch_ctrl(rtwdev, ch); 792 if (ret) 793 return ret; 794 } 795 796 ret = hfc_pub_ctrl(rtwdev); 797 if (ret) 798 return ret; 799 800 hfc_mix_cfg(rtwdev); 801 if (en || h2c_en) { 802 hfc_func_en(rtwdev, en, h2c_en); 803 udelay(10); 804 } 805 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 806 ret = hfc_upd_ch_info(rtwdev, ch); 807 if (ret) 808 return ret; 809 } 810 ret = hfc_upd_mix_info(rtwdev); 811 812 return ret; 813 } 814 815 #define PWR_POLL_CNT 2000 816 static int pwr_cmd_poll(struct rtw89_dev *rtwdev, 817 const struct rtw89_pwr_cfg *cfg) 818 { 819 u8 val = 0; 820 int ret; 821 u32 addr = cfg->base == PWR_INTF_MSK_SDIO ? 822 cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr; 823 824 ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk), 825 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr); 826 827 if (!ret) 828 return 0; 829 830 rtw89_warn(rtwdev, "[ERR] Polling timeout\n"); 831 rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr); 832 rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val); 833 834 return -EBUSY; 835 } 836 837 static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk, 838 u8 intf_msk, const struct rtw89_pwr_cfg *cfg) 839 { 840 const struct rtw89_pwr_cfg *cur_cfg; 841 u32 addr; 842 u8 val; 843 844 for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) { 845 if (!(cur_cfg->intf_msk & intf_msk) || 846 !(cur_cfg->cv_msk & cv_msk)) 847 continue; 848 849 switch (cur_cfg->cmd) { 850 case PWR_CMD_WRITE: 851 addr = cur_cfg->addr; 852 853 if (cur_cfg->base == PWR_BASE_SDIO) 854 addr |= SDIO_LOCAL_BASE_ADDR; 855 856 val = rtw89_read8(rtwdev, addr); 857 val &= ~(cur_cfg->msk); 858 val |= (cur_cfg->val & cur_cfg->msk); 859 860 rtw89_write8(rtwdev, addr, val); 861 break; 862 case PWR_CMD_POLL: 863 if (pwr_cmd_poll(rtwdev, cur_cfg)) 864 return -EBUSY; 865 break; 866 case PWR_CMD_DELAY: 867 if (cur_cfg->val == PWR_DELAY_US) 868 udelay(cur_cfg->addr); 869 else 870 fsleep(cur_cfg->addr * 1000); 871 break; 872 default: 873 return -EINVAL; 874 } 875 } 876 877 return 0; 878 } 879 880 static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev, 881 const struct rtw89_pwr_cfg * const *cfg_seq) 882 { 883 int ret; 884 885 for (; *cfg_seq; cfg_seq++) { 886 ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv), 887 PWR_INTF_MSK_PCIE, *cfg_seq); 888 if (ret) 889 return -EBUSY; 890 } 891 892 return 0; 893 } 894 895 static enum rtw89_rpwm_req_pwr_state 896 rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev) 897 { 898 enum rtw89_rpwm_req_pwr_state state; 899 900 switch (rtwdev->ps_mode) { 901 case RTW89_PS_MODE_RFOFF: 902 state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF; 903 break; 904 case RTW89_PS_MODE_CLK_GATED: 905 state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED; 906 break; 907 case RTW89_PS_MODE_PWR_GATED: 908 state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED; 909 break; 910 default: 911 state = 
RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 912 break; 913 } 914 return state; 915 } 916 917 static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev, 918 enum rtw89_rpwm_req_pwr_state req_pwr_state) 919 { 920 u16 request; 921 922 request = rtw89_read16(rtwdev, R_AX_RPWM); 923 request ^= request | PS_RPWM_TOGGLE; 924 925 rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) & 926 RPWM_SEQ_NUM_MAX; 927 request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num); 928 929 request |= req_pwr_state; 930 931 if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 932 request |= PS_RPWM_ACK; 933 934 rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request); 935 } 936 937 static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, 938 enum rtw89_rpwm_req_pwr_state req_pwr_state) 939 { 940 bool request_deep_mode; 941 bool in_deep_mode; 942 u8 rpwm_req_num; 943 u8 cpwm_rsp_seq; 944 u8 cpwm_seq; 945 u8 cpwm_status; 946 947 if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 948 request_deep_mode = true; 949 else 950 request_deep_mode = false; 951 952 if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K)) 953 in_deep_mode = true; 954 else 955 in_deep_mode = false; 956 957 if (request_deep_mode != in_deep_mode) 958 return -EPERM; 959 960 if (request_deep_mode) 961 return 0; 962 963 rpwm_req_num = rtwdev->mac.rpwm_seq_num; 964 cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, 965 PS_CPWM_RSP_SEQ_NUM); 966 967 if (rpwm_req_num != cpwm_rsp_seq) 968 return -EPERM; 969 970 rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & 971 CPWM_SEQ_NUM_MAX; 972 973 cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM); 974 if (cpwm_seq != rtwdev->mac.cpwm_seq_num) 975 return -EPERM; 976 977 cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE); 978 if (cpwm_status != req_pwr_state) 979 return -EPERM; 980 981 return 0; 982 } 983 984 void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) 985 { 986 enum rtw89_rpwm_req_pwr_state state; 987 int ret; 988 989 if (enter) 990 state = rtw89_mac_get_req_pwr_state(rtwdev); 991 else 992 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 993 994 rtw89_mac_send_rpwm(rtwdev, state); 995 ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret, 996 1000, 15000, false, rtwdev, state); 997 if (ret) 998 rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", 999 enter ? 
"entering" : "leaving"); 1000 } 1001 1002 static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) 1003 { 1004 #define PWR_ACT 1 1005 const struct rtw89_chip_info *chip = rtwdev->chip; 1006 const struct rtw89_pwr_cfg * const *cfg_seq; 1007 struct rtw89_hal *hal = &rtwdev->hal; 1008 int ret; 1009 u8 val; 1010 1011 if (on) 1012 cfg_seq = chip->pwr_on_seq; 1013 else 1014 cfg_seq = chip->pwr_off_seq; 1015 1016 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 1017 __rtw89_leave_ps_mode(rtwdev); 1018 1019 val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK); 1020 if (on && val == PWR_ACT) { 1021 rtw89_err(rtwdev, "MAC has already powered on\n"); 1022 return -EBUSY; 1023 } 1024 1025 ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq); 1026 if (ret) 1027 return ret; 1028 1029 if (on) { 1030 set_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1031 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR); 1032 } else { 1033 clear_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1034 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 1035 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR); 1036 hal->current_channel = 0; 1037 } 1038 1039 return 0; 1040 #undef PWR_ACT 1041 } 1042 1043 void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) 1044 { 1045 rtw89_mac_power_switch(rtwdev, false); 1046 } 1047 1048 static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 1049 { 1050 u32 func_en = 0; 1051 u32 ck_en = 0; 1052 u32 c1pc_en = 0; 1053 u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1}; 1054 u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1}; 1055 1056 func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | 1057 B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | 1058 B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN; 1059 ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN | 1060 B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN | 1061 B_AX_RMAC_CKEN; 1062 c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN | 1063 B_AX_R_SYM_WLCMAC1_P1_PC_EN | 1064 B_AX_R_SYM_WLCMAC1_P2_PC_EN | 1065 B_AX_R_SYM_WLCMAC1_P3_PC_EN | 1066 B_AX_R_SYM_WLCMAC1_P4_PC_EN; 1067 1068 if (en) { 1069 if (mac_idx == RTW89_MAC_1) { 1070 rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1071 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1072 B_AX_R_SYM_ISO_CMAC12PP); 1073 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1074 B_AX_CMAC1_FEN); 1075 } 1076 rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en); 1077 rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en); 1078 } else { 1079 rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en); 1080 rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en); 1081 if (mac_idx == RTW89_MAC_1) { 1082 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1083 B_AX_CMAC1_FEN); 1084 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1085 B_AX_R_SYM_ISO_CMAC12PP); 1086 rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1087 } 1088 } 1089 1090 return 0; 1091 } 1092 1093 static int dmac_func_en(struct rtw89_dev *rtwdev) 1094 { 1095 u32 val32; 1096 u32 ret = 0; 1097 1098 val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN | 1099 B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | 1100 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN | 1101 B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN); 1102 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32); 1103 1104 val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN | 1105 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN | 1106 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN 
| 1107 B_AX_WD_RLS_CLK_EN); 1108 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); 1109 1110 return ret; 1111 } 1112 1113 static int chip_func_en(struct rtw89_dev *rtwdev) 1114 { 1115 rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK); 1116 1117 return 0; 1118 } 1119 1120 static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev) 1121 { 1122 int ret; 1123 1124 ret = dmac_func_en(rtwdev); 1125 if (ret) 1126 return ret; 1127 1128 ret = cmac_func_en(rtwdev, 0, true); 1129 if (ret) 1130 return ret; 1131 1132 ret = chip_func_en(rtwdev); 1133 if (ret) 1134 return ret; 1135 1136 return ret; 1137 } 1138 1139 /* PCIE 64 */ 1140 const struct rtw89_dle_size wde_size0 = { 1141 RTW89_WDE_PG_64, 4095, 1, 1142 }; 1143 1144 /* DLFW */ 1145 const struct rtw89_dle_size wde_size4 = { 1146 RTW89_WDE_PG_64, 0, 4096, 1147 }; 1148 1149 /* PCIE */ 1150 const struct rtw89_dle_size ple_size0 = { 1151 RTW89_PLE_PG_128, 1520, 16, 1152 }; 1153 1154 /* DLFW */ 1155 const struct rtw89_dle_size ple_size4 = { 1156 RTW89_PLE_PG_128, 64, 1472, 1157 }; 1158 1159 /* PCIE 64 */ 1160 const struct rtw89_wde_quota wde_qt0 = { 1161 3792, 196, 0, 107, 1162 }; 1163 1164 /* DLFW */ 1165 const struct rtw89_wde_quota wde_qt4 = { 1166 0, 0, 0, 0, 1167 }; 1168 1169 /* PCIE SCC */ 1170 const struct rtw89_ple_quota ple_qt4 = { 1171 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8, 1172 }; 1173 1174 /* PCIE SCC */ 1175 const struct rtw89_ple_quota ple_qt5 = { 1176 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120, 1177 }; 1178 1179 /* DLFW */ 1180 const struct rtw89_ple_quota ple_qt13 = { 1181 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0 1182 }; 1183 1184 static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, 1185 enum rtw89_qta_mode mode) 1186 { 1187 struct rtw89_mac_info *mac = &rtwdev->mac; 1188 const struct rtw89_dle_mem *cfg; 1189 1190 cfg = &rtwdev->chip->dle_mem[mode]; 1191 if (!cfg) 1192 return NULL; 1193 1194 if (cfg->mode != mode) { 1195 rtw89_warn(rtwdev, "qta mode unmatch!\n"); 1196 return NULL; 1197 } 1198 1199 mac->dle_info.wde_pg_size = cfg->wde_size->pge_size; 1200 mac->dle_info.ple_pg_size = cfg->ple_size->pge_size; 1201 mac->dle_info.qta_mode = mode; 1202 mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma; 1203 mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma; 1204 1205 return cfg; 1206 } 1207 1208 static inline u32 dle_used_size(const struct rtw89_dle_size *wde, 1209 const struct rtw89_dle_size *ple) 1210 { 1211 return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) + 1212 ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num); 1213 } 1214 1215 static void dle_func_en(struct rtw89_dev *rtwdev, bool enable) 1216 { 1217 if (enable) 1218 rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, 1219 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1220 else 1221 rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN, 1222 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1223 } 1224 1225 static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable) 1226 { 1227 if (enable) 1228 rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN, 1229 B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); 1230 else 1231 rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN, 1232 B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); 1233 } 1234 1235 static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg) 1236 { 1237 const struct rtw89_dle_size *size_cfg; 1238 u32 val; 1239 u8 bound = 0; 1240 1241 val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG); 1242 size_cfg = cfg->wde_size; 1243 1244 switch (size_cfg->pge_size) { 1245 default: 1246 case RTW89_WDE_PG_64: 1247 val = 
u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64, 1248 B_AX_WDE_PAGE_SEL_MASK); 1249 break; 1250 case RTW89_WDE_PG_128: 1251 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128, 1252 B_AX_WDE_PAGE_SEL_MASK); 1253 break; 1254 case RTW89_WDE_PG_256: 1255 rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n"); 1256 return -EINVAL; 1257 } 1258 1259 val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK); 1260 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1261 B_AX_WDE_FREE_PAGE_NUM_MASK); 1262 rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val); 1263 1264 val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG); 1265 bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num) 1266 * size_cfg->pge_size / DLE_BOUND_UNIT; 1267 size_cfg = cfg->ple_size; 1268 1269 switch (size_cfg->pge_size) { 1270 default: 1271 case RTW89_PLE_PG_64: 1272 rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n"); 1273 return -EINVAL; 1274 case RTW89_PLE_PG_128: 1275 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128, 1276 B_AX_PLE_PAGE_SEL_MASK); 1277 break; 1278 case RTW89_PLE_PG_256: 1279 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256, 1280 B_AX_PLE_PAGE_SEL_MASK); 1281 break; 1282 } 1283 1284 val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK); 1285 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1286 B_AX_PLE_FREE_PAGE_NUM_MASK); 1287 rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val); 1288 1289 return 0; 1290 } 1291 1292 #define INVALID_QT_WCPU U16_MAX 1293 #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \ 1294 do { \ 1295 val = ((_min_x) & \ 1296 B_AX_ ## _module ## _MIN_SIZE_MASK) | \ 1297 (((_max_x) << 16) & \ 1298 B_AX_ ## _module ## _MAX_SIZE_MASK); \ 1299 rtw89_write32(rtwdev, \ 1300 R_AX_ ## _module ## _QTA ## _idx ## _CFG, \ 1301 val); \ 1302 } while (0) 1303 #define SET_QUOTA(_x, _module, _idx) \ 1304 SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx) 1305 1306 static void wde_quota_cfg(struct rtw89_dev *rtwdev, 1307 const struct rtw89_wde_quota *min_cfg, 1308 const struct rtw89_wde_quota *max_cfg, 1309 u16 ext_wde_min_qt_wcpu) 1310 { 1311 u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ? 
1312 ext_wde_min_qt_wcpu : min_cfg->wcpu; 1313 u32 val; 1314 1315 SET_QUOTA(hif, WDE, 0); 1316 SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1); 1317 SET_QUOTA(pkt_in, WDE, 3); 1318 SET_QUOTA(cpu_io, WDE, 4); 1319 } 1320 1321 static void ple_quota_cfg(struct rtw89_dev *rtwdev, 1322 const struct rtw89_ple_quota *min_cfg, 1323 const struct rtw89_ple_quota *max_cfg) 1324 { 1325 u32 val; 1326 1327 SET_QUOTA(cma0_tx, PLE, 0); 1328 SET_QUOTA(cma1_tx, PLE, 1); 1329 SET_QUOTA(c2h, PLE, 2); 1330 SET_QUOTA(h2c, PLE, 3); 1331 SET_QUOTA(wcpu, PLE, 4); 1332 SET_QUOTA(mpdu_proc, PLE, 5); 1333 SET_QUOTA(cma0_dma, PLE, 6); 1334 SET_QUOTA(cma1_dma, PLE, 7); 1335 SET_QUOTA(bb_rpt, PLE, 8); 1336 SET_QUOTA(wd_rel, PLE, 9); 1337 SET_QUOTA(cpu_io, PLE, 10); 1338 } 1339 1340 #undef SET_QUOTA 1341 1342 static void dle_quota_cfg(struct rtw89_dev *rtwdev, 1343 const struct rtw89_dle_mem *cfg, 1344 u16 ext_wde_min_qt_wcpu) 1345 { 1346 wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu); 1347 ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt); 1348 } 1349 1350 static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, 1351 enum rtw89_qta_mode ext_mode) 1352 { 1353 const struct rtw89_dle_mem *cfg, *ext_cfg; 1354 u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU; 1355 int ret = 0; 1356 u32 ini; 1357 1358 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1359 if (ret) 1360 return ret; 1361 1362 cfg = get_dle_mem_cfg(rtwdev, mode); 1363 if (!cfg) { 1364 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1365 ret = -EINVAL; 1366 goto error; 1367 } 1368 1369 if (mode == RTW89_QTA_DLFW) { 1370 ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode); 1371 if (!ext_cfg) { 1372 rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n", 1373 ext_mode); 1374 ret = -EINVAL; 1375 goto error; 1376 } 1377 ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu; 1378 } 1379 1380 if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { 1381 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 1382 ret = -EINVAL; 1383 goto error; 1384 } 1385 1386 dle_func_en(rtwdev, false); 1387 dle_clk_en(rtwdev, true); 1388 1389 ret = dle_mix_cfg(rtwdev, cfg); 1390 if (ret) { 1391 rtw89_err(rtwdev, "[ERR] dle mix cfg\n"); 1392 goto error; 1393 } 1394 dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu); 1395 1396 dle_func_en(rtwdev, true); 1397 1398 ret = read_poll_timeout(rtw89_read32, ini, 1399 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1400 2000, false, rtwdev, R_AX_WDE_INI_STATUS); 1401 if (ret) { 1402 rtw89_err(rtwdev, "[ERR]WDE cfg ready\n"); 1403 return ret; 1404 } 1405 1406 ret = read_poll_timeout(rtw89_read32, ini, 1407 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1408 2000, false, rtwdev, R_AX_PLE_INI_STATUS); 1409 if (ret) { 1410 rtw89_err(rtwdev, "[ERR]PLE cfg ready\n"); 1411 return ret; 1412 } 1413 1414 return 0; 1415 error: 1416 dle_func_en(rtwdev, false); 1417 rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n", 1418 rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS)); 1419 rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n", 1420 rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS)); 1421 1422 return ret; 1423 } 1424 1425 static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) 1426 { 1427 u32 msk32; 1428 u32 val32; 1429 1430 msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH | 1431 B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 | 1432 B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS | 1433 B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 1434 
B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | 1435 B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | 1436 B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | 1437 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | 1438 B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 1439 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX | 1440 B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 1441 B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 1442 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU; 1443 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 1444 1445 if ((val32 & msk32) == msk32) 1446 return true; 1447 1448 return false; 1449 } 1450 1451 static int sta_sch_init(struct rtw89_dev *rtwdev) 1452 { 1453 u32 p_val; 1454 u8 val; 1455 int ret; 1456 1457 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1458 if (ret) 1459 return ret; 1460 1461 val = rtw89_read8(rtwdev, R_AX_SS_CTRL); 1462 val |= B_AX_SS_EN; 1463 rtw89_write8(rtwdev, R_AX_SS_CTRL, val); 1464 1465 ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1, 1466 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL); 1467 if (ret) { 1468 rtw89_err(rtwdev, "[ERR]STA scheduler init\n"); 1469 return ret; 1470 } 1471 1472 rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); 1473 1474 return 0; 1475 } 1476 1477 static int mpdu_proc_init(struct rtw89_dev *rtwdev) 1478 { 1479 int ret; 1480 1481 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1482 if (ret) 1483 return ret; 1484 1485 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); 1486 rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); 1487 rtw89_write32_set(rtwdev, R_AX_MPDU_PROC, 1488 B_AX_APPEND_FCS | B_AX_A_ICV_ERR); 1489 rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL); 1490 1491 return 0; 1492 } 1493 1494 static int sec_eng_init(struct rtw89_dev *rtwdev) 1495 { 1496 u32 val = 0; 1497 int ret; 1498 1499 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1500 if (ret) 1501 return ret; 1502 1503 val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL); 1504 /* init clock */ 1505 val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP); 1506 /* init TX encryption */ 1507 val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); 1508 val |= (B_AX_MC_DEC | B_AX_BC_DEC); 1509 val &= ~B_AX_TX_PARTIAL_MODE; 1510 rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); 1511 1512 /* init MIC ICV append */ 1513 val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); 1514 val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); 1515 1516 /* option init */ 1517 rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); 1518 1519 return 0; 1520 } 1521 1522 static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1523 { 1524 int ret; 1525 1526 ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); 1527 if (ret) { 1528 rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); 1529 return ret; 1530 } 1531 1532 ret = hfc_init(rtwdev, true, true, true); 1533 if (ret) { 1534 rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); 1535 return ret; 1536 } 1537 1538 ret = sta_sch_init(rtwdev); 1539 if (ret) { 1540 rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); 1541 return ret; 1542 } 1543 1544 ret = mpdu_proc_init(rtwdev); 1545 if (ret) { 1546 rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); 1547 return ret; 1548 } 1549 1550 ret = sec_eng_init(rtwdev); 1551 if (ret) { 1552 rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); 1553 return ret; 1554 } 1555 1556 return ret; 1557 } 1558 1559 static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1560 { 1561 u32 val, reg; 1562 u16 p_val; 1563 
int ret; 1564 1565 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1566 if (ret) 1567 return ret; 1568 1569 reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); 1570 1571 val = rtw89_read32(rtwdev, reg); 1572 val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | 1573 B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; 1574 rtw89_write32(rtwdev, reg, val); 1575 1576 ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), 1577 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR); 1578 if (ret) { 1579 rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); 1580 return ret; 1581 } 1582 1583 return 0; 1584 } 1585 1586 static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1587 { 1588 u32 ret; 1589 u32 reg; 1590 1591 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1592 if (ret) 1593 return ret; 1594 1595 reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); 1596 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US); 1597 1598 return 0; 1599 } 1600 1601 static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, 1602 enum rtw89_machdr_frame_type type, 1603 enum rtw89_mac_fwd_target fwd_target, 1604 u8 mac_idx) 1605 { 1606 u32 reg; 1607 u32 val; 1608 1609 switch (fwd_target) { 1610 case RTW89_FWD_DONT_CARE: 1611 val = RX_FLTR_FRAME_DROP; 1612 break; 1613 case RTW89_FWD_TO_HOST: 1614 val = RX_FLTR_FRAME_TO_HOST; 1615 break; 1616 case RTW89_FWD_TO_WLAN_CPU: 1617 val = RX_FLTR_FRAME_TO_WLCPU; 1618 break; 1619 default: 1620 rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); 1621 return -EINVAL; 1622 } 1623 1624 switch (type) { 1625 case RTW89_MGNT: 1626 reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); 1627 break; 1628 case RTW89_CTRL: 1629 reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); 1630 break; 1631 case RTW89_DATA: 1632 reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); 1633 break; 1634 default: 1635 rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); 1636 return -EINVAL; 1637 } 1638 rtw89_write32(rtwdev, reg, val); 1639 1640 return 0; 1641 } 1642 1643 static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1644 { 1645 int ret, i; 1646 u32 mac_ftlr, plcp_ftlr; 1647 1648 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1649 if (ret) 1650 return ret; 1651 1652 for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { 1653 ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST, 1654 mac_idx); 1655 if (ret) 1656 return ret; 1657 } 1658 mac_ftlr = rtwdev->hal.rx_fltr; 1659 plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | 1660 B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | 1661 B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | 1662 B_AX_HE_SIGB_CRC_CHK; 1663 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx), 1664 mac_ftlr); 1665 rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx), 1666 plcp_ftlr); 1667 1668 return 0; 1669 } 1670 1671 static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) 1672 { 1673 u32 reg, val32; 1674 u32 b_rsp_chk_nav, b_rsp_chk_cca; 1675 1676 b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV | 1677 B_AX_RSP_CHK_BASIC_NAV; 1678 b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 | 1679 B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA | 1680 B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA; 1681 1682 switch (rtwdev->chip->chip_id) { 1683 case RTL8852A: 1684 case RTL8852B: 1685 reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); 1686 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; 1687 rtw89_write32(rtwdev, reg, val32); 1688 
1689 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1690 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; 1691 rtw89_write32(rtwdev, reg, val32); 1692 break; 1693 default: 1694 reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); 1695 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; 1696 rtw89_write32(rtwdev, reg, val32); 1697 1698 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1699 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; 1700 rtw89_write32(rtwdev, reg, val32); 1701 break; 1702 } 1703 } 1704 1705 static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1706 { 1707 u32 val, reg; 1708 int ret; 1709 1710 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1711 if (ret) 1712 return ret; 1713 1714 reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx); 1715 val = rtw89_read32(rtwdev, reg); 1716 val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | 1717 B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | 1718 B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 | 1719 B_AX_CTN_CHK_INTRA_NAV | 1720 B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | 1721 B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | 1722 B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | 1723 B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA); 1724 val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | 1725 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | 1726 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | 1727 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV); 1728 1729 rtw89_write32(rtwdev, reg, val); 1730 1731 _patch_dis_resp_chk(rtwdev, mac_idx); 1732 1733 return 0; 1734 } 1735 1736 static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1737 { 1738 u32 reg; 1739 int ret; 1740 1741 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1742 if (ret) 1743 return ret; 1744 reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx); 1745 rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); 1746 1747 return 0; 1748 } 1749 1750 static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1751 { 1752 u32 reg; 1753 int ret; 1754 1755 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1756 if (ret) 1757 return ret; 1758 1759 reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); 1760 rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); 1761 1762 return 0; 1763 } 1764 1765 static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1766 { 1767 u32 reg, val, sifs; 1768 int ret; 1769 1770 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1771 if (ret) 1772 return ret; 1773 1774 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); 1775 val = rtw89_read32(rtwdev, reg); 1776 val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; 1777 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); 1778 1779 switch (rtwdev->chip->chip_id) { 1780 case RTL8852A: 1781 sifs = WMAC_SPEC_SIFS_OFDM_52A; 1782 break; 1783 case RTL8852B: 1784 sifs = WMAC_SPEC_SIFS_OFDM_52B; 1785 break; 1786 default: 1787 sifs = WMAC_SPEC_SIFS_OFDM_52C; 1788 break; 1789 } 1790 val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK; 1791 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); 1792 rtw89_write32(rtwdev, reg, val); 1793 1794 reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx); 1795 rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); 1796 1797 return 0; 1798 } 1799 1800 static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1801 { 1802 #define TRXCFG_RMAC_CCA_TO 32 1803 #define TRXCFG_RMAC_DATA_TO 15 1804 #define RX_MAX_LEN_UNIT 512 1805 #define PLD_RLS_MAX_PG 127 1806 int ret; 1807 u32 reg, rx_max_len, rx_qta; 1808 u16 val; 1809 1810 
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1811 if (ret) 1812 return ret; 1813 1814 reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); 1815 rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); 1816 1817 reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx); 1818 val = rtw89_read16(rtwdev, reg); 1819 val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, 1820 B_AX_RX_DLK_DATA_TIME_MASK); 1821 val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO, 1822 B_AX_RX_DLK_CCA_TIME_MASK); 1823 rtw89_write16(rtwdev, reg, val); 1824 1825 reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx); 1826 rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); 1827 1828 reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx); 1829 if (mac_idx == RTW89_MAC_0) 1830 rx_qta = rtwdev->mac.dle_info.c0_rx_qta; 1831 else 1832 rx_qta = rtwdev->mac.dle_info.c1_rx_qta; 1833 rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta; 1834 rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size / 1835 RX_MAX_LEN_UNIT; 1836 rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ? 1837 B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len; 1838 rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); 1839 1840 if (rtwdev->chip->chip_id == RTL8852A && 1841 rtwdev->hal.cv == CHIP_CBV) { 1842 rtw89_write16_mask(rtwdev, 1843 rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx), 1844 B_AX_RX_DLK_CCA_TIME_MASK, 0); 1845 rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx), 1846 BIT(12)); 1847 } 1848 1849 reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx); 1850 rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); 1851 1852 return ret; 1853 } 1854 1855 static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1856 { 1857 u32 val, reg; 1858 int ret; 1859 1860 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1861 if (ret) 1862 return ret; 1863 1864 reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); 1865 val = rtw89_read32(rtwdev, reg); 1866 val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); 1867 val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); 1868 val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); 1869 rtw89_write32(rtwdev, reg, val); 1870 1871 return 0; 1872 } 1873 1874 static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 1875 { 1876 const struct rtw89_dle_mem *cfg; 1877 1878 cfg = get_dle_mem_cfg(rtwdev, mode); 1879 if (!cfg) { 1880 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1881 return false; 1882 } 1883 1884 return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma); 1885 } 1886 1887 static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1888 { 1889 u32 val, reg; 1890 int ret; 1891 1892 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1893 if (ret) 1894 return ret; 1895 1896 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 1897 reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); 1898 val = rtw89_read32(rtwdev, reg); 1899 val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, 1900 B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); 1901 val |= B_AX_HW_CTS2SELF_EN; 1902 rtw89_write32(rtwdev, reg, val); 1903 1904 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); 1905 val = rtw89_read32(rtwdev, reg); 1906 val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); 1907 val &= ~B_AX_PTCL_TX_ARB_TO_MODE; 1908 rtw89_write32(rtwdev, reg, val); 1909 } 1910 1911 reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); 1912 val = rtw89_read32(rtwdev, reg); 1913 val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, 
B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); 1914 val |= B_AX_HW_CTS2SELF_EN; 1915 rtw89_write32(rtwdev, reg, val); 1916 1917 return 0; 1918 } 1919 1920 static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1921 { 1922 int ret; 1923 1924 ret = scheduler_init(rtwdev, mac_idx); 1925 if (ret) { 1926 rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); 1927 return ret; 1928 } 1929 1930 ret = addr_cam_init(rtwdev, mac_idx); 1931 if (ret) { 1932 rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, 1933 ret); 1934 return ret; 1935 } 1936 1937 ret = rx_fltr_init(rtwdev, mac_idx); 1938 if (ret) { 1939 rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, 1940 ret); 1941 return ret; 1942 } 1943 1944 ret = cca_ctrl_init(rtwdev, mac_idx); 1945 if (ret) { 1946 rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, 1947 ret); 1948 return ret; 1949 } 1950 1951 ret = spatial_reuse_init(rtwdev, mac_idx); 1952 if (ret) { 1953 rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", 1954 mac_idx, ret); 1955 return ret; 1956 } 1957 1958 ret = tmac_init(rtwdev, mac_idx); 1959 if (ret) { 1960 rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); 1961 return ret; 1962 } 1963 1964 ret = trxptcl_init(rtwdev, mac_idx); 1965 if (ret) { 1966 rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); 1967 return ret; 1968 } 1969 1970 ret = rmac_init(rtwdev, mac_idx); 1971 if (ret) { 1972 rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); 1973 return ret; 1974 } 1975 1976 ret = cmac_com_init(rtwdev, mac_idx); 1977 if (ret) { 1978 rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); 1979 return ret; 1980 } 1981 1982 ret = ptcl_init(rtwdev, mac_idx); 1983 if (ret) { 1984 rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); 1985 return ret; 1986 } 1987 1988 return ret; 1989 } 1990 1991 static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, 1992 struct rtw89_mac_c2h_info *c2h_info) 1993 { 1994 struct rtw89_mac_h2c_info h2c_info = {0}; 1995 u32 ret; 1996 1997 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; 1998 h2c_info.content_len = 0; 1999 2000 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); 2001 if (ret) 2002 return ret; 2003 2004 if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP) 2005 return -EINVAL; 2006 2007 return 0; 2008 } 2009 2010 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) 2011 { 2012 struct rtw89_hal *hal = &rtwdev->hal; 2013 const struct rtw89_chip_info *chip = rtwdev->chip; 2014 struct rtw89_mac_c2h_info c2h_info = {0}; 2015 struct rtw89_c2h_phy_cap *cap = 2016 (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0]; 2017 u32 ret; 2018 2019 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info); 2020 if (ret) 2021 return ret; 2022 2023 hal->tx_nss = cap->tx_nss ? 2024 min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss; 2025 hal->rx_nss = cap->rx_nss ? 
2026 min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss; 2027 2028 rtw89_debug(rtwdev, RTW89_DBG_FW, 2029 "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", 2030 hal->tx_nss, cap->tx_nss, chip->tx_nss, 2031 hal->rx_nss, cap->rx_nss, chip->rx_nss); 2032 2033 return 0; 2034 } 2035 2036 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 2037 u16 tx_en_u16, u16 mask_u16) 2038 { 2039 u32 ret; 2040 struct rtw89_mac_c2h_info c2h_info = {0}; 2041 struct rtw89_mac_h2c_info h2c_info = {0}; 2042 struct rtw89_h2creg_sch_tx_en *h2creg = 2043 (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg; 2044 2045 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 2046 h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN; 2047 h2creg->tx_en = tx_en_u16; 2048 h2creg->mask = mask_u16; 2049 h2creg->band = band; 2050 2051 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 2052 if (ret) 2053 return ret; 2054 2055 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) 2056 return -EINVAL; 2057 2058 return 0; 2059 } 2060 2061 static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, 2062 u16 tx_en, u16 tx_en_mask) 2063 { 2064 u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); 2065 u16 val; 2066 int ret; 2067 2068 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2069 if (ret) 2070 return ret; 2071 2072 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 2073 return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, 2074 tx_en, tx_en_mask); 2075 2076 val = rtw89_read16(rtwdev, reg); 2077 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 2078 rtw89_write16(rtwdev, reg, val); 2079 2080 return 0; 2081 } 2082 2083 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, 2084 u16 *tx_en, enum rtw89_sch_tx_sel sel) 2085 { 2086 int ret; 2087 2088 *tx_en = rtw89_read16(rtwdev, 2089 rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); 2090 2091 switch (sel) { 2092 case RTW89_SCH_TX_SEL_ALL: 2093 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); 2094 if (ret) 2095 return ret; 2096 break; 2097 case RTW89_SCH_TX_SEL_HIQ: 2098 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2099 0, B_AX_CTN_TXEN_HGQ); 2100 if (ret) 2101 return ret; 2102 break; 2103 case RTW89_SCH_TX_SEL_MG0: 2104 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2105 0, B_AX_CTN_TXEN_MGQ); 2106 if (ret) 2107 return ret; 2108 break; 2109 case RTW89_SCH_TX_SEL_MACID: 2110 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); 2111 if (ret) 2112 return ret; 2113 break; 2114 default: 2115 return 0; 2116 } 2117 2118 return 0; 2119 } 2120 2121 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en) 2122 { 2123 int ret; 2124 2125 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff); 2126 if (ret) 2127 return ret; 2128 2129 return 0; 2130 } 2131 2132 static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, 2133 bool wd) 2134 { 2135 u32 val, reg; 2136 int ret; 2137 2138 reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; 2139 val = buf_len; 2140 val |= B_AX_WD_BUF_REQ_EXEC; 2141 rtw89_write32(rtwdev, reg, val); 2142 2143 reg = wd ? 
R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; 2144 2145 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, 2146 1, 2000, false, rtwdev, reg); 2147 if (ret) 2148 return 0xffff; 2149 2150 return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); 2151 } 2152 2153 static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev, 2154 struct rtw89_cpuio_ctrl *ctrl_para, 2155 bool wd) 2156 { 2157 u32 val, cmd_type, reg; 2158 int ret; 2159 2160 cmd_type = ctrl_para->cmd_type; 2161 2162 reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; 2163 val = 0; 2164 val = u32_replace_bits(val, ctrl_para->start_pktid, 2165 B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); 2166 val = u32_replace_bits(val, ctrl_para->end_pktid, 2167 B_AX_WD_CPUQ_OP_END_PKTID_MASK); 2168 rtw89_write32(rtwdev, reg, val); 2169 2170 reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; 2171 val = 0; 2172 val = u32_replace_bits(val, ctrl_para->src_pid, 2173 B_AX_CPUQ_OP_SRC_PID_MASK); 2174 val = u32_replace_bits(val, ctrl_para->src_qid, 2175 B_AX_CPUQ_OP_SRC_QID_MASK); 2176 val = u32_replace_bits(val, ctrl_para->dst_pid, 2177 B_AX_CPUQ_OP_DST_PID_MASK); 2178 val = u32_replace_bits(val, ctrl_para->dst_qid, 2179 B_AX_CPUQ_OP_DST_QID_MASK); 2180 rtw89_write32(rtwdev, reg, val); 2181 2182 reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; 2183 val = 0; 2184 val = u32_replace_bits(val, cmd_type, 2185 B_AX_CPUQ_OP_CMD_TYPE_MASK); 2186 val = u32_replace_bits(val, ctrl_para->macid, 2187 B_AX_CPUQ_OP_MACID_MASK); 2188 val = u32_replace_bits(val, ctrl_para->pkt_num, 2189 B_AX_CPUQ_OP_PKTNUM_MASK); 2190 val |= B_AX_WD_CPUQ_OP_EXEC; 2191 rtw89_write32(rtwdev, reg, val); 2192 2193 reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; 2194 2195 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, 2196 1, 2000, false, rtwdev, reg); 2197 if (ret) 2198 return ret; 2199 2200 if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || 2201 cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) 2202 ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); 2203 2204 return 0; 2205 } 2206 2207 static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 2208 { 2209 const struct rtw89_dle_mem *cfg; 2210 struct rtw89_cpuio_ctrl ctrl_para = {0}; 2211 u16 pkt_id; 2212 int ret; 2213 2214 cfg = get_dle_mem_cfg(rtwdev, mode); 2215 if (!cfg) { 2216 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2217 return -EINVAL; 2218 } 2219 2220 if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { 2221 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2222 return -EINVAL; 2223 } 2224 2225 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); 2226 2227 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true); 2228 if (pkt_id == 0xffff) { 2229 rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); 2230 return -ENOMEM; 2231 } 2232 2233 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2234 ctrl_para.start_pktid = pkt_id; 2235 ctrl_para.end_pktid = pkt_id; 2236 ctrl_para.pkt_num = 0; 2237 ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; 2238 ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; 2239 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true); 2240 if (ret) { 2241 rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); 2242 return -EFAULT; 2243 } 2244 2245 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false); 2246 if (pkt_id == 0xffff) { 2247 rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); 2248 return -ENOMEM; 2249 } 2250 2251 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2252 ctrl_para.start_pktid = pkt_id; 2253 ctrl_para.end_pktid = pkt_id; 2254 ctrl_para.pkt_num = 0; 2255 ctrl_para.dst_pid = 
PLE_DLE_PORT_ID_PLRLS; 2256 ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; 2257 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false); 2258 if (ret) { 2259 rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); 2260 return -EFAULT; 2261 } 2262 2263 return 0; 2264 } 2265 2266 static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) 2267 { 2268 int ret; 2269 u32 reg; 2270 u8 val; 2271 2272 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2273 if (ret) 2274 return ret; 2275 2276 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); 2277 2278 ret = read_poll_timeout(rtw89_read8, val, 2279 (val & B_AX_PTCL_TX_ON_STAT) == 0, 2280 SW_CVR_DUR_US, 2281 SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, 2282 false, rtwdev, reg); 2283 if (ret) 2284 return ret; 2285 2286 return 0; 2287 } 2288 2289 static int band1_enable(struct rtw89_dev *rtwdev) 2290 { 2291 int ret, i; 2292 u32 sleep_bak[4] = {0}; 2293 u32 pause_bak[4] = {0}; 2294 u16 tx_en; 2295 2296 ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); 2297 if (ret) { 2298 rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); 2299 return ret; 2300 } 2301 2302 for (i = 0; i < 4; i++) { 2303 sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); 2304 pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); 2305 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); 2306 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); 2307 } 2308 2309 ret = band_idle_ck_b(rtwdev, 0); 2310 if (ret) { 2311 rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); 2312 return ret; 2313 } 2314 2315 ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode); 2316 if (ret) { 2317 rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); 2318 return ret; 2319 } 2320 2321 for (i = 0; i < 4; i++) { 2322 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); 2323 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); 2324 } 2325 2326 ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret); 2329 return ret; 2330 } 2331 2332 ret = cmac_func_en(rtwdev, 1, true); 2333 if (ret) { 2334 rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); 2335 return ret; 2336 } 2337 2338 ret = cmac_init(rtwdev, 1); 2339 if (ret) { 2340 rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); 2341 return ret; 2342 } 2343 2344 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 2345 B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); 2346 2347 return 0; 2348 } 2349 2350 static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, 2351 enum rtw89_mac_hwmod_sel sel) 2352 { 2353 u32 reg, val; 2354 int ret; 2355 2356 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); 2357 if (ret) { 2358 rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", 2359 sel, mac_idx); 2360 return ret; 2361 } 2362 2363 if (sel == RTW89_DMAC_SEL) { 2364 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, 2365 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2366 B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | 2367 B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN); 2368 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1, 2369 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2370 B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN); 2371 rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 2372 B_AX_HDT_PKT_FAIL_DBG_INT_EN | 2373 B_AX_HDT_OFFSET_UNMATCH_INT_EN); 2374 rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 2375 B_AX_CPU_SHIFT_EN_ERR_INT_EN); 2376 rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, 2377 
B_AX_PLE_GETNPG_STRPG_ERR_INT_EN); 2378 rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, 2379 B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN); 2380 rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN); 2381 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, 2382 B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN); 2383 } else if (sel == RTW89_CMAC_SEL) { 2384 reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); 2385 rtw89_write32_clr(rtwdev, reg, 2386 B_AX_SORT_NON_IDLE_ERR_INT_EN); 2387 2388 reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx); 2389 rtw89_write32_clr(rtwdev, reg, 2390 B_AX_NO_RESERVE_PAGE_ERR_IMR | 2391 B_AX_RXDATA_FSM_HANG_ERROR_IMR); 2392 2393 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); 2394 val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | 2395 B_AX_TX_RECORD_PKTID_ERR_INT_EN | 2396 B_AX_FSM_TIMEOUT_ERR_INT_EN; 2397 rtw89_write32(rtwdev, reg, val); 2398 2399 reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx); 2400 rtw89_write32_set(rtwdev, reg, 2401 B_AX_PHY_TXON_TIMEOUT_INT_EN | 2402 B_AX_CCK_CCA_TIMEOUT_INT_EN | 2403 B_AX_OFDM_CCA_TIMEOUT_INT_EN | 2404 B_AX_DATA_ON_TIMEOUT_INT_EN | 2405 B_AX_STS_ON_TIMEOUT_INT_EN | 2406 B_AX_CSI_ON_TIMEOUT_INT_EN); 2407 2408 reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx); 2409 val = rtw89_read32(rtwdev, reg); 2410 val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN | 2411 B_AX_RMAC_RX_TIMEOUT_INT_EN | 2412 B_AX_RMAC_CSI_TIMEOUT_INT_EN); 2413 val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | 2414 B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | 2415 B_AX_RMAC_CCA_TIMEOUT_INT_EN | 2416 B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN); 2417 rtw89_write32(rtwdev, reg, val); 2418 } else { 2419 return -EINVAL; 2420 } 2421 2422 return 0; 2423 } 2424 2425 static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable) 2426 { 2427 int ret = 0; 2428 2429 if (enable) { 2430 ret = band1_enable(rtwdev); 2431 if (ret) { 2432 rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret); 2433 return ret; 2434 } 2435 2436 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL); 2437 if (ret) { 2438 rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret); 2439 return ret; 2440 } 2441 } else { 2442 rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented not\n"); 2443 return -EINVAL; 2444 } 2445 2446 return 0; 2447 } 2448 2449 static int set_host_rpr(struct rtw89_dev *rtwdev) 2450 { 2451 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 2452 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2453 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH); 2454 rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0, 2455 B_AX_RLSRPT0_FLTR_MAP_MASK); 2456 } else { 2457 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2458 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF); 2459 rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0, 2460 B_AX_RLSRPT0_FLTR_MAP_MASK); 2461 } 2462 2463 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30); 2464 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255); 2465 2466 return 0; 2467 } 2468 2469 static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) 2470 { 2471 enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; 2472 int ret; 2473 2474 ret = dmac_init(rtwdev, 0); 2475 if (ret) { 2476 rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret); 2477 return ret; 2478 } 2479 2480 ret = cmac_init(rtwdev, 0); 2481 if (ret) { 2482 rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret); 2483 return ret; 2484 } 2485 2486 if (is_qta_dbcc(rtwdev, qta_mode)) { 2487 ret = rtw89_mac_dbcc_enable(rtwdev, true); 2488 if (ret) { 2489 rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret); 2490 return ret; 
2491 } 2492 } 2493 2494 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2495 if (ret) { 2496 rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret); 2497 return ret; 2498 } 2499 2500 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 2501 if (ret) { 2502 rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret); 2503 return ret; 2504 } 2505 2506 ret = set_host_rpr(rtwdev); 2507 if (ret) { 2508 rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret); 2509 return ret; 2510 } 2511 2512 return 0; 2513 } 2514 2515 static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) 2516 { 2517 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 2518 2519 rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); 2520 rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 2521 } 2522 2523 static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, 2524 bool dlfw) 2525 { 2526 u32 val; 2527 int ret; 2528 2529 if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN) 2530 return -EFAULT; 2531 2532 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 2533 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 2534 2535 rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 2536 2537 val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 2538 val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY); 2539 val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE, 2540 B_AX_WCPU_FWDL_STS_MASK); 2541 2542 if (dlfw) 2543 val |= B_AX_WCPU_FWDL_EN; 2544 2545 rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val); 2546 rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK, 2547 boot_reason); 2548 rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); 2549 2550 if (!dlfw) { 2551 mdelay(5); 2552 2553 ret = rtw89_fw_check_rdy(rtwdev); 2554 if (ret) 2555 return ret; 2556 } 2557 2558 return 0; 2559 } 2560 2561 static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev) 2562 { 2563 u32 val; 2564 int ret; 2565 2566 val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | 2567 B_AX_PKT_BUF_EN; 2568 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val); 2569 2570 val = B_AX_DISPATCHER_CLK_EN; 2571 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); 2572 2573 ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); 2574 if (ret) { 2575 rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); 2576 return ret; 2577 } 2578 2579 ret = hfc_init(rtwdev, true, false, true); 2580 if (ret) { 2581 rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret); 2582 return ret; 2583 } 2584 2585 return ret; 2586 } 2587 2588 static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev) 2589 { 2590 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, 2591 B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN); 2592 } 2593 2594 void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) 2595 { 2596 rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, 2597 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2598 rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, 2599 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2600 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2601 rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2602 } 2603 2604 void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) 2605 { 2606 rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, 2607 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2608 rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, 2609 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2610 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2611 rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2612 } 2613 2614 int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) 2615 { 2616 
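	/*
	 * Partial init only brings the device up far enough to download
	 * firmware: power on (retried once via an off/on cycle on failure),
	 * enable HCI TX/RX DMA, run the HCI-specific pre-init hook, set up
	 * the firmware-download DLE/HFC configuration (RTW89_QTA_DLFW), then
	 * restart the WCPU with firmware download enabled and push
	 * RTW89_FW_NORMAL.
	 */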
int ret; 2617 2618 ret = rtw89_mac_power_switch(rtwdev, true); 2619 if (ret) { 2620 rtw89_mac_power_switch(rtwdev, false); 2621 ret = rtw89_mac_power_switch(rtwdev, true); 2622 if (ret) 2623 return ret; 2624 } 2625 2626 rtw89_mac_hci_func_en(rtwdev); 2627 2628 if (rtwdev->hci.ops->mac_pre_init) { 2629 ret = rtwdev->hci.ops->mac_pre_init(rtwdev); 2630 if (ret) 2631 return ret; 2632 } 2633 2634 ret = rtw89_mac_fw_dl_pre_init(rtwdev); 2635 if (ret) 2636 return ret; 2637 2638 rtw89_mac_disable_cpu(rtwdev); 2639 ret = rtw89_mac_enable_cpu(rtwdev, 0, true); 2640 if (ret) 2641 return ret; 2642 2643 ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); 2644 if (ret) 2645 return ret; 2646 2647 return 0; 2648 } 2649 2650 int rtw89_mac_init(struct rtw89_dev *rtwdev) 2651 { 2652 int ret; 2653 2654 ret = rtw89_mac_partial_init(rtwdev); 2655 if (ret) 2656 goto fail; 2657 2658 rtw89_mac_enable_bb_rf(rtwdev); 2659 2660 ret = rtw89_mac_sys_init(rtwdev); 2661 if (ret) 2662 goto fail; 2663 2664 ret = rtw89_mac_trx_init(rtwdev); 2665 if (ret) 2666 goto fail; 2667 2668 if (rtwdev->hci.ops->mac_post_init) { 2669 ret = rtwdev->hci.ops->mac_post_init(rtwdev); 2670 if (ret) 2671 goto fail; 2672 } 2673 2674 rtw89_fw_send_all_early_h2c(rtwdev); 2675 rtw89_fw_h2c_set_ofld_cfg(rtwdev); 2676 2677 return ret; 2678 fail: 2679 rtw89_mac_power_switch(rtwdev, false); 2680 2681 return ret; 2682 } 2683 2684 static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2685 { 2686 u8 i; 2687 2688 for (i = 0; i < 4; i++) { 2689 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2690 DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); 2691 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); 2692 } 2693 } 2694 2695 static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2696 { 2697 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2698 CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); 2699 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); 2700 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); 2701 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); 2702 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); 2703 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); 2704 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); 2705 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); 2706 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); 2707 } 2708 2709 static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) 2710 { 2711 u8 sh = FIELD_GET(GENMASK(4, 0), macid); 2712 u8 grp = macid >> 5; 2713 int ret; 2714 2715 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 2716 if (ret) 2717 return ret; 2718 2719 rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); 2720 2721 return 0; 2722 } 2723 2724 static const struct rtw89_port_reg rtw_port_base = { 2725 .port_cfg = R_AX_PORT_CFG_P0, 2726 .tbtt_prohib = R_AX_TBTT_PROHIB_P0, 2727 .bcn_area = R_AX_BCN_AREA_P0, 2728 .bcn_early = R_AX_BCNERLYINT_CFG_P0, 2729 .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, 2730 .tbtt_agg = R_AX_TBTT_AGG_P0, 2731 .bcn_space = R_AX_BCN_SPACE_CFG_P0, 2732 .bcn_forcetx = R_AX_BCN_FORCETX_P0, 2733 .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, 2734 .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, 2735 .dtim_ctrl = R_AX_DTIM_CTRL_P0, 2736 .tbtt_shift = R_AX_TBTT_SHIFT_P0, 2737 .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, 2738 .tsftr_l = R_AX_TSFTR_LOW_P0, 2739 .tsftr_h = R_AX_TSFTR_HIGH_P0 2740 }; 2741 2742 #define BCN_INTERVAL 100 2743 #define BCN_ERLY_DEF 160 2744 #define BCN_SETUP_DEF 2 2745 #define BCN_HOLD_DEF 200 
2746 #define BCN_MASK_DEF 0 2747 #define TBTT_ERLY_DEF 5 2748 #define BCN_SET_UNIT 32 2749 #define BCN_ERLY_SET_DLY (10 * 2) 2750 2751 static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, 2752 struct rtw89_vif *rtwvif) 2753 { 2754 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2755 const struct rtw89_port_reg *p = &rtw_port_base; 2756 2757 if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) 2758 return; 2759 2760 rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); 2761 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); 2762 rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); 2763 rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); 2764 2765 msleep(vif->bss_conf.beacon_int + 1); 2766 2767 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | 2768 B_AX_BRK_SETUP); 2769 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST); 2770 rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); 2771 } 2772 2773 static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, 2774 struct rtw89_vif *rtwvif, bool en) 2775 { 2776 const struct rtw89_port_reg *p = &rtw_port_base; 2777 2778 if (en) 2779 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2780 else 2781 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2782 } 2783 2784 static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, 2785 struct rtw89_vif *rtwvif, bool en) 2786 { 2787 const struct rtw89_port_reg *p = &rtw_port_base; 2788 2789 if (en) 2790 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2791 else 2792 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2793 } 2794 2795 static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, 2796 struct rtw89_vif *rtwvif) 2797 { 2798 const struct rtw89_port_reg *p = &rtw_port_base; 2799 2800 rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, 2801 rtwvif->net_type); 2802 } 2803 2804 static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, 2805 struct rtw89_vif *rtwvif) 2806 { 2807 const struct rtw89_port_reg *p = &rtw_port_base; 2808 bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 2809 u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; 2810 2811 if (en) 2812 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits); 2813 else 2814 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits); 2815 } 2816 2817 static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, 2818 struct rtw89_vif *rtwvif) 2819 { 2820 const struct rtw89_port_reg *p = &rtw_port_base; 2821 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2822 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2823 u32 bit = B_AX_RX_BSSID_FIT_EN; 2824 2825 if (en) 2826 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit); 2827 else 2828 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit); 2829 } 2830 2831 static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, 2832 struct rtw89_vif *rtwvif) 2833 { 2834 const struct rtw89_port_reg *p = &rtw_port_base; 2835 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2836 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2837 2838 if (en) 2839 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2840 else 2841 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2842 } 2843 2844 static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, 2845 struct rtw89_vif 
*rtwvif) 2846 { 2847 const struct rtw89_port_reg *p = &rtw_port_base; 2848 bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || 2849 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2850 2851 if (en) 2852 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2853 else 2854 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2855 } 2856 2857 static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, 2858 struct rtw89_vif *rtwvif) 2859 { 2860 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2861 const struct rtw89_port_reg *p = &rtw_port_base; 2862 u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; 2863 2864 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 2865 bcn_int); 2866 } 2867 2868 static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, 2869 struct rtw89_vif *rtwvif) 2870 { 2871 const struct rtw89_port_reg *p = &rtw_port_base; 2872 2873 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2874 B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); 2875 } 2876 2877 static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, 2878 struct rtw89_vif *rtwvif) 2879 { 2880 const struct rtw89_port_reg *p = &rtw_port_base; 2881 2882 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2883 B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); 2884 } 2885 2886 static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, 2887 struct rtw89_vif *rtwvif) 2888 { 2889 const struct rtw89_port_reg *p = &rtw_port_base; 2890 2891 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, 2892 B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); 2893 } 2894 2895 static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, 2896 struct rtw89_vif *rtwvif) 2897 { 2898 const struct rtw89_port_reg *p = &rtw_port_base; 2899 2900 rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, 2901 B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); 2902 } 2903 2904 static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, 2905 struct rtw89_vif *rtwvif) 2906 { 2907 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2908 static const u32 masks[RTW89_PORT_NUM] = { 2909 B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, 2910 B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, 2911 B_AX_BSS_COLOB_AX_PORT_4_MASK, 2912 }; 2913 u8 port = rtwvif->port; 2914 u32 reg_base; 2915 u32 reg; 2916 u8 bss_color; 2917 2918 bss_color = vif->bss_conf.he_bss_color.color; 2919 reg_base = port >= 4 ? 
R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; 2920 reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); 2921 rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); 2922 } 2923 2924 static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, 2925 struct rtw89_vif *rtwvif) 2926 { 2927 u8 port = rtwvif->port; 2928 u32 reg; 2929 2930 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2931 return; 2932 2933 if (port == 0) { 2934 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); 2935 rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); 2936 } 2937 } 2938 2939 static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, 2940 struct rtw89_vif *rtwvif) 2941 { 2942 u8 port = rtwvif->port; 2943 u32 reg; 2944 u32 val; 2945 2946 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); 2947 val = rtw89_read32(rtwdev, reg); 2948 val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); 2949 if (port == 0) 2950 val &= ~BIT(0); 2951 rtw89_write32(rtwdev, reg, val); 2952 } 2953 2954 static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, 2955 struct rtw89_vif *rtwvif) 2956 { 2957 const struct rtw89_port_reg *p = &rtw_port_base; 2958 2959 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN); 2960 } 2961 2962 static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, 2963 struct rtw89_vif *rtwvif) 2964 { 2965 const struct rtw89_port_reg *p = &rtw_port_base; 2966 2967 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 2968 BCN_ERLY_DEF); 2969 } 2970 2971 int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2972 { 2973 int ret; 2974 2975 ret = rtw89_mac_port_update(rtwdev, rtwvif); 2976 if (ret) 2977 return ret; 2978 2979 rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id); 2980 rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id); 2981 2982 ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false); 2983 if (ret) 2984 return ret; 2985 2986 ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE); 2987 if (ret) 2988 return ret; 2989 2990 ret = rtw89_cam_init(rtwdev, rtwvif); 2991 if (ret) 2992 return ret; 2993 2994 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); 2995 if (ret) 2996 return ret; 2997 2998 ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id); 2999 if (ret) 3000 return ret; 3001 3002 return 0; 3003 } 3004 3005 int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3006 { 3007 int ret; 3008 3009 ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE); 3010 if (ret) 3011 return ret; 3012 3013 rtw89_cam_deinit(rtwdev, rtwvif); 3014 3015 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); 3016 if (ret) 3017 return ret; 3018 3019 return 0; 3020 } 3021 3022 int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3023 { 3024 u8 port = rtwvif->port; 3025 3026 if (port >= RTW89_PORT_NUM) 3027 return -EINVAL; 3028 3029 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); 3030 rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false); 3031 rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false); 3032 rtw89_mac_port_cfg_net_type(rtwdev, rtwvif); 3033 rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); 3034 rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); 3035 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); 3036 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); 3037 rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); 3038 rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif); 3039 rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif); 3040 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif); 3041 rtw89_mac_port_cfg_tbtt_early(rtwdev, 
rtwvif); 3042 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif); 3043 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif); 3044 rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif); 3045 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif); 3046 fsleep(BCN_ERLY_SET_DLY); 3047 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif); 3048 3049 return 0; 3050 } 3051 3052 int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3053 { 3054 int ret; 3055 3056 rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, 3057 RTW89_MAX_MAC_ID_NUM); 3058 if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) 3059 return -ENOSPC; 3060 3061 ret = rtw89_mac_vif_init(rtwdev, rtwvif); 3062 if (ret) 3063 goto release_mac_id; 3064 3065 return 0; 3066 3067 release_mac_id: 3068 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3069 3070 return ret; 3071 } 3072 3073 int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3074 { 3075 int ret; 3076 3077 ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); 3078 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3079 3080 return ret; 3081 } 3082 3083 static void 3084 rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3085 { 3086 } 3087 3088 static void 3089 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3090 { 3091 rtw89_debug(rtwdev, RTW89_DBG_FW, 3092 "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", 3093 RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), 3094 RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), 3095 RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), 3096 RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); 3097 } 3098 3099 static void 3100 rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3101 { 3102 rtw89_debug(rtwdev, RTW89_DBG_FW, 3103 "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", 3104 RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data), 3105 RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data), 3106 RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data), 3107 RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data), 3108 RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data)); 3109 } 3110 3111 static void 3112 rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3113 { 3114 rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len), 3115 RTW89_GET_C2H_LOG_SRT_PRT(c2h->data)); 3116 } 3117 3118 static 3119 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 3120 struct sk_buff *c2h, u32 len) = { 3121 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 3122 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 3123 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL, 3124 [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 3125 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 3126 }; 3127 3128 static 3129 void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, 3130 struct sk_buff *c2h, u32 len) = { 3131 [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, 3132 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 3133 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 3134 }; 3135 3136 void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3137 u32 len, u8 class, u8 func) 3138 { 3139 void (*handler)(struct rtw89_dev *rtwdev, 3140 struct sk_buff *c2h, u32 len) = NULL; 3141 3142 switch (class) { 3143 case RTW89_MAC_C2H_CLASS_INFO: 3144 if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) 3145 handler = rtw89_mac_c2h_info_handler[func]; 3146 break; 3147 case RTW89_MAC_C2H_CLASS_OFLD: 3148 if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) 3149 handler = rtw89_mac_c2h_ofld_handler[func]; 
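		/*
		 * Several entries of rtw89_mac_c2h_ofld_handler[] are still
		 * NULL; those functions fall through to the !handler check
		 * below and are reported as unsupported.
		 */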
3150 break; 3151 case RTW89_MAC_C2H_CLASS_FWDBG: 3152 return; 3153 default: 3154 rtw89_info(rtwdev, "c2h class %d not support\n", class); 3155 return; 3156 } 3157 if (!handler) { 3158 rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 3159 func); 3160 return; 3161 } 3162 handler(rtwdev, skb, len); 3163 } 3164 3165 bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, 3166 enum rtw89_phy_idx phy_idx, 3167 u32 reg_base, u32 *cr) 3168 { 3169 const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; 3170 enum rtw89_qta_mode mode = dle_mem->mode; 3171 u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); 3172 3173 if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { 3174 rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", 3175 addr); 3176 goto error; 3177 } 3178 3179 if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR) 3180 if (mode == RTW89_QTA_SCC) { 3181 rtw89_err(rtwdev, 3182 "[TXPWR] addr=0x%x but hw not enable\n", 3183 addr); 3184 goto error; 3185 } 3186 3187 *cr = addr; 3188 return true; 3189 3190 error: 3191 rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", 3192 addr, phy_idx); 3193 3194 return false; 3195 } 3196 3197 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 3198 { 3199 u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); 3200 int ret = 0; 3201 3202 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3203 if (ret) 3204 return ret; 3205 3206 if (!enable) { 3207 rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); 3208 return ret; 3209 } 3210 3211 rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | 3212 B_AX_APP_MAC_INFO_RPT | 3213 B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | 3214 B_AX_PPDU_STAT_RPT_CRC32); 3215 rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, 3216 RTW89_PRPT_DEST_HOST); 3217 3218 return ret; 3219 } 3220 3221 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) 3222 { 3223 #define MAC_AX_TIME_TH_SH 5 3224 #define MAC_AX_LEN_TH_SH 4 3225 #define MAC_AX_TIME_TH_MAX 255 3226 #define MAC_AX_LEN_TH_MAX 255 3227 #define MAC_AX_TIME_TH_DEF 88 3228 #define MAC_AX_LEN_TH_DEF 4080 3229 struct ieee80211_hw *hw = rtwdev->hw; 3230 u32 rts_threshold = hw->wiphy->rts_threshold; 3231 u32 time_th, len_th; 3232 u32 reg; 3233 3234 if (rts_threshold == (u32)-1) { 3235 time_th = MAC_AX_TIME_TH_DEF; 3236 len_th = MAC_AX_LEN_TH_DEF; 3237 } else { 3238 time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; 3239 len_th = rts_threshold; 3240 } 3241 3242 time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); 3243 len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); 3244 3245 reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); 3246 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); 3247 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); 3248 } 3249 3250 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) 3251 { 3252 bool empty; 3253 int ret; 3254 3255 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3256 return; 3257 3258 ret = read_poll_timeout(dle_is_txq_empty, empty, empty, 3259 10000, 200000, false, rtwdev); 3260 if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) 3261 rtw89_info(rtwdev, "timed out to flush queues\n"); 3262 } 3263 3264 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) 3265 { 3266 u8 val; 3267 u16 val16; 3268 u32 val32; 3269 int ret; 3270 3271 rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); 3272 
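	/*
	 * The rest of the coex bring-up enables the PTA WL-TX path and
	 * GNT_BT polarity, turns on BT statistics and the BT counter reset,
	 * drops the BT-CCA response check, updates the indirect LTE coex
	 * register R_AX_LTE_SW_CFG_2, and finally applies the PTA mode
	 * (RTK or CSR) and the coex direction requested by the caller.
	 */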
rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); 3273 rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); 3274 rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); 3275 rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); 3276 rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); 3277 3278 val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); 3279 val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN; 3280 rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); 3281 3282 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); 3283 if (ret) { 3284 rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n"); 3285 return ret; 3286 } 3287 val32 = val32 & B_AX_WL_RX_CTRL; 3288 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); 3289 if (ret) { 3290 rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n"); 3291 return ret; 3292 } 3293 3294 switch (coex->pta_mode) { 3295 case RTW89_MAC_AX_COEX_RTK_MODE: 3296 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3297 val &= ~B_AX_BTMODE_MASK; 3298 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); 3299 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3300 3301 val = rtw89_read8(rtwdev, R_AX_TDMA_MODE); 3302 rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); 3303 3304 val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); 3305 val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; 3306 val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); 3307 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); 3308 break; 3309 case RTW89_MAC_AX_COEX_CSR_MODE: 3310 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3311 val &= ~B_AX_BTMODE_MASK; 3312 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); 3313 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3314 3315 val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); 3316 val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; 3317 val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); 3318 val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; 3319 val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); 3320 val16 &= ~B_AX_BT_STAT_DELAY_MASK; 3321 val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); 3322 val16 |= B_AX_ENHANCED_BT; 3323 rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); 3324 3325 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); 3326 break; 3327 default: 3328 return -EINVAL; 3329 } 3330 3331 switch (coex->direction) { 3332 case RTW89_MAC_AX_COEX_INNER: 3333 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3334 val = (val & ~BIT(2)) | BIT(1); 3335 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3336 break; 3337 case RTW89_MAC_AX_COEX_OUTPUT: 3338 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3339 val = val | BIT(1) | BIT(0); 3340 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3341 break; 3342 case RTW89_MAC_AX_COEX_INPUT: 3343 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3344 val = val & ~(BIT(2) | BIT(1)); 3345 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3346 break; 3347 default: 3348 return -EINVAL; 3349 } 3350 3351 return 0; 3352 } 3353 3354 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, 3355 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 3356 { 3357 u32 val, ret; 3358 3359 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); 3360 if (ret) { 3361 rtw89_err(rtwdev, "Read LTE fail!\n"); 3362 return ret; 3363 } 3364 val = (gnt_cfg->band[0].gnt_bt ? 3365 B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) | 3366 (gnt_cfg->band[0].gnt_bt_sw_en ? 
3367 B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) | 3368 (gnt_cfg->band[0].gnt_wl ? 3369 B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) | 3370 (gnt_cfg->band[0].gnt_wl_sw_en ? 3371 B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) | 3372 (gnt_cfg->band[1].gnt_bt ? 3373 B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) | 3374 (gnt_cfg->band[1].gnt_bt_sw_en ? 3375 B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) | 3376 (gnt_cfg->band[1].gnt_wl ? 3377 B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) | 3378 (gnt_cfg->band[1].gnt_wl_sw_en ? 3379 B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0); 3380 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); 3381 if (ret) { 3382 rtw89_err(rtwdev, "Write LTE fail!\n"); 3383 return ret; 3384 } 3385 3386 return 0; 3387 } 3388 3389 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) 3390 { 3391 u32 reg; 3392 u8 val; 3393 int ret; 3394 3395 ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); 3396 if (ret) 3397 return ret; 3398 3399 reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); 3400 val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | 3401 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | 3402 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | 3403 (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | 3404 (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | 3405 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) | 3406 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | 3407 (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0); 3408 rtw89_write8(rtwdev, reg, val); 3409 3410 return 0; 3411 } 3412 3413 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) 3414 { 3415 u32 fw_sb; 3416 3417 fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3418 fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); 3419 fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; 3420 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3421 fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; 3422 else 3423 fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; 3424 val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); 3425 val = B_AX_TOGGLE | 3426 FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | 3427 FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); 3428 rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); 3429 fsleep(1000); /* avoid BT FW loss information */ 3430 } 3431 3432 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) 3433 { 3434 return rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3435 } 3436 3437 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) 3438 { 3439 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3440 3441 val = wl ? 
val | BIT(2) : val & ~BIT(2); 3442 rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); 3443 3444 return 0; 3445 } 3446 3447 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) 3448 { 3449 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3450 3451 return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val); 3452 } 3453 3454 static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 3455 { 3456 u32 reg; 3457 u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | 3458 B_AX_BFMEE_HE_NDPA_EN; 3459 3460 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); 3461 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3462 if (en) { 3463 set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3464 rtw89_write32_set(rtwdev, reg, mask); 3465 } else { 3466 clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3467 rtw89_write32_clr(rtwdev, reg, mask); 3468 } 3469 } 3470 3471 static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) 3472 { 3473 u32 reg; 3474 u32 val32; 3475 int ret; 3476 3477 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3478 if (ret) 3479 return ret; 3480 3481 /* AP mode set tx gid to 63 */ 3482 /* STA mode set tx gid to 0(default) */ 3483 reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); 3484 rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); 3485 3486 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); 3487 rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); 3488 3489 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3490 val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER); 3491 val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); 3492 rtw89_write32(rtwdev, reg, val32); 3493 rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); 3494 3495 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3496 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | 3497 B_AX_BFMEE_USE_NSTS | 3498 B_AX_BFMEE_CSI_GID_SEL | 3499 B_AX_BFMEE_CSI_FORCE_RETE_EN); 3500 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); 3501 rtw89_write32(rtwdev, reg, 3502 u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | 3503 u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | 3504 u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); 3505 3506 return 0; 3507 } 3508 3509 static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, 3510 struct ieee80211_vif *vif, 3511 struct ieee80211_sta *sta) 3512 { 3513 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3514 u8 mac_idx = rtwvif->mac_idx; 3515 u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; 3516 u8 port_sel = rtwvif->port; 3517 u8 sound_dim = 3, t; 3518 u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info; 3519 u32 reg; 3520 u16 val; 3521 int ret; 3522 3523 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3524 if (ret) 3525 return ret; 3526 3527 if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || 3528 (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { 3529 ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); 3530 stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); 3531 t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 3532 phy_cap[5]); 3533 sound_dim = min(sound_dim, t); 3534 } 3535 if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || 3536 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 3537 ldpc_en &= !!(sta->vht_cap.cap & 
IEEE80211_VHT_CAP_RXLDPC); 3538 stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); 3539 t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 3540 sta->vht_cap.cap); 3541 sound_dim = min(sound_dim, t); 3542 } 3543 nc = min(nc, sound_dim); 3544 nr = min(nr, sound_dim); 3545 3546 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3547 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 3548 3549 val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | 3550 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) | 3551 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) | 3552 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) | 3553 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) | 3554 FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) | 3555 FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); 3556 3557 if (port_sel == 0) 3558 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3559 else 3560 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); 3561 3562 rtw89_write16(rtwdev, reg, val); 3563 3564 return 0; 3565 } 3566 3567 static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, 3568 struct ieee80211_vif *vif, 3569 struct ieee80211_sta *sta) 3570 { 3571 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3572 u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); 3573 u32 reg; 3574 u8 mac_idx = rtwvif->mac_idx; 3575 int ret; 3576 3577 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3578 if (ret) 3579 return ret; 3580 3581 if (sta->he_cap.has_he) { 3582 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | 3583 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | 3584 BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); 3585 } 3586 if (sta->vht_cap.vht_supported) { 3587 rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | 3588 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | 3589 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); 3590 } 3591 if (sta->ht_cap.ht_supported) { 3592 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | 3593 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | 3594 BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); 3595 } 3596 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3597 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 3598 rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); 3599 rtw89_write32(rtwdev, 3600 rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), 3601 rrsc); 3602 3603 return 0; 3604 } 3605 3606 void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3607 struct ieee80211_sta *sta) 3608 { 3609 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3610 3611 if (rtw89_sta_has_beamformer_cap(sta)) { 3612 rtw89_debug(rtwdev, RTW89_DBG_BF, 3613 "initialize bfee for new association\n"); 3614 rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); 3615 rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); 3616 rtw89_mac_csi_rrsc(rtwdev, vif, sta); 3617 } 3618 } 3619 3620 void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3621 struct ieee80211_sta *sta) 3622 { 3623 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3624 3625 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false); 3626 } 3627 3628 void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3629 struct ieee80211_bss_conf *conf) 3630 { 3631 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3632 u8 mac_idx = rtwvif->mac_idx; 3633 __le32 *p; 3634 3635 rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); 3636 3637 p = (__le32 *)conf->mu_group.membership; 3638 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), 3639 
le32_to_cpu(p[0])); 3640 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), 3641 le32_to_cpu(p[1])); 3642 3643 p = (__le32 *)conf->mu_group.position; 3644 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), 3645 le32_to_cpu(p[0])); 3646 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), 3647 le32_to_cpu(p[1])); 3648 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), 3649 le32_to_cpu(p[2])); 3650 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx), 3651 le32_to_cpu(p[3])); 3652 } 3653 3654 struct rtw89_mac_bf_monitor_iter_data { 3655 struct rtw89_dev *rtwdev; 3656 struct ieee80211_sta *down_sta; 3657 int count; 3658 }; 3659 3660 static 3661 void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta) 3662 { 3663 struct rtw89_mac_bf_monitor_iter_data *iter_data = 3664 (struct rtw89_mac_bf_monitor_iter_data *)data; 3665 struct ieee80211_sta *down_sta = iter_data->down_sta; 3666 int *count = &iter_data->count; 3667 3668 if (down_sta == sta) 3669 return; 3670 3671 if (rtw89_sta_has_beamformer_cap(sta)) 3672 (*count)++; 3673 } 3674 3675 void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, 3676 struct ieee80211_sta *sta, bool disconnect) 3677 { 3678 struct rtw89_mac_bf_monitor_iter_data data; 3679 3680 data.rtwdev = rtwdev; 3681 data.down_sta = disconnect ? sta : NULL; 3682 data.count = 0; 3683 ieee80211_iterate_stations_atomic(rtwdev->hw, 3684 rtw89_mac_bf_monitor_calc_iter, 3685 &data); 3686 3687 rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count); 3688 if (data.count) 3689 set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3690 else 3691 clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3692 } 3693 3694 void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) 3695 { 3696 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3697 struct rtw89_vif *rtwvif; 3698 bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? false : true; 3699 bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3700 3701 if (en == old) 3702 return; 3703 3704 rtw89_for_each_rtwvif(rtwdev, rtwvif) 3705 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en); 3706 } 3707 3708 static int 3709 __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3710 u32 tx_time) 3711 { 3712 #define MAC_AX_DFLT_TX_TIME 5280 3713 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3714 u32 max_tx_time = tx_time == 0 ? 
MAC_AX_DFLT_TX_TIME : tx_time; 3715 u32 reg; 3716 int ret = 0; 3717 3718 if (rtwsta->cctl_tx_time) { 3719 rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9; 3720 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3721 } else { 3722 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3723 if (ret) { 3724 rtw89_warn(rtwdev, "failed to check cmac in set txtime\n"); 3725 return ret; 3726 } 3727 3728 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3729 rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, 3730 max_tx_time >> 5); 3731 } 3732 3733 return ret; 3734 } 3735 3736 int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3737 bool resume, u32 tx_time) 3738 { 3739 int ret = 0; 3740 3741 if (!resume) { 3742 rtwsta->cctl_tx_time = true; 3743 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3744 } else { 3745 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3746 rtwsta->cctl_tx_time = false; 3747 } 3748 3749 return ret; 3750 } 3751 3752 int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3753 u32 *tx_time) 3754 { 3755 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3756 u32 reg; 3757 int ret = 0; 3758 3759 if (rtwsta->cctl_tx_time) { 3760 *tx_time = (rtwsta->ampdu_max_time + 1) << 9; 3761 } else { 3762 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3763 if (ret) { 3764 rtw89_warn(rtwdev, "failed to check cmac in tx_time\n"); 3765 return ret; 3766 } 3767 3768 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3769 *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; 3770 } 3771 3772 return ret; 3773 } 3774 3775 int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, 3776 struct rtw89_sta *rtwsta, 3777 bool resume, u8 tx_retry) 3778 { 3779 int ret = 0; 3780 3781 rtwsta->data_tx_cnt_lmt = tx_retry; 3782 3783 if (!resume) { 3784 rtwsta->cctl_tx_retry_limit = true; 3785 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3786 } else { 3787 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3788 rtwsta->cctl_tx_retry_limit = false; 3789 } 3790 3791 return ret; 3792 } 3793 3794 int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, 3795 struct rtw89_sta *rtwsta, u8 *tx_retry) 3796 { 3797 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3798 u32 reg; 3799 int ret = 0; 3800 3801 if (rtwsta->cctl_tx_retry_limit) { 3802 *tx_retry = rtwsta->data_tx_cnt_lmt; 3803 } else { 3804 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3805 if (ret) { 3806 rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n"); 3807 return ret; 3808 } 3809 3810 reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); 3811 *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); 3812 } 3813 3814 return ret; 3815 } 3816 3817 int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, 3818 struct rtw89_vif *rtwvif, bool en) 3819 { 3820 u8 mac_idx = rtwvif->mac_idx; 3821 u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0; 3822 u32 reg; 3823 u32 ret; 3824 3825 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3826 if (ret) 3827 return ret; 3828 3829 reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); 3830 if (en) 3831 rtw89_write16_set(rtwdev, reg, set); 3832 else 3833 rtw89_write16_clr(rtwdev, reg, set); 3834 3835 return 0; 3836 } 3837
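/*
 * Usage sketch (hypothetical caller, not part of this file): a vif that
 * associates to an AP advertising HE MU-EDCA parameters would typically
 * call rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true) once the EDCA
 * parameters have been programmed, and call it again with false on
 * disassociation, so B_AX_MUEDCA_EN_0 / B_AX_SET_MUEDCATIMER_TF_0 are only
 * set while the MU-EDCA timers are meaningful.
 */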