// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredumpv, buf's lifetime is owned by the
	 * device coredump framework. Note that a new dump will be
	 * discarded if a previous one hasn't been released by the
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* If a problem occurs while filling the core dump data, we don't
	 * hand it to the device coredump framework. Instead, we free buf
	 * ourselves.
	 */
	vfree(buf);
}
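
/* The SER flow is a small event-driven state machine. Events arrive from
 * interrupt/FW notifications via ser_send_msg() and are dispatched to the
 * current state's handler from a work queue. Roughly:
 *
 *   SER_IDLE_ST --L1_RESET(M1)--> SER_RESET_TRX_ST
 *   SER_RESET_TRX_ST --DO_RECOVERY(M3)--> SER_DO_HCI_ST
 *   SER_DO_HCI_ST --MAC_RESET_DONE(M5)--> SER_IDLE_ST
 *
 * Any failure or FW timeout along the way escalates to SER_L2_RESET_ST,
 * which rebuilds the whole device and returns to SER_IDLE_ST on
 * L2_RECFG_DONE (or its timeout).
 */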

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
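/* The "drv" helpers below pause and resume the driver's own TX/RX paths
 * around a recovery. Each stop helper records a RTW89_SER_DRV_STOP_* flag,
 * so the matching resume helper is a no-op unless its stop actually ran.
 */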
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
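/* L1 (level-1) recovery handshake with firmware, as driven by the state
 * handlers below (M1/M3/M5 mirror the event names):
 *
 *   M1: FW reports an L1 error     -> enter SER_RESET_TRX_ST
 *   M2: driver halts TX/RX/DMA and acks via MAC_AX_ERR_L1_DISABLE_EN
 *   M3: FW finishes disabling DMAC -> enter SER_DO_HCI_ST
 *   M4: driver acks via MAC_AX_ERR_L1_RCVY_EN
 *   M5: FW completes MAC reset     -> back to SER_IDLE_ST
 *
 * Missing an expected FW step (M3/M5 timeout) escalates to L2 reset.
 */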
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		cancel_delayed_work_sync(&rtwdev->track_work);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* ack M2 and wait for M3 from FW */
		hal_send_m2_event(ser);

		/* set alarm to catch a missing FW response */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
					     RTW89_TRACK_WORK_PERIOD);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* ack M4 and wait for M5 from FW */
		hal_send_m4_event(ser);

		/* set alarm to catch a missing FW response */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));
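
/* The first bytes of the FW reserved PLE dump hold a __fw_backtrace_entry
 * header that firmware places there: the WCPU address of the backtrace
 * buffer, its size, and a magic key. After validating all three, the dump
 * below walks that buffer through the indirect access window
 * (R_AX_FILTER_MODEL_ADDR selects the page, R_AX_INDIR_ACCESS_ENTRY maps
 * it in) and copies out (ra, sp) pairs, one per stack frame.
 */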
static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

	for (i = R_AX_INDIR_ACCESS_ENTRY;
	     i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}
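
/* Entry point for error interrupts/FW reports: map the raw error code onto
 * a state-machine event and queue it. L1 codes drive the M1/M3/M5 handshake
 * above; anything promoted to (or reported as) L2 forces a full restart.
 */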
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);