// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET_PREPARE, /* pre-M0 */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M1_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_L1_RESET_PRE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is supposed to be
	 * handled by the device coredump framework. Note that a new dump
	 * will be discarded if a previous one hasn't been released by
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* When some problems happen during filling data of core dump,
	 * we won't send it to device coredump framework. Instead, we
	 * free buf by ourselves.
	 */
	vfree(buf);
}
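/* SER (system error recovery) is driven by a small state machine: error
 * reports from the interrupt path are queued as ser_msg events and handled
 * in ser_hdl_work below, which dispatches each event to the handler of the
 * current state via st_tbl. State transitions deliver synthetic
 * SER_EV_STATE_OUT/SER_EV_STATE_IN events so handlers can clean up and
 * arm timeouts on entry/exit.
 */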
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}
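/* Each drv_stop_* helper above sets a RTW89_SER_DRV_STOP_* flag, so the
 * matching drv_resume_* helper below only undoes what was actually stopped.
 * This makes the resume path safe to run unconditionally on state exit.
 */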
static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
	rtwvif->tdls_peer = 0;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;

	if (rtwvif != target_rtwvif)
		return;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);

	rtwdev->total_sta_assoc = 0;
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_post_m0_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_START_DMAC);
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}
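/* The hal_send_*_event helpers acknowledge the firmware by writing a
 * MAC_AX_ERR_* code to the error status register; the firmware's reply
 * (the next M-step: M1/M3/M5) comes back through rtw89_ser_notify() as a
 * new error code, driving the L1 recovery handshake forward.
 */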
static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET_PREPARE:
		ser_state_goto(ser, SER_L1_RESET_PRE_ST);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		set_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		ser->prehandle_l1 = true;
		hal_send_post_m0_event(ser);
		ser_set_alarm(ser, 1000, SER_EV_M1_TIMEOUT);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_M1_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		cancel_delayed_work_sync(&rtwdev->track_work);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
					     RTW89_TRACK_WORK_PERIOD);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}
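/* MAC internal memory is not mapped directly. To dump it, a page base
 * address is programmed into filter_model_addr, and the page contents are
 * then read out through the indirect access window at indir_access_addr,
 * one MAC_MEM_DUMP_PAGE_SIZE page at a time.
 */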
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = mac->mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, filter_model_addr, base_addr);

		for (i = indir_access_addr + residue;
		     i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, filter_model_addr, fwbt_addr);

	for (i = indir_access_addr;
	     i < indir_access_addr + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
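/* L2 reset is the last resort: dump firmware state for debugging, tear
 * down all MAC bindings, then ask mac80211 to restart the hardware.
 * Completion is reported back via rtw89_ser_recfg_done(), with
 * SER_RECFG_TIMEOUT as a backstop if reconfiguration never finishes.
 */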
static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET_PREPARE, "SER_EV_L1_RESET_PREPARE pre-m0"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET m1"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M1_TIMEOUT, "SER_EV_M1_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_L1_RESET_PRE_ST, "SER_L1_RESET_PRE_ST", ser_l1_reset_pre_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}
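/* Map a raw MAC error code reported by the chip/firmware to an SER event
 * and queue it to the state machine; unrecognized codes are rejected with
 * -EINVAL so callers can tell no recovery was scheduled.
 */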
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_PREERR_DMAC: /* pre-M0 */
		event = SER_EV_L1_RESET_PREPARE;
		break;
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);