// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "debug.h"
#include "mac.h"
#include "ps.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	rtw89_leave_lps(rtwdev);
	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}
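
/* Schedule @event to be posted to the SER state machine after @ms
 * milliseconds, unless the SER layer is being stopped. The state
 * handlers below use this to bound how long they wait for a firmware
 * response (M3/M5) or for re-configuration, falling back to an L2
 * reset on timeout. ser_del_alarm() cancels a pending alarm.
 */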
static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
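
/* The handlers below implement the L1 recovery handshake with firmware:
 * on M1 (SER_EV_L1_RESET) the driver stops TX, halts HCI DMA, resets the
 * HCI layer and sends M2; firmware answers with M3 (SER_EV_DO_RECOVERY),
 * after which the driver sends M4 and waits for M5 (SER_EV_MAC_RESET_DONE)
 * before returning to idle. A missing firmware response (M3/M5 timeout) or
 * an L2-class error escalates to SER_L2_RESET_ST, which tears down MAC
 * bindings, stops the core and restarts the stack via ieee80211_restart_hw().
 */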
/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_reset_mac_binding(rtwdev);
		rtw89_core_stop(rtwdev);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};
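
/* External entry points: rtw89_ser_init()/rtw89_ser_deinit() set up and
 * tear down the message queue and work items, rtw89_ser_recfg_done()
 * reports that re-configuration finished after an L2 reset, and
 * rtw89_ser_notify() translates a MAC error status code into an SER
 * event and queues it to the state machine.
 */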
int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "ser event = 0x%04x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE)
		return -EINVAL;

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);