// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

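/* SER (system error recovery) is driven by a small event/state machine.
 * The events below mirror the L1 recovery handshake with the firmware:
 * the driver is notified with M1 (SER_EV_L1_RESET), answers with M2 and
 * waits for M3 (SER_EV_DO_RECOVERY), then answers with M4 and waits for
 * M5 (SER_EV_MAC_RESET_DONE). A timeout at any step, or a fatal error,
 * escalates to an L2 reset that rebuilds the whole device.
 */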
enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

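/* Declare one packed coredump chunk and its init helper. As a sketch,
 * RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple, RTW89_SER_CD_FW_RSVD_PLE,
 * RTW89_FW_RSVD_PLE_SIZE) expands to roughly:
 *
 *	struct ser_cd_fw_rsvd_ple {
 *		u32 type;
 *		u32 type_size;
 *		u64 padding;
 *		u8 data[RTW89_FW_RSVD_PLE_SIZE];
 *	} __packed;
 *	static void ser_cd_fw_rsvd_ple_init(struct ser_cd_fw_rsvd_ple *p);
 *
 * The constant padding pattern presumably acts as a marker for tools
 * that parse the dump blob; that is an inference, not documented here.
 */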
#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE	= 0,
	RTW89_SER_CD_FW_BACKTRACE	= 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredumpv(), buf's lifetime is handled by the
	 * device coredump framework. Note that a new dump will be discarded
	 * if a previous one hasn't been released by the framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* If a problem occurs while filling the core dump data, we don't
	 * hand it to the device coredump framework. Instead, we free buf
	 * ourselves.
	 */
	vfree(buf);
}

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	rtw89_leave_lps(rtwdev);
	ser->st_tbl[ser->state].st_func(ser, evt);
}

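/* Transition helper: run SER_EV_STATE_OUT on the old state's handler
 * before switching, then SER_EV_STATE_IN on the new one, so every
 * handler can pair its setup with matching teardown.
 */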
static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

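/* Queue an event for the SER worker. GFP_ATOMIC and the irq-safe lock
 * are used because, presumably, events can be posted from interrupt
 * context (e.g. through rtw89_ser_notify()).
 */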
static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}

static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_addr_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}

/* hal function */
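/* L1 recovery is a two-step HCI operation: RTW89_LV1_RCVY_STEP_1 halts
 * DMA before the MAC is reset, and RTW89_LV1_RCVY_STEP_2 re-enables it
 * afterwards. The RTW89_SER_HAL_STOP_DMA flag keeps the two calls paired.
 */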
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
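/* State handlers. The usual L1 flow is:
 * IDLE --M1--> RESET_TRX (stop trx, send M2, wait for M3)
 *      --M3--> DO_HCI (send M4, wait for M5)
 *      --M5--> IDLE
 * with any timeout along the way escalating to L2_RESET.
 */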
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

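/* Dump MAC internal memory through the indirect access window: writing
 * a page address to R_AX_FILTER_MODEL_ADDR maps that page into the
 * window at R_AX_INDIR_ACCESS_ENTRY, which is then read out 4 bytes at
 * a time, one page per iteration.
 */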
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

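/* The firmware is expected to place a backtrace header at the start of
 * the reserved PLE area: a WCPU address, a size, and a key that must
 * match RTW89_FW_BACKTRACE_KEY before any entries (ra/sp pairs) are
 * read out through the same indirect access window.
 */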
struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

	for (i = R_AX_INDIR_ACCESS_ENTRY;
	     i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

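/* L2 pre-handling, called under rtwdev->mutex: first try to capture a
 * devcoredump (reserved PLE area plus FW backtrace), then tear down all
 * MAC bindings and stop the core so ieee80211_restart_hw() can rebuild
 * the device from scratch.
 */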
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

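/* Entry point for error reports: map a MAC error status code onto an
 * SER event and queue it for the worker. The L1 codes correspond to the
 * M1/M3/M5 handshake noted in enum ser_evt above.
 */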
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);