// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS	200
#define FSM_EVENT_POLL_INTERVAL_MS	20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS	10000
#define FSM_MD_EX_PASS_TIMEOUT_MS	45000
#define FSM_CMD_TIMEOUT_MS		2000

void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

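/* Invoke every registered notifier callback with the new modem state.
 * notifier_lock is dropped around each callback invocation.
 */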
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update the ports first, otherwise sending a message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

static void fsm_release_command(struct kref *ref)
{
	struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);

	kfree(cmd);
}

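/* Record the command result, wake any waiter blocked on the command's
 * completion, and drop the reference the command queue held on the command.
 */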
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->result = result;
		complete_all(&cmd->done);
	}

	kref_put(&cmd->refcnt, fsm_release_command);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

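/* Drain both queues: fail every pending command with -EINVAL and free every
 * queued event. Called on FSM reset/uninit and on an unrecognized command.
 */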
static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

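/* Poll the head of the event queue every FSM_EVENT_POLL_INTERVAL_MS until
 * event_expected (or event_ignore) is seen, the retry budget is exhausted,
 * or the FSM thread is asked to stop. Matching events are consumed.
 */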
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

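/* Handle a modem exception: move to FSM_STATE_EXCEPTION and, for
 * EXCEPTION_EVENT, broadcast MD_STATE_EXCEPTION, run the exception
 * handshake and wait for the MD_EX_REC_OK and MD_EX_PASS events.
 */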
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

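/* Stop the modem: halt CLDMA and, unless the RGU interrupt already fired,
 * ask the device to disable DRM, attempt the ACPI FLDR reset, and fall back
 * to requesting a device reset over MHCCIF if that fails.
 */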
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_ctrl *md_ctrl;
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	t7xx_dev = ctl->md->t7xx_dev;

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (!ctl->md->rgu_irq_asserted) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
		/* Wait for the DRM disable to take effect */
		msleep(FSM_DRM_DISABLE_DELAY_MS);

		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_md_event_notify(md, FSM_READY);
}

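/* Kick off the handshake and wait up to 60 seconds for both the MD and AP
 * cores to report ready (or for an exception). On timeout, signal the
 * ongoing handshake to exit and escalate to the exception routine.
 */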
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					 ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

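/* Start the modem: poll the MISC device-status register until the device
 * reports the Linux stage (every 20 ms, up to 2 s), then initialize both
 * CLDMA interfaces and run the starting/handshake routine.
 */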
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	u32 dev_status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = read_poll_timeout(ioread32, dev_status,
				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (ret) {
		struct device *dev = &md->t7xx_dev->pdev->dev;

		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
		return;
	}

	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}

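/* FSM worker thread: sleep until a command is queued, dequeue it, and
 * dispatch it to the matching routine. Unknown commands fail with -EINVAL
 * and flush both the command and event queues.
 */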
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

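/* Allocate and queue a command for the FSM thread (atomically when called
 * from interrupt context), wake the thread and, if
 * FSM_CMD_FLAG_WAIT_FOR_COMPLETION is set, block up to FSM_CMD_TIMEOUT_MS
 * for the result. Reference counting keeps the command alive for both the
 * queue and the waiter.
 */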
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	kref_init(&cmd->refcnt);
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		init_completion(&cmd->done);
		kref_get(&cmd->refcnt);
	}

	kref_get(&cmd->refcnt);
	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&cmd->done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));

		ret = wait_ret ? cmd->result : -ETIMEDOUT;
		kref_put(&cmd->refcnt, fsm_release_command);
		return ret;
	}

	return 0;
}

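/* Queue an event for the FSM thread, copying the optional payload into the
 * event allocation, and wake any waiters on the event wait queue.
 */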
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

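/* Translate a modem interrupt into an FSM command: port enumeration queues
 * FSM_CMD_START, while a CCIF exception sets exp_flg, wakes the handshake
 * waiter and queues FSM_CMD_EXCEPTION with EXCEPTION_EVENT as the reason.
 */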
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
}

int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}