Lines Matching full:ctl

52 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_notifier_register() local
55 spin_lock_irqsave(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_register()
56 list_add_tail(&notifier->entry, &ctl->notifier_list); in t7xx_fsm_notifier_register()
57 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_register()
63 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_notifier_unregister() local
66 spin_lock_irqsave(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_unregister()
67 list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { in t7xx_fsm_notifier_unregister()
71 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in t7xx_fsm_notifier_unregister()
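The register/unregister pair above (lines 52-71) is the usual spinlock-protected list pattern: add or remove a notifier entry under ctl->notifier_lock. A minimal sketch of that pattern follows; the example_* names are hypothetical, and the control-block layout is only a guess assembled from the fields visible throughout this listing (the later sketches reuse these types).

```c
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Hypothetical control block assembled from the fields visible in this
 * listing; the real struct t7xx_fsm_ctl has more members and may order
 * or type them differently.
 */
struct example_ctl {
	struct task_struct *fsm_thread;
	int curr_state;
	int md_state;
	bool exp_flg;
	spinlock_t notifier_lock;
	struct list_head notifier_list;
	spinlock_t command_lock;
	struct list_head command_queue;
	wait_queue_head_t command_wq;
	spinlock_t event_lock;
	struct list_head event_queue;
	wait_queue_head_t event_wq;
	wait_queue_head_t async_hk_wq;
};

/* Hypothetical notifier; only ->entry is visible in the listing. */
struct example_notifier {
	struct list_head entry;
	int (*notifier_fn)(int state, void *data);	/* assumed callback shape */
	void *data;
};

static void example_notifier_register(struct example_ctl *ctl,
				      struct example_notifier *notifier)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

static void example_notifier_unregister(struct example_ctl *ctl,
					struct example_notifier *notifier)
{
	struct example_notifier *cur, *next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(cur, next, &ctl->notifier_list, entry) {
		if (cur == notifier)
			list_del(&cur->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}
```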
76 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in fsm_state_notify() local
80 spin_lock_irqsave(&ctl->notifier_lock, flags); in fsm_state_notify()
81 list_for_each_entry(notifier, &ctl->notifier_list, entry) { in fsm_state_notify()
82 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in fsm_state_notify()
86 spin_lock_irqsave(&ctl->notifier_lock, flags); in fsm_state_notify()
88 spin_unlock_irqrestore(&ctl->notifier_lock, flags); in fsm_state_notify()
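Lines 80-88 show fsm_state_notify() releasing the notifier lock before each callback and re-taking it afterwards, so notifier functions run without ctl->notifier_lock held. A sketch of that walk, reusing the example_ctl and example_notifier types above (the callback signature is an assumption):

```c
static void example_state_notify(struct example_ctl *ctl, int state)
{
	struct example_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		/*
		 * Drop the lock so the callback may sleep; this assumes no
		 * entry is unregistered while the walk is in flight.
		 */
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);
		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}
```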
91 void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) in t7xx_fsm_broadcast_state() argument
93 ctl->md_state = state; in t7xx_fsm_broadcast_state()
96 t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); in t7xx_fsm_broadcast_state()
97 fsm_state_notify(ctl->md, state); in t7xx_fsm_broadcast_state()
107 static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) in fsm_finish_command() argument
123 static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) in fsm_flush_event_cmd_qs() argument
125 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in fsm_flush_event_cmd_qs()
130 spin_lock_irqsave(&ctl->command_lock, flags); in fsm_flush_event_cmd_qs()
131 list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { in fsm_flush_event_cmd_qs()
134 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_flush_event_cmd_qs()
136 spin_unlock_irqrestore(&ctl->command_lock, flags); in fsm_flush_event_cmd_qs()
138 spin_lock_irqsave(&ctl->event_lock, flags); in fsm_flush_event_cmd_qs()
139 list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { in fsm_flush_event_cmd_qs()
143 spin_unlock_irqrestore(&ctl->event_lock, flags); in fsm_flush_event_cmd_qs()
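fsm_flush_event_cmd_qs() (lines 123-143) drains the command queue under ctl->command_lock, completing each pending command with -EINVAL via fsm_finish_command(), and then empties the event queue under ctl->event_lock. A generic sketch of draining one spinlock-protected list; the example_event type is hypothetical, and the later sketches reuse it.

```c
#include <linux/slab.h>

/* Hypothetical event record; only ->entry is visible in the listing. */
struct example_event {
	struct list_head entry;
	int event_id;
};

/* Drain a spinlock-protected list and free every element on it. */
static void example_flush_queue(spinlock_t *lock, struct list_head *queue)
{
	struct example_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(event, next, queue, entry) {
		list_del(&event->entry);
		kfree(event);
	}
	spin_unlock_irqrestore(lock, flags);
}
```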
146 static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, in fsm_wait_for_event() argument
160 spin_lock_irqsave(&ctl->event_lock, flags); in fsm_wait_for_event()
161 event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); in fsm_wait_for_event()
169 spin_unlock_irqrestore(&ctl->event_lock, flags); in fsm_wait_for_event()
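fsm_wait_for_event() peeks at the head of the event queue under ctl->event_lock (lines 160-169). Only the locked peek is visible in this listing, so the sketch below assumes a bounded polling loop around it; the real retry count, sleep interval, and the ignore-event handling are not shown.

```c
#include <linux/delay.h>

/* Sketch: poll the queue head until the expected event shows up or we give up. */
static bool example_wait_for_event(struct example_ctl *ctl, int event_expected,
				   int max_polls)
{
	bool found = false;
	int i;

	for (i = 0; i < max_polls && !found; i++) {
		struct example_event *event;
		unsigned long flags;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue,
						 struct example_event, entry);
		if (event && event->event_id == event_expected) {
			list_del(&event->entry);
			kfree(event);
			found = true;
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (!found)
			msleep(20);	/* assumed poll interval */
	}

	return found;
}
```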
176 static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, in fsm_routine_exception() argument
179 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in fsm_routine_exception()
181 if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { in fsm_routine_exception()
183 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_exception()
188 ctl->curr_state = FSM_STATE_EXCEPTION; in fsm_routine_exception()
197 t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); in fsm_routine_exception()
198 t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); in fsm_routine_exception()
199 t7xx_md_exception_handshake(ctl->md); in fsm_routine_exception()
201 fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, in fsm_routine_exception()
203 fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, in fsm_routine_exception()
213 fsm_finish_command(ctl, cmd, 0); in fsm_routine_exception()
216 static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) in fsm_stopped_handler() argument
218 ctl->curr_state = FSM_STATE_STOPPED; in fsm_stopped_handler()
220 t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); in fsm_stopped_handler()
221 return t7xx_md_reset(ctl->md->t7xx_dev); in fsm_stopped_handler()
224 static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_stopped() argument
226 if (ctl->curr_state == FSM_STATE_STOPPED) { in fsm_routine_stopped()
227 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_stopped()
231 fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); in fsm_routine_stopped()
234 static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_stopping() argument
240 if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { in fsm_routine_stopping()
241 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_stopping()
245 md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; in fsm_routine_stopping()
246 t7xx_dev = ctl->md->t7xx_dev; in fsm_routine_stopping()
248 ctl->curr_state = FSM_STATE_STOPPING; in fsm_routine_stopping()
249 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); in fsm_routine_stopping()
252 if (!ctl->md->rgu_irq_asserted) { in fsm_routine_stopping()
262 fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); in fsm_routine_stopping()
265 static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_broadcast_ready_state() argument
267 if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) in t7xx_fsm_broadcast_ready_state()
270 ctl->md_state = MD_STATE_READY; in t7xx_fsm_broadcast_ready_state()
272 fsm_state_notify(ctl->md, MD_STATE_READY); in t7xx_fsm_broadcast_ready_state()
273 t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); in t7xx_fsm_broadcast_ready_state()
276 static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) in fsm_routine_ready() argument
278 struct t7xx_modem *md = ctl->md; in fsm_routine_ready()
280 ctl->curr_state = FSM_STATE_READY; in fsm_routine_ready()
281 t7xx_fsm_broadcast_ready_state(ctl); in fsm_routine_ready()
285 static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) in fsm_routine_starting() argument
287 struct t7xx_modem *md = ctl->md; in fsm_routine_starting()
290 ctl->curr_state = FSM_STATE_STARTING; in fsm_routine_starting()
292 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); in fsm_routine_starting()
295 wait_event_interruptible_timeout(ctl->async_hk_wq, in fsm_routine_starting()
297 ctl->exp_flg, HZ * 60); in fsm_routine_starting()
300 if (ctl->exp_flg) in fsm_routine_starting()
306 t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); in fsm_routine_starting()
308 fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); in fsm_routine_starting()
313 t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0); in fsm_routine_starting()
315 fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); in fsm_routine_starting()
320 fsm_routine_ready(ctl); in fsm_routine_starting()
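fsm_routine_starting() (lines 285-320) sets FSM_STATE_STARTING, broadcasts MD_STATE_WAITING_FOR_HS1, then sleeps on ctl->async_hk_wq via wait_event_interruptible_timeout() for up to HZ * 60 jiffies; if ctl->exp_flg is raised or the handshake never completes, it appends the HS2_EXIT events and runs fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT), otherwise it ends in fsm_routine_ready(). A compressed sketch of just the timed wait; hk_done stands in for the handshake-completion condition, which is not visible in this listing.

```c
/* Return 0 when the handshake completed, -ETIMEDOUT otherwise. */
static int example_wait_for_handshake(struct example_ctl *ctl, bool *hk_done)
{
	long left;

	/* Wake-ups come from the handshake path or from the exception IRQ. */
	left = wait_event_interruptible_timeout(ctl->async_hk_wq,
						*hk_done || ctl->exp_flg,
						60 * HZ);
	if (left <= 0 || ctl->exp_flg)
		return -ETIMEDOUT;	/* caller escalates to the exception routine */

	return 0;
}
```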
324 static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) in fsm_routine_start() argument
326 struct t7xx_modem *md = ctl->md; in fsm_routine_start()
333 if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && in fsm_routine_start()
334 ctl->curr_state != FSM_STATE_STOPPED) { in fsm_routine_start()
335 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_routine_start()
339 ctl->curr_state = FSM_STATE_PRE_START; in fsm_routine_start()
348 fsm_finish_command(ctl, cmd, -ETIMEDOUT); in fsm_routine_start()
355 fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); in fsm_routine_start()
360 struct t7xx_fsm_ctl *ctl = data; in fsm_main_thread() local
365 if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || in fsm_main_thread()
372 spin_lock_irqsave(&ctl->command_lock, flags); in fsm_main_thread()
373 cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); in fsm_main_thread()
375 spin_unlock_irqrestore(&ctl->command_lock, flags); in fsm_main_thread()
379 fsm_routine_start(ctl, cmd); in fsm_main_thread()
383 fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); in fsm_main_thread()
387 fsm_routine_stopping(ctl, cmd); in fsm_main_thread()
391 fsm_routine_stopped(ctl, cmd); in fsm_main_thread()
395 fsm_finish_command(ctl, cmd, -EINVAL); in fsm_main_thread()
396 fsm_flush_event_cmd_qs(ctl); in fsm_main_thread()
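fsm_main_thread() (lines 360-396) is a kthread that sleeps on ctl->command_wq until the command queue is non-empty or the thread is asked to stop, dequeues the head command under ctl->command_lock, and dispatches it to the start/exception/stopping/stopped routines; an unknown command is completed with -EINVAL and the queues are flushed. A sketch of that consumer loop; example_command, the command IDs, and the stand-in completion helper are all hypothetical.

```c
/* Hypothetical command record; only ->entry is visible in the listing. */
struct example_command {
	struct list_head entry;
	int cmd_id;
};

enum example_cmd_id { EXAMPLE_CMD_START, EXAMPLE_CMD_EXCEPTION, EXAMPLE_CMD_STOP };

/* Stand-in for fsm_finish_command(): just release the command here. */
static void example_finish_command(struct example_ctl *ctl,
				   struct example_command *cmd, int result)
{
	kfree(cmd);
}

static int example_fsm_thread(void *data)
{
	struct example_ctl *ctl = data;

	while (!kthread_should_stop()) {
		struct example_command *cmd;
		unsigned long flags;

		if (wait_event_interruptible(ctl->command_wq,
					     !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue,
				       struct example_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case EXAMPLE_CMD_START:
			/* real driver: fsm_routine_start(ctl, cmd) */
			example_finish_command(ctl, cmd, 0);
			break;
		default:
			example_finish_command(ctl, cmd, -EINVAL);
			break;
		}
	}

	return 0;
}
```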
404 int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) in t7xx_fsm_append_cmd() argument
424 spin_lock_irqsave(&ctl->command_lock, flags); in t7xx_fsm_append_cmd()
425 list_add_tail(&cmd->entry, &ctl->command_queue); in t7xx_fsm_append_cmd()
426 spin_unlock_irqrestore(&ctl->command_lock, flags); in t7xx_fsm_append_cmd()
428 wake_up(&ctl->command_wq); in t7xx_fsm_append_cmd()
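t7xx_fsm_append_cmd() (lines 404-428) is the producer side: allocate a command, append it to ctl->command_queue under the lock, then wake the FSM thread. The allocation and any completion handling between lines 404 and 424 are not visible here; this sketch reuses example_command from the previous sketch.

```c
static int example_append_cmd(struct example_ctl *ctl, int cmd_id)
{
	struct example_command *cmd;
	unsigned long flags;

	/* GFP_KERNEL is an assumption; an IRQ-context caller would need GFP_ATOMIC. */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);
	return 0;
}
```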
444 int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, in t7xx_fsm_append_event() argument
447 struct device *dev = &ctl->md->t7xx_dev->pdev->dev; in t7xx_fsm_append_event()
467 spin_lock_irqsave(&ctl->event_lock, flags); in t7xx_fsm_append_event()
468 list_add_tail(&event->entry, &ctl->event_queue); in t7xx_fsm_append_event()
469 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_fsm_append_event()
471 wake_up_all(&ctl->event_wq); in t7xx_fsm_append_event()
475 void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) in t7xx_fsm_clr_event() argument
480 spin_lock_irqsave(&ctl->event_lock, flags); in t7xx_fsm_clr_event()
481 list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { in t7xx_fsm_clr_event()
485 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_fsm_clr_event()
488 enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_get_md_state() argument
490 if (ctl) in t7xx_fsm_get_md_state()
491 return ctl->md_state; in t7xx_fsm_get_md_state()
496 unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) in t7xx_fsm_get_ctl_state() argument
498 if (ctl) in t7xx_fsm_get_ctl_state()
499 return ctl->curr_state; in t7xx_fsm_get_ctl_state()
504 int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) in t7xx_fsm_recv_md_intr() argument
509 return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); in t7xx_fsm_recv_md_intr()
511 ctl->exp_flg = true; in t7xx_fsm_recv_md_intr()
512 wake_up(&ctl->async_hk_wq); in t7xx_fsm_recv_md_intr()
514 return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); in t7xx_fsm_recv_md_intr()
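t7xx_fsm_recv_md_intr() (lines 504-514) maps a modem interrupt onto an FSM command: one type (presumably port enumeration) queues FSM_CMD_START (line 509), while the exception interrupt first sets ctl->exp_flg and wakes async_hk_wq, so any pending handshake wait aborts, before queuing FSM_CMD_EXCEPTION. The branch structure on the interrupt type is not fully visible, so the IRQ-type names below are hypothetical; the sketch reuses example_append_cmd from above and drops the flag argument.

```c
enum example_irq_type { EXAMPLE_IRQ_PORT_ENUM, EXAMPLE_IRQ_EXCEPTION };

static int example_recv_md_intr(struct example_ctl *ctl, enum example_irq_type type)
{
	switch (type) {
	case EXAMPLE_IRQ_PORT_ENUM:
		return example_append_cmd(ctl, EXAMPLE_CMD_START);
	case EXAMPLE_IRQ_EXCEPTION:
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);	/* abort a pending handshake wait */
		return example_append_cmd(ctl, EXAMPLE_CMD_EXCEPTION);
	default:
		return -EINVAL;
	}
}
```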
522 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_reset() local
524 fsm_flush_event_cmd_qs(ctl); in t7xx_fsm_reset()
525 ctl->curr_state = FSM_STATE_STOPPED; in t7xx_fsm_reset()
526 ctl->exp_flg = false; in t7xx_fsm_reset()
532 struct t7xx_fsm_ctl *ctl; in t7xx_fsm_init() local
534 ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); in t7xx_fsm_init()
535 if (!ctl) in t7xx_fsm_init()
538 md->fsm_ctl = ctl; in t7xx_fsm_init()
539 ctl->md = md; in t7xx_fsm_init()
540 ctl->curr_state = FSM_STATE_INIT; in t7xx_fsm_init()
541 INIT_LIST_HEAD(&ctl->command_queue); in t7xx_fsm_init()
542 INIT_LIST_HEAD(&ctl->event_queue); in t7xx_fsm_init()
543 init_waitqueue_head(&ctl->async_hk_wq); in t7xx_fsm_init()
544 init_waitqueue_head(&ctl->event_wq); in t7xx_fsm_init()
545 INIT_LIST_HEAD(&ctl->notifier_list); in t7xx_fsm_init()
546 init_waitqueue_head(&ctl->command_wq); in t7xx_fsm_init()
547 spin_lock_init(&ctl->event_lock); in t7xx_fsm_init()
548 spin_lock_init(&ctl->command_lock); in t7xx_fsm_init()
549 ctl->exp_flg = false; in t7xx_fsm_init()
550 spin_lock_init(&ctl->notifier_lock); in t7xx_fsm_init()
552 ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); in t7xx_fsm_init()
553 return PTR_ERR_OR_ZERO(ctl->fsm_thread); in t7xx_fsm_init()
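t7xx_fsm_init() (lines 532-553) allocates the control block with devm_kzalloc(), initialises every list, waitqueue and spinlock before anything can observe the structure, and only then starts the worker with kthread_run(), returning PTR_ERR_OR_ZERO() of the task pointer. A sketch of the same ordering for the hypothetical example_ctl:

```c
#include <linux/device.h>
#include <linux/err.h>

static int example_fsm_init(struct device *dev, struct example_ctl **out)
{
	struct example_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	/* All locks, lists and waitqueues must be live before the thread runs. */
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	init_waitqueue_head(&ctl->event_wq);
	init_waitqueue_head(&ctl->async_hk_wq);
	spin_lock_init(&ctl->command_lock);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->notifier_lock);

	*out = ctl;
	ctl->fsm_thread = kthread_run(example_fsm_thread, ctl, "example_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}
```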
558 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_fsm_uninit() local
560 if (!ctl) in t7xx_fsm_uninit()
563 if (ctl->fsm_thread) in t7xx_fsm_uninit()
564 kthread_stop(ctl->fsm_thread); in t7xx_fsm_uninit()
566 fsm_flush_event_cmd_qs(ctl); in t7xx_fsm_uninit()
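Teardown (lines 558-566) mirrors the init path: the worker thread is stopped first so nothing dequeues any more, then the leftover commands and events are flushed. A short sketch, reusing example_flush_queue from earlier; the real driver also completes queued commands with -EINVAL instead of simply freeing them.

```c
static void example_fsm_uninit(struct example_ctl *ctl)
{
	if (!ctl)
		return;

	/* Stop the consumer before draining, so no dequeue races the flush. */
	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	example_flush_queue(&ctl->event_lock, &ctl->event_queue);
}
```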