// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS	200
#define FSM_EVENT_POLL_INTERVAL_MS	20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS	10000
#define FSM_MD_EX_PASS_TIMEOUT_MS	45000
#define FSM_CMD_TIMEOUT_MS		2000

void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

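/* Drain the command and event queues: waiters on queued commands are completed with -EINVAL, queued events are freed. */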
static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

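/*
 * Pre-stop routine: stop CLDMA and, unless the device already asserted its
 * reset (RGU) interrupt, disable DRM and reset the device through
 * t7xx_acpi_fldr_func(), falling back to an MHCCIF device-reset request.
 * Finishes by running the stopped handler.
 */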
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_ctrl *md_ctrl;
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	t7xx_dev = ctl->md->t7xx_dev;

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (!ctl->md->rgu_irq_asserted) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
		/* Wait for the DRM disable to take effect */
		msleep(FSM_DRM_DISABLE_DELAY_MS);

		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_md_event_notify(md, FSM_READY);
}

static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					  ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	u32 dev_status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = read_poll_timeout(ioread32, dev_status,
				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (ret) {
		struct device *dev = &md->t7xx_dev->pdev->dev;

		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
		return;
	}

	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}

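/* FSM worker thread: dequeues queued commands and dispatches them to the state routines. */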
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

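/* Queue an FSM event and wake any waiters; allocation is GFP_ATOMIC in interrupt context. */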
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
}

int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}