// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/slimbus.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/soc/qcom/qmi.h>
#include <linux/soc/qcom/pdr.h>
#include <net/sock.h>
#include "slimbus.h"

/* NGD (Non-ported Generic Device) registers */
#define NGD_CFG			0x0
#define NGD_CFG_ENABLE		BIT(0)
#define NGD_CFG_RX_MSGQ_EN	BIT(1)
#define NGD_CFG_TX_MSGQ_EN	BIT(2)
#define NGD_STATUS		0x4
#define NGD_LADDR		BIT(1)
#define NGD_RX_MSGQ_CFG		0x8
#define NGD_INT_EN		0x10
#define NGD_INT_RECFG_DONE	BIT(24)
#define NGD_INT_TX_NACKED_2	BIT(25)
#define NGD_INT_MSG_BUF_CONTE	BIT(26)
#define NGD_INT_MSG_TX_INVAL	BIT(27)
#define NGD_INT_IE_VE_CHG	BIT(28)
#define NGD_INT_DEV_ERR		BIT(29)
#define NGD_INT_RX_MSG_RCVD	BIT(30)
#define NGD_INT_TX_MSG_SENT	BIT(31)
#define NGD_INT_STAT		0x14
#define NGD_INT_CLR		0x18
#define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \
				NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \
				NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \
				NGD_INT_RX_MSG_RCVD)

/* Slimbus QMI service */
#define SLIMBUS_QMI_SVC_ID			0x0301
#define SLIMBUS_QMI_SVC_V1			1
#define SLIMBUS_QMI_INS_ID			0
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01	0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01	0x0020
#define SLIMBUS_QMI_POWER_REQ_V01		0x0021
#define SLIMBUS_QMI_POWER_RESP_V01		0x0021
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ	0x0022
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP	0x0022
#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN	14
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN	7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN	14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN	7
#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN	7
/* QMI response timeout */
#define SLIMBUS_QMI_RESP_TOUT	1000

/* User defined commands */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E
#define SLIM_USR_MC_REPEAT_CHANGE_VALUE	0x0

#define QCOM_SLIM_NGD_AUTOSUSPEND	MSEC_PER_SEC
#define SLIM_RX_MSGQ_TIMEOUT_VAL	0x10000

#define SLIM_LA_MGR	0xFF
#define SLIM_ROOT_FREQ	24576000
#define LADDR_RETRY	5

/* Per spec, max 40 bytes per received message */
#define SLIM_MSGQ_BUF_LEN	40
#define QCOM_SLIM_NGD_DESC_NUM	32

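/*
 * Packs the first word of a SLIMbus message header: root length (l),
 * message type (mt), message code (mc), destination type (dt) and
 * destination address (ad).
 */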
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define INIT_MX_RETRIES	10
#define DEF_RETRY_MS	10
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define to_ngd(d)	container_of(d, struct qcom_slim_ngd, dev)

struct ngd_reg_offset_data {
	u32 offset, size;
};

static const struct ngd_reg_offset_data ngd_v1_5_offset_info = {
	.offset = 0x1000,
	.size = 0x1000,
};

enum qcom_slim_ngd_state {
	QCOM_SLIM_NGD_CTRL_AWAKE,
	QCOM_SLIM_NGD_CTRL_IDLE,
	QCOM_SLIM_NGD_CTRL_ASLEEP,
	QCOM_SLIM_NGD_CTRL_DOWN,
};

struct qcom_slim_ngd_qmi {
	struct qmi_handle qmi;
	struct sockaddr_qrtr svc_info;
	struct qmi_handle svc_event_hdl;
	struct qmi_response_type_v01 resp;
	struct qmi_handle *handle;
	struct completion qmi_comp;
};

struct qcom_slim_ngd_ctrl;
struct qcom_slim_ngd;

struct qcom_slim_ngd_dma_desc {
	struct dma_async_tx_descriptor *desc;
	struct qcom_slim_ngd_ctrl *ctrl;
	struct completion *comp;
	dma_cookie_t cookie;
	dma_addr_t phys;
	void *base;
};

struct qcom_slim_ngd {
	struct platform_device *pdev;
	void __iomem *base;
	int id;
};

struct qcom_slim_ngd_ctrl {
	struct slim_framer framer;
	struct slim_controller ctrl;
	struct qcom_slim_ngd_qmi qmi;
	struct qcom_slim_ngd *ngd;
	struct device *dev;
	void __iomem *base;
	struct dma_chan *dma_rx_channel;
	struct dma_chan *dma_tx_channel;
	struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM];
	struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
	struct completion reconf;
	struct work_struct m_work;
	struct work_struct ngd_up_work;
	struct workqueue_struct *mwq;
	struct completion qmi_up;
	spinlock_t tx_buf_lock;
	struct mutex tx_lock;
	struct mutex ssr_lock;
	struct notifier_block nb;
	void *notifier;
	struct pdr_handle *pdr;
	enum qcom_slim_ngd_state state;
	dma_addr_t rx_phys_base;
	dma_addr_t tx_phys_base;
	void *rx_base;
	void *tx_base;
	int tx_tail;
	int tx_head;
	u32 ver;
};

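/*
 * Wire definitions for the remote SLIMbus QMI service: instance-select and
 * power requests plus their qmi_elem_info encoding tables. The
 * INT_MIN/INT_MAX entries only force a 32-bit signed representation for
 * the enums.
 */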
enum slimbus_mode_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_MODE_SATELLITE_V01 = 1,
	SLIMBUS_MODE_MASTER_V01 = 2,
	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
	SLIMBUS_PM_INACTIVE_V01 = 1,
	SLIMBUS_PM_ACTIVE_V01 = 2,
	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_resp_enum_type_v01 {
	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
	uint32_t instance;
	uint8_t mode_valid;
	enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
	enum slimbus_pm_enum_type_v01 pm_req;
	uint8_t resp_type_valid;
	enum slimbus_resp_enum_type_v01 resp_type;
};

struct slimbus_power_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint32_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   instance),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
				   mode),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct slimbus_power_req_msg_v01,
				   pm_req),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_power_req_msg_v01,
				   resp_type_valid),
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum slimbus_resp_enum_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct slimbus_power_req_msg_v01,
				   resp_type),
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = 0x00,
		.offset = 0,
		.ei_array = NULL,
	},
};

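/*
 * Sends SLIMBUS_QMI_SELECT_INSTANCE_REQ to the remote SLIMbus QMI service,
 * telling it which hardware instance this NGD maps to and whether the
 * remote side should act as bus master or satellite.
 */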
static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl,
				struct slimbus_select_inst_req_msg_v01 *req)
{
	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
	struct qmi_txn txn;
	int rc;

	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
				slimbus_select_inst_resp_msg_v01_ei, &resp);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
		return rc;
	}

	rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
				SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01,
				SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN,
				slimbus_select_inst_req_msg_v01_ei, req);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
		qmi_txn_cancel(&txn);
		return rc;
	}

	rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
		return rc;
	}
	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		dev_err(ctrl->dev, "QMI request failed 0x%x\n",
			resp.resp.result);
		return -EREMOTEIO;
	}

	return 0;
}

static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle,
					struct sockaddr_qrtr *sq,
					struct qmi_txn *txn, const void *data)
{
	struct slimbus_power_resp_msg_v01 *resp;

	resp = (struct slimbus_power_resp_msg_v01 *)data;
	if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
		pr_err("QMI power request failed 0x%x\n",
			resp->resp.result);

	complete(&txn->completion);
}

static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
					struct slimbus_power_req_msg_v01 *req)
{
	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
	struct qmi_txn txn;
	int rc;

	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
				slimbus_power_resp_msg_v01_ei, &resp);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
		return rc;
	}

	rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
				SLIMBUS_QMI_POWER_REQ_V01,
				SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
				slimbus_power_req_msg_v01_ei, req);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
		qmi_txn_cancel(&txn);
		return rc;
	}

	rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
		return rc;
	}

	/* Check the response */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		dev_err(ctrl->dev, "QMI request failed 0x%x\n",
			resp.resp.result);
		return -EREMOTEIO;
	}

	return 0;
}

static const struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
	{
		.type = QMI_RESPONSE,
		.msg_id = SLIMBUS_QMI_POWER_RESP_V01,
		.ei = slimbus_power_resp_msg_v01_ei,
		.decoded_size = sizeof(struct slimbus_power_resp_msg_v01),
		.fn = qcom_slim_qmi_power_resp_cb,
	},
	{}
};

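/*
 * Creates the QMI client handle, connects it to the service address
 * discovered by the lookup handle and issues the select-instance request
 * for this NGD.
 */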
static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl,
			      bool apps_is_master)
{
	struct slimbus_select_inst_req_msg_v01 req;
	struct qmi_handle *handle;
	int rc;

	handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	rc = qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
				NULL, qcom_slim_qmi_msg_handlers);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI client init failed: %d\n", rc);
		goto qmi_handle_init_failed;
	}

	rc = kernel_connect(handle->sock,
				(struct sockaddr *)&ctrl->qmi.svc_info,
				sizeof(ctrl->qmi.svc_info), 0);
	if (rc < 0) {
		dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc);
		goto qmi_connect_to_service_failed;
	}

	/* Instance is 0 based */
	req.instance = (ctrl->ngd->id >> 1);
	req.mode_valid = 1;

	/* Mode indicates the role of the ADSP */
	if (apps_is_master)
		req.mode = SLIMBUS_MODE_SATELLITE_V01;
	else
		req.mode = SLIMBUS_MODE_MASTER_V01;

	ctrl->qmi.handle = handle;

	rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
	if (rc) {
		dev_err(ctrl->dev, "failed to select h/w instance\n");
		goto qmi_select_instance_failed;
	}

	return 0;

qmi_select_instance_failed:
	ctrl->qmi.handle = NULL;
qmi_connect_to_service_failed:
	qmi_handle_release(handle);
qmi_handle_init_failed:
	devm_kfree(ctrl->dev, handle);
	return rc;
}

static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl)
{
	if (!ctrl->qmi.handle)
		return;

	qmi_handle_release(ctrl->qmi.handle);
	devm_kfree(ctrl->dev, ctrl->qmi.handle);
	ctrl->qmi.handle = NULL;
}

static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl,
				       bool active)
{
	struct slimbus_power_req_msg_v01 req;

	if (active)
		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
	else
		req.pm_req = SLIMBUS_PM_INACTIVE_V01;

	req.resp_type_valid = 0;

	return qcom_slim_qmi_send_power_request(ctrl, &req);
}

static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int len,
				     struct completion *comp)
{
	struct qcom_slim_ngd_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);

	if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) {
		spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
		return NULL;
	}
	desc = &ctrl->txdesc[ctrl->tx_tail];
	desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN;
	desc->comp = comp;
	ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM;

	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);

	return desc->base;
}

static void qcom_slim_ngd_tx_msg_dma_cb(void *args)
{
	struct qcom_slim_ngd_dma_desc *desc = args;
	struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);

	if (desc->comp) {
		complete(desc->comp);
		desc->comp = NULL;
	}

	ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM;
	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
}

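/*
 * Maps a buffer handed out by qcom_slim_ngd_tx_msg_get() back to its
 * descriptor slot, pads the length to a 4-byte multiple and queues the
 * transfer on the TX DMA channel.
 */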
static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl,
				     void *buf, int len)
{
	struct qcom_slim_ngd_dma_desc *desc;
	unsigned long flags;
	int index, offset;

	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
	offset = buf - ctrl->tx_base;
	index = offset / SLIM_MSGQ_BUF_LEN;

	desc = &ctrl->txdesc[index];
	desc->phys = ctrl->tx_phys_base + offset;
	desc->base = ctrl->tx_base + offset;
	desc->ctrl = ctrl;
	len = (len + 3) & 0xfc;

	desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel,
						desc->phys, len,
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT);
	if (!desc->desc) {
		dev_err(ctrl->dev, "unable to prepare channel\n");
		spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
		return -EINVAL;
	}

	desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb;
	desc->desc->callback_param = desc;
	desc->desc->cookie = dmaengine_submit(desc->desc);
	dma_async_issue_pending(ctrl->dma_tx_channel);
	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);

	return 0;
}

static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
{
	u8 mc, mt, len;

	mt = SLIM_HEADER_GET_MT(buf[0]);
	len = SLIM_HEADER_GET_RL(buf[0]);
	mc = SLIM_HEADER_GET_MC(buf[1]);

	if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
	    mt == SLIM_MSG_MT_SRC_REFERRED_USER)
		queue_work(ctrl->mwq, &ctrl->m_work);

	if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
	    mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
	    mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
	    (mc == SLIM_USR_MC_GENERIC_ACK &&
	     mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
		slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
		pm_runtime_mark_last_busy(ctrl->ctrl.dev);
	}
}

static void qcom_slim_ngd_rx_msgq_cb(void *args)
{
	struct qcom_slim_ngd_dma_desc *desc = args;
	struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;

	qcom_slim_ngd_rx(ctrl, (u8 *)desc->base);
	/* Add descriptor back to the queue */
	desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
					desc->phys, SLIM_MSGQ_BUF_LEN,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
	if (!desc->desc) {
		dev_err(ctrl->dev, "Unable to prepare rx channel\n");
		return;
	}

	desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
	desc->desc->callback_param = desc;
	desc->desc->cookie = dmaengine_submit(desc->desc);
	dma_async_issue_pending(ctrl->dma_rx_channel);
}

static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
	struct qcom_slim_ngd_dma_desc *desc;
	int i;

	for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) {
		desc = &ctrl->rx_desc[i];
		desc->phys = ctrl->rx_phys_base + i * SLIM_MSGQ_BUF_LEN;
		desc->ctrl = ctrl;
		desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN;
		desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
						desc->phys, SLIM_MSGQ_BUF_LEN,
						DMA_DEV_TO_MEM,
						DMA_PREP_INTERRUPT);
		if (!desc->desc) {
			dev_err(ctrl->dev, "Unable to prepare rx channel\n");
			return -EINVAL;
		}

		desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
		desc->desc->callback_param = desc;
		desc->desc->cookie = dmaengine_submit(desc->desc);
	}
	dma_async_issue_pending(ctrl->dma_rx_channel);

	return 0;
}

static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
	struct device *dev = ctrl->dev;
	int ret, size;

	ctrl->dma_rx_channel = dma_request_chan(dev, "rx");
	if (IS_ERR(ctrl->dma_rx_channel)) {
		dev_err(dev, "Failed to request RX dma channel");
		ret = PTR_ERR(ctrl->dma_rx_channel);
		ctrl->dma_rx_channel = NULL;
		return ret;
	}

	size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
	ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
					   GFP_KERNEL);
	if (!ctrl->rx_base) {
		ret = -ENOMEM;
		goto rel_rx;
	}

	ret = qcom_slim_ngd_post_rx_msgq(ctrl);
	if (ret) {
		dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret);
		goto rx_post_err;
	}

	return 0;

rx_post_err:
	dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base);
rel_rx:
	dma_release_channel(ctrl->dma_rx_channel);
	return ret;
}

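/*
 * Sets up the TX message queue: a DMA channel and one coherent buffer
 * split into SLIM_MSGQ_BUF_LEN-byte slots, tracked by the tx_head/tx_tail
 * ring indices under tx_buf_lock.
 */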
static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
	struct device *dev = ctrl->dev;
	unsigned long flags;
	int ret = 0;
	int size;

	ctrl->dma_tx_channel = dma_request_chan(dev, "tx");
	if (IS_ERR(ctrl->dma_tx_channel)) {
		dev_err(dev, "Failed to request TX dma channel");
		ret = PTR_ERR(ctrl->dma_tx_channel);
		ctrl->dma_tx_channel = NULL;
		return ret;
	}

	size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
	ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
					   GFP_KERNEL);
	if (!ctrl->tx_base) {
		ret = -EINVAL;
		goto rel_tx;
	}

	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
	ctrl->tx_tail = 0;
	ctrl->tx_head = 0;
	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);

	return 0;
rel_tx:
	dma_release_channel(ctrl->dma_tx_channel);
	return ret;
}

static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl)
{
	int ret = 0;

	ret = qcom_slim_ngd_init_rx_msgq(ctrl);
	if (ret) {
		dev_err(ctrl->dev, "rx dma init failed\n");
		return ret;
	}

	ret = qcom_slim_ngd_init_tx_msgq(ctrl);
	if (ret)
		dev_err(ctrl->dev, "tx dma init failed\n");

	return ret;
}

static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d)
{
	struct qcom_slim_ngd_ctrl *ctrl = d;
	void __iomem *base = ctrl->ngd->base;
	u32 stat;

	if (pm_runtime_suspended(ctrl->ctrl.dev)) {
		dev_warn_once(ctrl->dev, "Interrupt received while suspended\n");
		return IRQ_NONE;
	}

	stat = readl(base + NGD_INT_STAT);

	if ((stat & NGD_INT_MSG_BUF_CONTE) ||
	    (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
	    (stat & NGD_INT_TX_NACKED_2)) {
		dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat);
	}

	writel(stat, base + NGD_INT_CLR);

	return IRQ_HANDLED;
}

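/*
 * Core transmit path. Core-protocol connect/disconnect requests are
 * rewritten as destination-referred user messages addressed to the bus
 * manager; the header and payload are then assembled into a TX
 * message-queue slot and handed to the DMA engine. tx_lock serialises
 * transfers, tx_sent completes from the TX DMA callback and, for user
 * messages, 'done' completes when the manager's response arrives.
 */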
static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
				  struct slim_msg_txn *txn)
{
	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	DECLARE_COMPLETION_ONSTACK(tx_sent);
	DECLARE_COMPLETION_ONSTACK(done);
	int ret, timeout, i;
	u8 wbuf[SLIM_MSGQ_BUF_LEN];
	u8 rbuf[SLIM_MSGQ_BUF_LEN];
	u32 *pbuf;
	u8 *puc;
	u8 la = txn->la;
	bool usr_msg = false;

	if (txn->mt == SLIM_MSG_MT_CORE &&
	    (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
	     txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
		return 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR)
		return -EPROTONOSUPPORT;

	if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
	    txn->rl > SLIM_MSGQ_BUF_LEN) {
		dev_err(ctrl->dev, "msg exceeds HW limit\n");
		return -EINVAL;
	}

	pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
	if (!pbuf) {
		dev_err(ctrl->dev, "Message buffer unavailable\n");
		return -ENOMEM;
	}

	if (txn->mt == SLIM_MSG_MT_CORE &&
	    (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
	     txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
	     txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
		switch (txn->mc) {
		case SLIM_MSG_MC_CONNECT_SOURCE:
			txn->mc = SLIM_USR_MC_CONNECT_SRC;
			break;
		case SLIM_MSG_MC_CONNECT_SINK:
			txn->mc = SLIM_USR_MC_CONNECT_SINK;
			break;
		case SLIM_MSG_MC_DISCONNECT_PORT:
			txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
			break;
		default:
			return -EINVAL;
		}

		usr_msg = true;
		i = 0;
		wbuf[i++] = txn->la;
		la = SLIM_LA_MGR;
		wbuf[i++] = txn->msg->wbuf[0];
		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
			wbuf[i++] = txn->msg->wbuf[1];

		txn->comp = &done;
		ret = slim_alloc_txn_tid(sctrl, txn);
		if (ret) {
			dev_err(ctrl->dev, "Unable to allocate TID\n");
			return ret;
		}

		wbuf[i++] = txn->tid;

		txn->msg->num_bytes = i;
		txn->msg->wbuf = wbuf;
		txn->msg->rbuf = rbuf;
		txn->rl = txn->msg->num_bytes + 4;
	}

	/* HW expects length field to be excluded */
	txn->rl--;
	puc = (u8 *)pbuf;
	*pbuf = 0;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
						la);
		puc += 3;
	} else {
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
						la);
		puc += 2;
	}

	if (slim_tid_txn(txn->mt, txn->mc))
		*(puc++) = txn->tid;

	if (slim_ec_txn(txn->mt, txn->mc)) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}

	if (txn->msg && txn->msg->wbuf)
		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

	mutex_lock(&ctrl->tx_lock);
	ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
	if (ret) {
		mutex_unlock(&ctrl->tx_lock);
		return ret;
	}

	timeout = wait_for_completion_timeout(&tx_sent, HZ);
	if (!timeout) {
		dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		mutex_unlock(&ctrl->tx_lock);
		return -ETIMEDOUT;
	}

	if (usr_msg) {
		timeout = wait_for_completion_timeout(&done, HZ);
		if (!timeout) {
			dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
				txn->mc, txn->mt);
			mutex_unlock(&ctrl->tx_lock);
			return -ETIMEDOUT;
		}
	}

	mutex_unlock(&ctrl->tx_lock);
	return 0;
}

static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
				       struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret, timeout;

	ret = pm_runtime_get_sync(ctrl->dev);
	if (ret < 0)
		goto pm_put;

	txn->comp = &done;

	ret = qcom_slim_ngd_xfer_msg(ctrl, txn);
	if (ret)
		goto pm_put;

	timeout = wait_for_completion_timeout(&done, HZ);
	if (!timeout) {
		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		ret = -ETIMEDOUT;
		goto pm_put;
	}
	return 0;

pm_put:
	pm_runtime_put(ctrl->dev);

	return ret;
}

static int qcom_slim_calc_coef(struct slim_stream_runtime *rt, int *exp)
{
	struct slim_controller *ctrl = rt->dev->ctrl;
	int coef;

	if (rt->ratem * ctrl->a_framer->superfreq < rt->rate)
		rt->ratem++;

	coef = rt->ratem;
	*exp = 0;

	/*
	 * CRM = C x (2^E) is the formula we are using.
	 * Here C is the coefficient and E is the exponent.
	 * CRM is the Channel Rate Multiplier.
	 * The coefficient should be either 1 or 3 and the exponent
	 * should be an integer between 0 and 9, inclusive.
	 */
	while (1) {
		while ((coef & 0x1) != 0x1) {
			coef >>= 1;
			*exp = *exp + 1;
		}

		if (coef <= 3)
			break;

		coef++;
	}

	/*
	 * We rely on the coef value (1 or 3) to set a bit
	 * in the slimbus message packet. This bit is
	 * BIT(5), the segment rate coefficient.
	 */
	if (coef == 1) {
		if (*exp > 9)
			return -EIO;
		coef = 0;
	} else {
		if (*exp > 8)
			return -EIO;
		coef = 1;
	}

	return coef;
}

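/*
 * Activates the channels of a stream with a single
 * SLIM_USR_MC_DEF_ACT_CHAN request covering every port, followed by
 * SLIM_USR_MC_RECONFIG_NOW to commit the new configuration.
 */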
static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
{
	struct slim_device *sdev = rt->dev;
	struct slim_controller *ctrl = sdev->ctrl;
	struct slim_val_inf msg = {0};
	u8 wbuf[SLIM_MSGQ_BUF_LEN];
	u8 rbuf[SLIM_MSGQ_BUF_LEN];
	struct slim_msg_txn txn = {0,};
	int i, ret;

	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
	txn.la = SLIM_LA_MGR;
	txn.ec = 0;
	txn.msg = &msg;
	txn.msg->num_bytes = 0;
	txn.msg->wbuf = wbuf;
	txn.msg->rbuf = rbuf;

	for (i = 0; i < rt->num_ports; i++) {
		struct slim_port *port = &rt->ports[i];

		if (txn.msg->num_bytes == 0) {
			int exp = 0, coef = 0;

			wbuf[txn.msg->num_bytes++] = sdev->laddr;
			wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
						   (port->ch.aux_fmt << 6);

			/* calculate coef dynamically */
			coef = qcom_slim_calc_coef(rt, &exp);
			if (coef < 0) {
				dev_err(&sdev->dev,
					"%s: error calculating coef %d\n",
					__func__, coef);
				return -EIO;
			}

			if (coef)
				wbuf[txn.msg->num_bytes] |= BIT(5);

			txn.msg->num_bytes++;
			wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;

			if (rt->prot == SLIM_PROTO_ISO)
				wbuf[txn.msg->num_bytes++] =
						port->ch.prrate |
						SLIM_CHANNEL_CONTENT_FL;
			else
				wbuf[txn.msg->num_bytes++] = port->ch.prrate;

			ret = slim_alloc_txn_tid(ctrl, &txn);
			if (ret) {
				dev_err(&sdev->dev, "Fail to allocate TID\n");
				return -ENXIO;
			}
			wbuf[txn.msg->num_bytes++] = txn.tid;
		}
		wbuf[txn.msg->num_bytes++] = port->ch.id;
	}

	txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
	txn.rl = txn.msg->num_bytes + 4;
	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
	if (ret) {
		slim_free_txn_tid(ctrl, &txn);
		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
			txn.mt);
		return ret;
	}

	txn.mc = SLIM_USR_MC_RECONFIG_NOW;
	txn.msg->num_bytes = 2;
	wbuf[1] = sdev->laddr;
	txn.rl = txn.msg->num_bytes + 4;

	ret = slim_alloc_txn_tid(ctrl, &txn);
	if (ret) {
		dev_err(ctrl->dev, "Fail to allocate TID\n");
		return ret;
	}

	wbuf[0] = txn.tid;
	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
	if (ret) {
		slim_free_txn_tid(ctrl, &txn);
		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
			txn.mt);
	}

	return ret;
}

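/*
 * Queries the bus manager for the logical address assigned to an
 * enumeration address; an all-zero reply is treated as a failed lookup.
 */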
static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
				   struct slim_eaddr *ea, u8 *laddr)
{
	struct slim_val_inf msg = {0};
	u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
	struct slim_msg_txn txn;
	u8 wbuf[10] = {0};
	u8 rbuf[10] = {0};
	int ret;

	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
	txn.la = SLIM_LA_MGR;
	txn.ec = 0;

	txn.mc = SLIM_USR_MC_ADDR_QUERY;
	txn.rl = 11;
	txn.msg = &msg;
	txn.msg->num_bytes = 7;
	txn.msg->wbuf = wbuf;
	txn.msg->rbuf = rbuf;

	ret = slim_alloc_txn_tid(ctrl, &txn);
	if (ret < 0)
		return ret;

	wbuf[0] = (u8)txn.tid;
	memcpy(&wbuf[1], ea, sizeof(*ea));

	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
	if (ret) {
		slim_free_txn_tid(ctrl, &txn);
		return ret;
	}

	if (!memcmp(rbuf, failed_ea, 6))
		return -ENXIO;

	*laddr = rbuf[6];

	return ret;
}

static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl)
{
	if (ctrl->dma_rx_channel) {
		dmaengine_terminate_sync(ctrl->dma_rx_channel);
		dma_release_channel(ctrl->dma_rx_channel);
	}

	if (ctrl->dma_tx_channel) {
		dmaengine_terminate_sync(ctrl->dma_tx_channel);
		dma_release_channel(ctrl->dma_tx_channel);
	}

	ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL;

	return 0;
}

static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
	u32 cfg = readl_relaxed(ctrl->ngd->base);

	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
	    ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
		qcom_slim_ngd_init_dma(ctrl);

	/* By default enable message queues */
	cfg |= NGD_CFG_RX_MSGQ_EN;
	cfg |= NGD_CFG_TX_MSGQ_EN;

	/* Enable NGD if it's not already enabled */
	if (!(cfg & NGD_CFG_ENABLE))
		cfg |= NGD_CFG_ENABLE;

	writel_relaxed(cfg, ctrl->ngd->base);
}

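/*
 * Powers up and (re)initialises the NGD after boot, runtime resume or
 * subsystem restart: wait for the QMI link, request the ACTIVE power
 * state, re-enable the message queues and, unless the hardware already
 * reports a logical address, wait for the capability exchange to finish.
 */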
static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
{
	enum qcom_slim_ngd_state cur_state = ctrl->state;
	struct qcom_slim_ngd *ngd = ctrl->ngd;
	u32 laddr, rx_msgq;
	int timeout, ret = 0;

	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
		timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
		if (!timeout)
			return -EREMOTEIO;
	}

	if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP ||
	    ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
		ret = qcom_slim_qmi_power_request(ctrl, true);
		if (ret) {
			dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n",
				ret);
			return ret;
		}
	}

	ctrl->ver = readl_relaxed(ctrl->base);
	/* Version info in 16 MSbits */
	ctrl->ver >>= 16;

	laddr = readl_relaxed(ngd->base + NGD_STATUS);
	if (laddr & NGD_LADDR) {
		/*
		 * External MDM restart case where the ADSP itself was the
		 * active framer, e.g. the modem restarted while playback
		 * was active.
		 */
		if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) {
			dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
			return 0;
		}
		qcom_slim_ngd_setup(ctrl);
		return 0;
	}

	/*
	 * Reinitialize only when registers are not retained or when
	 * enumeration is lost for the NGD.
	 */
	reinit_completion(&ctrl->reconf);

	writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN);
	rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG);

	writel_relaxed(rx_msgq | SLIM_RX_MSGQ_TIMEOUT_VAL,
		       ngd->base + NGD_RX_MSGQ_CFG);
	qcom_slim_ngd_setup(ctrl);

	timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
	if (!timeout) {
		dev_err(ctrl->dev, "capability exchange timed-out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl)
{
	struct slim_device *sbdev;
	struct device_node *node;

	for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
		sbdev = of_slim_get_device(&ctrl->ctrl, node);
		if (!sbdev)
			continue;

		if (slim_get_logical_addr(sbdev))
			dev_err(ctrl->dev, "Failed to get logical address\n");
	}
}

static void qcom_slim_ngd_master_worker(struct work_struct *work)
{
	struct qcom_slim_ngd_ctrl *ctrl;
	struct slim_msg_txn txn;
	struct slim_val_inf msg = {0};
	int retries = 0;
	u8 wbuf[8];
	int ret = 0;

	ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
	txn.ec = 0;
	txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
	txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
	txn.la = SLIM_LA_MGR;
	wbuf[0] = SAT_MAGIC_LSB;
	wbuf[1] = SAT_MAGIC_MSB;
	wbuf[2] = SAT_MSG_VER;
	wbuf[3] = SAT_MSG_PROT;
	txn.msg = &msg;
	txn.msg->wbuf = wbuf;
	txn.msg->num_bytes = 4;
	txn.rl = 8;

	dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n");

capability_retry:
	ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn);
	if (!ret) {
		if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
			complete(&ctrl->reconf);
		else
			dev_err(ctrl->dev, "unexpected state:%d\n",
				ctrl->state);

		if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
			qcom_slim_ngd_notify_slaves(ctrl);

	} else if (ret == -EIO) {
		dev_err(ctrl->dev, "capability message NACKed, retrying\n");
		if (retries < INIT_MX_RETRIES) {
			msleep(DEF_RETRY_MS);
			retries++;
			goto capability_retry;
		}
	} else {
		dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret);
	}
}

static int qcom_slim_ngd_update_device_status(struct device *dev, void *null)
{
	slim_report_absent(to_slim_device(dev));

	return 0;
}

static int qcom_slim_ngd_runtime_resume(struct device *dev)
{
	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
	int ret = 0;

	if (!ctrl->qmi.handle)
		return 0;

	if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
		ret = qcom_slim_ngd_power_up(ctrl);
	if (ret) {
		/* Did SSR cause this power-up failure? */
		if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN)
			ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
		else
			dev_err(ctrl->dev, "HW wakeup attempt during SSR\n");
	} else {
		ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE;
	}

	return 0;
}

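/*
 * Called on remote-processor up/down transitions: on enable it creates
 * the QMI client, resumes the device and registers the SLIMbus
 * controller; on disable it releases the QMI client and unregisters the
 * controller.
 */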
static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
{
	if (enable) {
		int ret = qcom_slim_qmi_init(ctrl, false);

		if (ret) {
			dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n",
				ret, ctrl->state);
			return ret;
		}
		/* controller state should be in sync with framework state */
		complete(&ctrl->qmi.qmi_comp);
		if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
		    !pm_runtime_suspended(ctrl->ctrl.dev))
			qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
		else
			pm_runtime_resume(ctrl->ctrl.dev);

		pm_runtime_mark_last_busy(ctrl->ctrl.dev);
		pm_runtime_put(ctrl->ctrl.dev);

		ret = slim_register_controller(&ctrl->ctrl);
		if (ret) {
			dev_err(ctrl->dev, "error adding slim controller\n");
			return ret;
		}

		dev_info(ctrl->dev, "SLIM controller Registered\n");
	} else {
		qcom_slim_qmi_exit(ctrl);
		slim_unregister_controller(&ctrl->ctrl);
	}

	return 0;
}

static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
					struct qmi_service *service)
{
	struct qcom_slim_ngd_qmi *qmi =
		container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
	struct qcom_slim_ngd_ctrl *ctrl =
		container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);

	qmi->svc_info.sq_family = AF_QIPCRTR;
	qmi->svc_info.sq_node = service->node;
	qmi->svc_info.sq_port = service->port;

	complete(&ctrl->qmi_up);

	return 0;
}

static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
					 struct qmi_service *service)
{
	struct qcom_slim_ngd_qmi *qmi =
		container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
	struct qcom_slim_ngd_ctrl *ctrl =
		container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);

	reinit_completion(&ctrl->qmi_up);
	qmi->svc_info.sq_node = 0;
	qmi->svc_info.sq_port = 0;
}

static const struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
	.new_server = qcom_slim_ngd_qmi_new_server,
	.del_server = qcom_slim_ngd_qmi_del_server,
};

static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl)
{
	struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi;
	int ret;

	ret = qmi_handle_init(&qmi->svc_event_hdl, 0,
			      &qcom_slim_ngd_qmi_svc_event_ops, NULL);
	if (ret < 0) {
		dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret);
		return ret;
	}

	ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID,
			     SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID);
	if (ret < 0) {
		dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret);
		qmi_handle_release(&qmi->svc_event_hdl);
	}
	return ret;
}

static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi)
{
	qmi_handle_release(&qmi->svc_event_hdl);
}

static struct platform_driver qcom_slim_ngd_driver;
#define QCOM_SLIM_NGD_DRV_NAME	"qcom,slim-ngd"

static const struct of_device_id qcom_slim_ngd_dt_match[] = {
	{
		.compatible = "qcom,slim-ngd-v1.5.0",
		.data = &ngd_v1_5_offset_info,
	}, {
		.compatible = "qcom,slim-ngd-v2.1.0",
		.data = &ngd_v1_5_offset_info,
	},
	{}
};

MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);

static void qcom_slim_ngd_down(struct qcom_slim_ngd_ctrl *ctrl)
{
	mutex_lock(&ctrl->ssr_lock);
	device_for_each_child(ctrl->ctrl.dev, NULL,
			      qcom_slim_ngd_update_device_status);
	qcom_slim_ngd_enable(ctrl, false);
	mutex_unlock(&ctrl->ssr_lock);
}

static void qcom_slim_ngd_up_worker(struct work_struct *work)
{
	struct qcom_slim_ngd_ctrl *ctrl;

	ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);

	/* Make sure qmi service is up before continuing */
	if (!wait_for_completion_interruptible_timeout(&ctrl->qmi_up,
				msecs_to_jiffies(MSEC_PER_SEC))) {
		dev_err(ctrl->dev, "QMI wait timeout\n");
		return;
	}

	mutex_lock(&ctrl->ssr_lock);
	qcom_slim_ngd_enable(ctrl, true);
	mutex_unlock(&ctrl->ssr_lock);
}

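/*
 * Common handler for SSR (subsystem restart) and PDR (protection domain
 * restart) notifications: on shutdown it quiesces the TX path, marks the
 * child devices absent and tears down DMA; on power-up it schedules
 * ngd_up_work to bring the controller back.
 */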
static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
					unsigned long action)
{
	switch (action) {
	case QCOM_SSR_BEFORE_SHUTDOWN:
	case SERVREG_SERVICE_STATE_DOWN:
		/* Make sure the last dma xfer is finished */
		mutex_lock(&ctrl->tx_lock);
		if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
			pm_runtime_get_noresume(ctrl->ctrl.dev);
			ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
			qcom_slim_ngd_down(ctrl);
			qcom_slim_ngd_exit_dma(ctrl);
		}
		mutex_unlock(&ctrl->tx_lock);
		break;
	case QCOM_SSR_AFTER_POWERUP:
	case SERVREG_SERVICE_STATE_UP:
		schedule_work(&ctrl->ngd_up_work);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int qcom_slim_ngd_ssr_notify(struct notifier_block *nb,
				    unsigned long action,
				    void *data)
{
	struct qcom_slim_ngd_ctrl *ctrl = container_of(nb,
					struct qcom_slim_ngd_ctrl, nb);

	return qcom_slim_ngd_ssr_pdr_notify(ctrl, action);
}

static void slim_pd_status(int state, char *svc_path, void *priv)
{
	struct qcom_slim_ngd_ctrl *ctrl = (struct qcom_slim_ngd_ctrl *)priv;

	qcom_slim_ngd_ssr_pdr_notify(ctrl, state);
}

static int of_qcom_slim_ngd_register(struct device *parent,
				     struct qcom_slim_ngd_ctrl *ctrl)
{
	const struct ngd_reg_offset_data *data;
	struct qcom_slim_ngd *ngd;
	const struct of_device_id *match;
	struct device_node *node;
	u32 id;
	int ret;

	match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
	data = match->data;
	for_each_available_child_of_node(parent->of_node, node) {
		if (of_property_read_u32(node, "reg", &id))
			continue;

		ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
		if (!ngd) {
			of_node_put(node);
			return -ENOMEM;
		}

		ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
		if (!ngd->pdev) {
			kfree(ngd);
			of_node_put(node);
			return -ENOMEM;
		}
		ngd->id = id;
		ngd->pdev->dev.parent = parent;

		ret = driver_set_override(&ngd->pdev->dev,
					  &ngd->pdev->driver_override,
					  QCOM_SLIM_NGD_DRV_NAME,
					  strlen(QCOM_SLIM_NGD_DRV_NAME));
		if (ret) {
			platform_device_put(ngd->pdev);
			kfree(ngd);
			of_node_put(node);
			return ret;
		}
		ngd->pdev->dev.of_node = node;
		ctrl->ngd = ngd;

		ret = platform_device_add(ngd->pdev);
		if (ret) {
			platform_device_put(ngd->pdev);
			kfree(ngd);
			of_node_put(node);
			return ret;
		}
		ngd->base = ctrl->base + ngd->id * data->offset +
					(ngd->id - 1) * data->size;

		return 0;
	}

	return -ENODEV;
}

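/*
 * Probe for the child NGD platform device created by
 * of_qcom_slim_ngd_register(): sets up runtime PM, registers for QMI
 * service notifications and creates the master-capability workqueue.
 */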
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
	int ret;

	ctrl->ctrl.dev = dev;

	platform_set_drvdata(pdev, ctrl);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_noresume(dev);
	ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
	if (ret) {
		dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
		return ret;
	}

	INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
	INIT_WORK(&ctrl->ngd_up_work, qcom_slim_ngd_up_worker);
	ctrl->mwq = create_singlethread_workqueue("ngd_master");
	if (!ctrl->mwq) {
		dev_err(&pdev->dev, "Failed to start master worker\n");
		ret = -ENOMEM;
		goto wq_err;
	}

	return 0;
wq_err:
	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
	if (ctrl->mwq)
		destroy_workqueue(ctrl->mwq);

	return ret;
}

static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qcom_slim_ngd_ctrl *ctrl;
	int ret;
	struct pdr_service *pds;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	dev_set_drvdata(dev, ctrl);

	ctrl->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;

	ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
			       IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "request IRQ failed\n");

	ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify;
	ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb);
	if (IS_ERR(ctrl->notifier))
		return PTR_ERR(ctrl->notifier);

	ctrl->dev = dev;
	ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
	ctrl->framer.superfreq =
		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;

	ctrl->ctrl.a_framer = &ctrl->framer;
	ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
	ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
	ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
	ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
	ctrl->ctrl.wakeup = NULL;
	ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;

	mutex_init(&ctrl->tx_lock);
	mutex_init(&ctrl->ssr_lock);
	spin_lock_init(&ctrl->tx_buf_lock);
	init_completion(&ctrl->reconf);
	init_completion(&ctrl->qmi.qmi_comp);
	init_completion(&ctrl->qmi_up);

	ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl);
	if (IS_ERR(ctrl->pdr)) {
		ret = dev_err_probe(dev, PTR_ERR(ctrl->pdr),
				    "Failed to init PDR handle\n");
		goto err_pdr_alloc;
	}

	pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd");
	if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
		ret = dev_err_probe(dev, PTR_ERR(pds), "pdr add lookup failed\n");
		goto err_pdr_lookup;
	}

	platform_driver_register(&qcom_slim_ngd_driver);
	return of_qcom_slim_ngd_register(dev, ctrl);

err_pdr_alloc:
	qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);

err_pdr_lookup:
	pdr_handle_release(ctrl->pdr);

	return ret;
}

static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
{
	platform_driver_unregister(&qcom_slim_ngd_driver);

	return 0;
}

static int qcom_slim_ngd_remove(struct platform_device *pdev)
{
	struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	pdr_handle_release(ctrl->pdr);
	qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
	qcom_slim_ngd_enable(ctrl, false);
	qcom_slim_ngd_exit_dma(ctrl);
	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
	if (ctrl->mwq)
		destroy_workqueue(ctrl->mwq);

	kfree(ctrl->ngd);
	ctrl->ngd = NULL;
	return 0;
}

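/*
 * Runtime PM: idle only schedules an autosuspend; suspend releases the
 * DMA channels and asks the remote side for the INACTIVE power state.
 */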
static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
{
	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE)
		ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE;
	pm_request_autosuspend(dev);
	return -EAGAIN;
}

static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
{
	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
	int ret = 0;

	qcom_slim_ngd_exit_dma(ctrl);
	if (!ctrl->qmi.handle)
		return 0;

	ret = qcom_slim_qmi_power_request(ctrl, false);
	if (ret && ret != -EBUSY)
		dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
	if (!ret || ret == -ETIMEDOUT)
		ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;

	return ret;
}

static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(
		qcom_slim_ngd_runtime_suspend,
		qcom_slim_ngd_runtime_resume,
		qcom_slim_ngd_runtime_idle
	)
};

static struct platform_driver qcom_slim_ngd_ctrl_driver = {
	.probe = qcom_slim_ngd_ctrl_probe,
	.remove = qcom_slim_ngd_ctrl_remove,
	.driver	= {
		.name = "qcom,slim-ngd-ctrl",
		.of_match_table = qcom_slim_ngd_dt_match,
	},
};

static struct platform_driver qcom_slim_ngd_driver = {
	.probe = qcom_slim_ngd_probe,
	.remove = qcom_slim_ngd_remove,
	.driver	= {
		.name = QCOM_SLIM_NGD_DRV_NAME,
		.pm = &qcom_slim_ngd_dev_pm_ops,
	},
};

module_platform_driver(qcom_slim_ngd_ctrl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMBus NGD controller");