1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019, Linaro Ltd 4 */ 5 #include <dt-bindings/power/qcom-aoss-qmp.h> 6 #include <linux/clk-provider.h> 7 #include <linux/interrupt.h> 8 #include <linux/io.h> 9 #include <linux/mailbox_client.h> 10 #include <linux/module.h> 11 #include <linux/platform_device.h> 12 #include <linux/pm_domain.h> 13 #include <linux/thermal.h> 14 #include <linux/slab.h> 15 16 #define QMP_DESC_MAGIC 0x0 17 #define QMP_DESC_VERSION 0x4 18 #define QMP_DESC_FEATURES 0x8 19 20 /* AOP-side offsets */ 21 #define QMP_DESC_UCORE_LINK_STATE 0xc 22 #define QMP_DESC_UCORE_LINK_STATE_ACK 0x10 23 #define QMP_DESC_UCORE_CH_STATE 0x14 24 #define QMP_DESC_UCORE_CH_STATE_ACK 0x18 25 #define QMP_DESC_UCORE_MBOX_SIZE 0x1c 26 #define QMP_DESC_UCORE_MBOX_OFFSET 0x20 27 28 /* Linux-side offsets */ 29 #define QMP_DESC_MCORE_LINK_STATE 0x24 30 #define QMP_DESC_MCORE_LINK_STATE_ACK 0x28 31 #define QMP_DESC_MCORE_CH_STATE 0x2c 32 #define QMP_DESC_MCORE_CH_STATE_ACK 0x30 33 #define QMP_DESC_MCORE_MBOX_SIZE 0x34 34 #define QMP_DESC_MCORE_MBOX_OFFSET 0x38 35 36 #define QMP_STATE_UP GENMASK(15, 0) 37 #define QMP_STATE_DOWN GENMASK(31, 16) 38 39 #define QMP_MAGIC 0x4d41494c /* mail */ 40 #define QMP_VERSION 1 41 42 /* 64 bytes is enough to store the requests and provides padding to 4 bytes */ 43 #define QMP_MSG_LEN 64 44 45 #define QMP_NUM_COOLING_RESOURCES 2 46 47 static bool qmp_cdev_init_state = 1; 48 49 struct qmp_cooling_device { 50 struct thermal_cooling_device *cdev; 51 struct qmp *qmp; 52 char *name; 53 bool state; 54 }; 55 56 /** 57 * struct qmp - driver state for QMP implementation 58 * @msgram: iomem referencing the message RAM used for communication 59 * @dev: reference to QMP device 60 * @mbox_client: mailbox client used to ring the doorbell on transmit 61 * @mbox_chan: mailbox channel used to ring the doorbell on transmit 62 * @offset: offset within @msgram where messages should be written 63 * @size: maximum size of the messages to be transmitted 64 * @event: wait_queue for synchronization with the IRQ 65 * @tx_lock: provides synchronization between multiple callers of qmp_send() 66 * @qdss_clk: QDSS clock hw struct 67 * @pd_data: genpd data 68 */ 69 struct qmp { 70 void __iomem *msgram; 71 struct device *dev; 72 73 struct mbox_client mbox_client; 74 struct mbox_chan *mbox_chan; 75 76 size_t offset; 77 size_t size; 78 79 wait_queue_head_t event; 80 81 struct mutex tx_lock; 82 83 struct clk_hw qdss_clk; 84 struct genpd_onecell_data pd_data; 85 struct qmp_cooling_device *cooling_devs; 86 }; 87 88 struct qmp_pd { 89 struct qmp *qmp; 90 struct generic_pm_domain pd; 91 }; 92 93 #define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd) 94 95 static void qmp_kick(struct qmp *qmp) 96 { 97 mbox_send_message(qmp->mbox_chan, NULL); 98 mbox_client_txdone(qmp->mbox_chan, 0); 99 } 100 101 static bool qmp_magic_valid(struct qmp *qmp) 102 { 103 return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC; 104 } 105 106 static bool qmp_link_acked(struct qmp *qmp) 107 { 108 return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP; 109 } 110 111 static bool qmp_mcore_channel_acked(struct qmp *qmp) 112 { 113 return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP; 114 } 115 116 static bool qmp_ucore_channel_up(struct qmp *qmp) 117 { 118 return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP; 119 } 120 121 static int qmp_open(struct qmp *qmp) 122 { 123 int ret; 124 u32 val; 125 126 if (!qmp_magic_valid(qmp)) { 127 dev_err(qmp->dev, "QMP magic doesn't match\n"); 128 return -EINVAL; 129 } 130 131 val = readl(qmp->msgram + QMP_DESC_VERSION); 132 if (val != QMP_VERSION) { 133 dev_err(qmp->dev, "unsupported QMP version %d\n", val); 134 return -EINVAL; 135 } 136 137 qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET); 138 qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE); 139 if (!qmp->size) { 140 dev_err(qmp->dev, "invalid mailbox size\n"); 141 return -EINVAL; 142 } 143 144 /* Ack remote core's link state */ 145 val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE); 146 writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK); 147 148 /* Set local core's link state to up */ 149 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 150 151 qmp_kick(qmp); 152 153 ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ); 154 if (!ret) { 155 dev_err(qmp->dev, "ucore didn't ack link\n"); 156 goto timeout_close_link; 157 } 158 159 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 160 161 qmp_kick(qmp); 162 163 ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ); 164 if (!ret) { 165 dev_err(qmp->dev, "ucore didn't open channel\n"); 166 goto timeout_close_channel; 167 } 168 169 /* Ack remote core's channel state */ 170 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK); 171 172 qmp_kick(qmp); 173 174 ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ); 175 if (!ret) { 176 dev_err(qmp->dev, "ucore didn't ack channel\n"); 177 goto timeout_close_channel; 178 } 179 180 return 0; 181 182 timeout_close_channel: 183 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 184 185 timeout_close_link: 186 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 187 qmp_kick(qmp); 188 189 return -ETIMEDOUT; 190 } 191 192 static void qmp_close(struct qmp *qmp) 193 { 194 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 195 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 196 qmp_kick(qmp); 197 } 198 199 static irqreturn_t qmp_intr(int irq, void *data) 200 { 201 struct qmp *qmp = data; 202 203 wake_up_interruptible_all(&qmp->event); 204 205 return IRQ_HANDLED; 206 } 207 208 static bool qmp_message_empty(struct qmp *qmp) 209 { 210 return readl(qmp->msgram + qmp->offset) == 0; 211 } 212 213 /** 214 * qmp_send() - send a message to the AOSS 215 * @qmp: qmp context 216 * @data: message to be sent 217 * @len: length of the message 218 * 219 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message. 220 * @len must be a multiple of 4 and not longer than the mailbox size. Access is 221 * synchronized by this implementation. 222 * 223 * Return: 0 on success, negative errno on failure 224 */ 225 static int qmp_send(struct qmp *qmp, const void *data, size_t len) 226 { 227 long time_left; 228 int ret; 229 230 if (WARN_ON(len + sizeof(u32) > qmp->size)) 231 return -EINVAL; 232 233 if (WARN_ON(len % sizeof(u32))) 234 return -EINVAL; 235 236 mutex_lock(&qmp->tx_lock); 237 238 /* The message RAM only implements 32-bit accesses */ 239 __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), 240 data, len / sizeof(u32)); 241 writel(len, qmp->msgram + qmp->offset); 242 qmp_kick(qmp); 243 244 time_left = wait_event_interruptible_timeout(qmp->event, 245 qmp_message_empty(qmp), HZ); 246 if (!time_left) { 247 dev_err(qmp->dev, "ucore did not ack channel\n"); 248 ret = -ETIMEDOUT; 249 250 /* Clear message from buffer */ 251 writel(0, qmp->msgram + qmp->offset); 252 } else { 253 ret = 0; 254 } 255 256 mutex_unlock(&qmp->tx_lock); 257 258 return ret; 259 } 260 261 static int qmp_qdss_clk_prepare(struct clk_hw *hw) 262 { 263 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}"; 264 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 265 266 return qmp_send(qmp, buf, sizeof(buf)); 267 } 268 269 static void qmp_qdss_clk_unprepare(struct clk_hw *hw) 270 { 271 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}"; 272 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 273 274 qmp_send(qmp, buf, sizeof(buf)); 275 } 276 277 static const struct clk_ops qmp_qdss_clk_ops = { 278 .prepare = qmp_qdss_clk_prepare, 279 .unprepare = qmp_qdss_clk_unprepare, 280 }; 281 282 static int qmp_qdss_clk_add(struct qmp *qmp) 283 { 284 static const struct clk_init_data qdss_init = { 285 .ops = &qmp_qdss_clk_ops, 286 .name = "qdss", 287 }; 288 int ret; 289 290 qmp->qdss_clk.init = &qdss_init; 291 ret = clk_hw_register(qmp->dev, &qmp->qdss_clk); 292 if (ret < 0) { 293 dev_err(qmp->dev, "failed to register qdss clock\n"); 294 return ret; 295 } 296 297 ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get, 298 &qmp->qdss_clk); 299 if (ret < 0) { 300 dev_err(qmp->dev, "unable to register of clk hw provider\n"); 301 clk_hw_unregister(&qmp->qdss_clk); 302 } 303 304 return ret; 305 } 306 307 static void qmp_qdss_clk_remove(struct qmp *qmp) 308 { 309 of_clk_del_provider(qmp->dev->of_node); 310 clk_hw_unregister(&qmp->qdss_clk); 311 } 312 313 static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable) 314 { 315 char buf[QMP_MSG_LEN] = {}; 316 317 snprintf(buf, sizeof(buf), 318 "{class: image, res: load_state, name: %s, val: %s}", 319 res->pd.name, enable ? "on" : "off"); 320 return qmp_send(res->qmp, buf, sizeof(buf)); 321 } 322 323 static int qmp_pd_power_on(struct generic_pm_domain *domain) 324 { 325 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true); 326 } 327 328 static int qmp_pd_power_off(struct generic_pm_domain *domain) 329 { 330 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false); 331 } 332 333 static const char * const sdm845_resources[] = { 334 [AOSS_QMP_LS_CDSP] = "cdsp", 335 [AOSS_QMP_LS_LPASS] = "adsp", 336 [AOSS_QMP_LS_MODEM] = "modem", 337 [AOSS_QMP_LS_SLPI] = "slpi", 338 [AOSS_QMP_LS_SPSS] = "spss", 339 [AOSS_QMP_LS_VENUS] = "venus", 340 }; 341 342 static int qmp_pd_add(struct qmp *qmp) 343 { 344 struct genpd_onecell_data *data = &qmp->pd_data; 345 struct device *dev = qmp->dev; 346 struct qmp_pd *res; 347 size_t num = ARRAY_SIZE(sdm845_resources); 348 int ret; 349 int i; 350 351 res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL); 352 if (!res) 353 return -ENOMEM; 354 355 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains), 356 GFP_KERNEL); 357 if (!data->domains) 358 return -ENOMEM; 359 360 for (i = 0; i < num; i++) { 361 res[i].qmp = qmp; 362 res[i].pd.name = sdm845_resources[i]; 363 res[i].pd.power_on = qmp_pd_power_on; 364 res[i].pd.power_off = qmp_pd_power_off; 365 366 ret = pm_genpd_init(&res[i].pd, NULL, true); 367 if (ret < 0) { 368 dev_err(dev, "failed to init genpd\n"); 369 goto unroll_genpds; 370 } 371 372 data->domains[i] = &res[i].pd; 373 } 374 375 data->num_domains = i; 376 377 ret = of_genpd_add_provider_onecell(dev->of_node, data); 378 if (ret < 0) 379 goto unroll_genpds; 380 381 return 0; 382 383 unroll_genpds: 384 for (i--; i >= 0; i--) 385 pm_genpd_remove(data->domains[i]); 386 387 return ret; 388 } 389 390 static void qmp_pd_remove(struct qmp *qmp) 391 { 392 struct genpd_onecell_data *data = &qmp->pd_data; 393 struct device *dev = qmp->dev; 394 int i; 395 396 of_genpd_del_provider(dev->of_node); 397 398 for (i = 0; i < data->num_domains; i++) 399 pm_genpd_remove(data->domains[i]); 400 } 401 402 static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, 403 unsigned long *state) 404 { 405 *state = qmp_cdev_init_state; 406 return 0; 407 } 408 409 static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, 410 unsigned long *state) 411 { 412 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 413 414 *state = qmp_cdev->state; 415 return 0; 416 } 417 418 static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, 419 unsigned long state) 420 { 421 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 422 char buf[QMP_MSG_LEN] = {}; 423 bool cdev_state; 424 int ret; 425 426 /* Normalize state */ 427 cdev_state = !!state; 428 429 if (qmp_cdev->state == state) 430 return 0; 431 432 snprintf(buf, sizeof(buf), 433 "{class: volt_flr, event:zero_temp, res:%s, value:%s}", 434 qmp_cdev->name, 435 cdev_state ? "off" : "on"); 436 437 ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf)); 438 439 if (!ret) 440 qmp_cdev->state = cdev_state; 441 442 return ret; 443 } 444 445 static struct thermal_cooling_device_ops qmp_cooling_device_ops = { 446 .get_max_state = qmp_cdev_get_max_state, 447 .get_cur_state = qmp_cdev_get_cur_state, 448 .set_cur_state = qmp_cdev_set_cur_state, 449 }; 450 451 static int qmp_cooling_device_add(struct qmp *qmp, 452 struct qmp_cooling_device *qmp_cdev, 453 struct device_node *node) 454 { 455 char *cdev_name = (char *)node->name; 456 457 qmp_cdev->qmp = qmp; 458 qmp_cdev->state = qmp_cdev_init_state; 459 qmp_cdev->name = cdev_name; 460 qmp_cdev->cdev = devm_thermal_of_cooling_device_register 461 (qmp->dev, node, 462 cdev_name, 463 qmp_cdev, &qmp_cooling_device_ops); 464 465 if (IS_ERR(qmp_cdev->cdev)) 466 dev_err(qmp->dev, "unable to register %s cooling device\n", 467 cdev_name); 468 469 return PTR_ERR_OR_ZERO(qmp_cdev->cdev); 470 } 471 472 static int qmp_cooling_devices_register(struct qmp *qmp) 473 { 474 struct device_node *np, *child; 475 int count = QMP_NUM_COOLING_RESOURCES; 476 int ret; 477 478 np = qmp->dev->of_node; 479 480 qmp->cooling_devs = devm_kcalloc(qmp->dev, count, 481 sizeof(*qmp->cooling_devs), 482 GFP_KERNEL); 483 484 if (!qmp->cooling_devs) 485 return -ENOMEM; 486 487 for_each_available_child_of_node(np, child) { 488 if (!of_find_property(child, "#cooling-cells", NULL)) 489 continue; 490 ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], 491 child); 492 if (ret) 493 goto unroll; 494 } 495 496 return 0; 497 498 unroll: 499 while (--count >= 0) 500 thermal_cooling_device_unregister 501 (qmp->cooling_devs[count].cdev); 502 503 return ret; 504 } 505 506 static void qmp_cooling_devices_remove(struct qmp *qmp) 507 { 508 int i; 509 510 for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) 511 thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); 512 } 513 514 static int qmp_probe(struct platform_device *pdev) 515 { 516 struct resource *res; 517 struct qmp *qmp; 518 int irq; 519 int ret; 520 521 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); 522 if (!qmp) 523 return -ENOMEM; 524 525 qmp->dev = &pdev->dev; 526 init_waitqueue_head(&qmp->event); 527 mutex_init(&qmp->tx_lock); 528 529 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 530 qmp->msgram = devm_ioremap_resource(&pdev->dev, res); 531 if (IS_ERR(qmp->msgram)) 532 return PTR_ERR(qmp->msgram); 533 534 qmp->mbox_client.dev = &pdev->dev; 535 qmp->mbox_client.knows_txdone = true; 536 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); 537 if (IS_ERR(qmp->mbox_chan)) { 538 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); 539 return PTR_ERR(qmp->mbox_chan); 540 } 541 542 irq = platform_get_irq(pdev, 0); 543 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, 544 "aoss-qmp", qmp); 545 if (ret < 0) { 546 dev_err(&pdev->dev, "failed to request interrupt\n"); 547 goto err_free_mbox; 548 } 549 550 ret = qmp_open(qmp); 551 if (ret < 0) 552 goto err_free_mbox; 553 554 ret = qmp_qdss_clk_add(qmp); 555 if (ret) 556 goto err_close_qmp; 557 558 ret = qmp_pd_add(qmp); 559 if (ret) 560 goto err_remove_qdss_clk; 561 562 ret = qmp_cooling_devices_register(qmp); 563 if (ret) 564 dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); 565 566 platform_set_drvdata(pdev, qmp); 567 568 return 0; 569 570 err_remove_qdss_clk: 571 qmp_qdss_clk_remove(qmp); 572 err_close_qmp: 573 qmp_close(qmp); 574 err_free_mbox: 575 mbox_free_channel(qmp->mbox_chan); 576 577 return ret; 578 } 579 580 static int qmp_remove(struct platform_device *pdev) 581 { 582 struct qmp *qmp = platform_get_drvdata(pdev); 583 584 qmp_qdss_clk_remove(qmp); 585 qmp_pd_remove(qmp); 586 qmp_cooling_devices_remove(qmp); 587 588 qmp_close(qmp); 589 mbox_free_channel(qmp->mbox_chan); 590 591 return 0; 592 } 593 594 static const struct of_device_id qmp_dt_match[] = { 595 { .compatible = "qcom,sc7180-aoss-qmp", }, 596 { .compatible = "qcom,sdm845-aoss-qmp", }, 597 { .compatible = "qcom,sm8150-aoss-qmp", }, 598 {} 599 }; 600 MODULE_DEVICE_TABLE(of, qmp_dt_match); 601 602 static struct platform_driver qmp_driver = { 603 .driver = { 604 .name = "qcom_aoss_qmp", 605 .of_match_table = qmp_dt_match, 606 }, 607 .probe = qmp_probe, 608 .remove = qmp_remove, 609 }; 610 module_platform_driver(qmp_driver); 611 612 MODULE_DESCRIPTION("Qualcomm AOSS QMP driver"); 613 MODULE_LICENSE("GPL v2"); 614