// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @pd_data: genpd data
 * @cooling_devs: thermal cooling devices
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct genpd_onecell_data pd_data;
	struct qmp_cooling_device *cooling_devs;
};

struct qmp_pd {
	struct qmp *qmp;
	struct generic_pm_domain pd;
};

#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)

static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}
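
/*
 * qmp_open() negotiates the shared link and channel state with the AOP: it
 * acks the remote (ucore) link, raises the local (mcore) link and channel
 * state and waits for the remote side to acknowledge each step. Every state
 * change is signalled to the remote by ringing the doorbell via qmp_kick().
 */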
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @data: message to be sent
 * @len: length of the message
 *
 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
 * synchronized by this implementation.
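 *
 * Messages are plain key/value strings, e.g. "{class: clock, res: qdss,
 * val: 1}"; callers in this file pass fixed QMP_MSG_LEN-sized buffers.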
 *
 * Return: 0 on success, negative errno on failure
 */
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
	long time_left;
	int ret;

	if (WARN_ON(len + sizeof(u32) > qmp->size))
		return -EINVAL;

	if (WARN_ON(len % sizeof(u32)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 data, len / sizeof(u32));
	writel(len, qmp->msgram + qmp->offset);

	/* Read back len to confirm data written in message RAM */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack channel\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	mutex_unlock(&qmp->tx_lock);

	return ret;
}

static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf, sizeof(buf));
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf, sizeof(buf));
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}
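
/*
 * Power domains are driven by "load_state" requests to the AOSS, e.g. powering
 * on the "modem" domain sends
 * "{class: image, res: load_state, name: modem, val: on}".
 */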
"on" : "off"); 324 return qmp_send(res->qmp, buf, sizeof(buf)); 325 } 326 327 static int qmp_pd_power_on(struct generic_pm_domain *domain) 328 { 329 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true); 330 } 331 332 static int qmp_pd_power_off(struct generic_pm_domain *domain) 333 { 334 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false); 335 } 336 337 static const char * const sdm845_resources[] = { 338 [AOSS_QMP_LS_CDSP] = "cdsp", 339 [AOSS_QMP_LS_LPASS] = "adsp", 340 [AOSS_QMP_LS_MODEM] = "modem", 341 [AOSS_QMP_LS_SLPI] = "slpi", 342 [AOSS_QMP_LS_SPSS] = "spss", 343 [AOSS_QMP_LS_VENUS] = "venus", 344 }; 345 346 static int qmp_pd_add(struct qmp *qmp) 347 { 348 struct genpd_onecell_data *data = &qmp->pd_data; 349 struct device *dev = qmp->dev; 350 struct qmp_pd *res; 351 size_t num = ARRAY_SIZE(sdm845_resources); 352 int ret; 353 int i; 354 355 res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL); 356 if (!res) 357 return -ENOMEM; 358 359 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains), 360 GFP_KERNEL); 361 if (!data->domains) 362 return -ENOMEM; 363 364 for (i = 0; i < num; i++) { 365 res[i].qmp = qmp; 366 res[i].pd.name = sdm845_resources[i]; 367 res[i].pd.power_on = qmp_pd_power_on; 368 res[i].pd.power_off = qmp_pd_power_off; 369 370 ret = pm_genpd_init(&res[i].pd, NULL, true); 371 if (ret < 0) { 372 dev_err(dev, "failed to init genpd\n"); 373 goto unroll_genpds; 374 } 375 376 data->domains[i] = &res[i].pd; 377 } 378 379 data->num_domains = i; 380 381 ret = of_genpd_add_provider_onecell(dev->of_node, data); 382 if (ret < 0) 383 goto unroll_genpds; 384 385 return 0; 386 387 unroll_genpds: 388 for (i--; i >= 0; i--) 389 pm_genpd_remove(data->domains[i]); 390 391 return ret; 392 } 393 394 static void qmp_pd_remove(struct qmp *qmp) 395 { 396 struct genpd_onecell_data *data = &qmp->pd_data; 397 struct device *dev = qmp->dev; 398 int i; 399 400 of_genpd_del_provider(dev->of_node); 401 402 for (i = 0; i < data->num_domains; i++) 403 pm_genpd_remove(data->domains[i]); 404 } 405 406 static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, 407 unsigned long *state) 408 { 409 *state = qmp_cdev_max_state; 410 return 0; 411 } 412 413 static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, 414 unsigned long *state) 415 { 416 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 417 418 *state = qmp_cdev->state; 419 return 0; 420 } 421 422 static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, 423 unsigned long state) 424 { 425 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 426 char buf[QMP_MSG_LEN] = {}; 427 bool cdev_state; 428 int ret; 429 430 /* Normalize state */ 431 cdev_state = !!state; 432 433 if (qmp_cdev->state == state) 434 return 0; 435 436 snprintf(buf, sizeof(buf), 437 "{class: volt_flr, event:zero_temp, res:%s, value:%s}", 438 qmp_cdev->name, 439 cdev_state ? 
"on" : "off"); 440 441 ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf)); 442 443 if (!ret) 444 qmp_cdev->state = cdev_state; 445 446 return ret; 447 } 448 449 static struct thermal_cooling_device_ops qmp_cooling_device_ops = { 450 .get_max_state = qmp_cdev_get_max_state, 451 .get_cur_state = qmp_cdev_get_cur_state, 452 .set_cur_state = qmp_cdev_set_cur_state, 453 }; 454 455 static int qmp_cooling_device_add(struct qmp *qmp, 456 struct qmp_cooling_device *qmp_cdev, 457 struct device_node *node) 458 { 459 char *cdev_name = (char *)node->name; 460 461 qmp_cdev->qmp = qmp; 462 qmp_cdev->state = !qmp_cdev_max_state; 463 qmp_cdev->name = cdev_name; 464 qmp_cdev->cdev = devm_thermal_of_cooling_device_register 465 (qmp->dev, node, 466 cdev_name, 467 qmp_cdev, &qmp_cooling_device_ops); 468 469 if (IS_ERR(qmp_cdev->cdev)) 470 dev_err(qmp->dev, "unable to register %s cooling device\n", 471 cdev_name); 472 473 return PTR_ERR_OR_ZERO(qmp_cdev->cdev); 474 } 475 476 static int qmp_cooling_devices_register(struct qmp *qmp) 477 { 478 struct device_node *np, *child; 479 int count = QMP_NUM_COOLING_RESOURCES; 480 int ret; 481 482 np = qmp->dev->of_node; 483 484 qmp->cooling_devs = devm_kcalloc(qmp->dev, count, 485 sizeof(*qmp->cooling_devs), 486 GFP_KERNEL); 487 488 if (!qmp->cooling_devs) 489 return -ENOMEM; 490 491 for_each_available_child_of_node(np, child) { 492 if (!of_find_property(child, "#cooling-cells", NULL)) 493 continue; 494 ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], 495 child); 496 if (ret) 497 goto unroll; 498 } 499 500 return 0; 501 502 unroll: 503 while (--count >= 0) 504 thermal_cooling_device_unregister 505 (qmp->cooling_devs[count].cdev); 506 507 return ret; 508 } 509 510 static void qmp_cooling_devices_remove(struct qmp *qmp) 511 { 512 int i; 513 514 for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) 515 thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); 516 } 517 518 static int qmp_probe(struct platform_device *pdev) 519 { 520 struct resource *res; 521 struct qmp *qmp; 522 int irq; 523 int ret; 524 525 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); 526 if (!qmp) 527 return -ENOMEM; 528 529 qmp->dev = &pdev->dev; 530 init_waitqueue_head(&qmp->event); 531 mutex_init(&qmp->tx_lock); 532 533 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 qmp->msgram = devm_ioremap_resource(&pdev->dev, res); 535 if (IS_ERR(qmp->msgram)) 536 return PTR_ERR(qmp->msgram); 537 538 qmp->mbox_client.dev = &pdev->dev; 539 qmp->mbox_client.knows_txdone = true; 540 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); 541 if (IS_ERR(qmp->mbox_chan)) { 542 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); 543 return PTR_ERR(qmp->mbox_chan); 544 } 545 546 irq = platform_get_irq(pdev, 0); 547 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, 548 "aoss-qmp", qmp); 549 if (ret < 0) { 550 dev_err(&pdev->dev, "failed to request interrupt\n"); 551 goto err_free_mbox; 552 } 553 554 ret = qmp_open(qmp); 555 if (ret < 0) 556 goto err_free_mbox; 557 558 ret = qmp_qdss_clk_add(qmp); 559 if (ret) 560 goto err_close_qmp; 561 562 ret = qmp_pd_add(qmp); 563 if (ret) 564 goto err_remove_qdss_clk; 565 566 ret = qmp_cooling_devices_register(qmp); 567 if (ret) 568 dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); 569 570 platform_set_drvdata(pdev, qmp); 571 572 return 0; 573 574 err_remove_qdss_clk: 575 qmp_qdss_clk_remove(qmp); 576 err_close_qmp: 577 qmp_close(qmp); 578 err_free_mbox: 579 mbox_free_channel(qmp->mbox_chan); 580 581 return ret; 582 
static int qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	qmp_qdss_clk_remove(qmp);
	qmp_pd_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);

	return 0;
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);

static struct platform_driver qmp_driver = {
	.driver = {
		.name		= "qcom_aoss_qmp",
		.of_match_table	= qmp_dt_match,
	},
	.probe = qmp_probe,
	.remove = qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");