1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019, Linaro Ltd 4 */ 5 #include <dt-bindings/power/qcom-aoss-qmp.h> 6 #include <linux/clk-provider.h> 7 #include <linux/interrupt.h> 8 #include <linux/io.h> 9 #include <linux/mailbox_client.h> 10 #include <linux/module.h> 11 #include <linux/platform_device.h> 12 #include <linux/pm_domain.h> 13 #include <linux/thermal.h> 14 #include <linux/slab.h> 15 16 #define QMP_DESC_MAGIC 0x0 17 #define QMP_DESC_VERSION 0x4 18 #define QMP_DESC_FEATURES 0x8 19 20 /* AOP-side offsets */ 21 #define QMP_DESC_UCORE_LINK_STATE 0xc 22 #define QMP_DESC_UCORE_LINK_STATE_ACK 0x10 23 #define QMP_DESC_UCORE_CH_STATE 0x14 24 #define QMP_DESC_UCORE_CH_STATE_ACK 0x18 25 #define QMP_DESC_UCORE_MBOX_SIZE 0x1c 26 #define QMP_DESC_UCORE_MBOX_OFFSET 0x20 27 28 /* Linux-side offsets */ 29 #define QMP_DESC_MCORE_LINK_STATE 0x24 30 #define QMP_DESC_MCORE_LINK_STATE_ACK 0x28 31 #define QMP_DESC_MCORE_CH_STATE 0x2c 32 #define QMP_DESC_MCORE_CH_STATE_ACK 0x30 33 #define QMP_DESC_MCORE_MBOX_SIZE 0x34 34 #define QMP_DESC_MCORE_MBOX_OFFSET 0x38 35 36 #define QMP_STATE_UP GENMASK(15, 0) 37 #define QMP_STATE_DOWN GENMASK(31, 16) 38 39 #define QMP_MAGIC 0x4d41494c /* mail */ 40 #define QMP_VERSION 1 41 42 /* 64 bytes is enough to store the requests and provides padding to 4 bytes */ 43 #define QMP_MSG_LEN 64 44 45 #define QMP_NUM_COOLING_RESOURCES 2 46 47 static bool qmp_cdev_max_state = 1; 48 49 struct qmp_cooling_device { 50 struct thermal_cooling_device *cdev; 51 struct qmp *qmp; 52 char *name; 53 bool state; 54 }; 55 56 /** 57 * struct qmp - driver state for QMP implementation 58 * @msgram: iomem referencing the message RAM used for communication 59 * @dev: reference to QMP device 60 * @mbox_client: mailbox client used to ring the doorbell on transmit 61 * @mbox_chan: mailbox channel used to ring the doorbell on transmit 62 * @offset: offset within @msgram where messages should be written 63 * @size: maximum size of the messages to be transmitted 64 * @event: wait_queue for synchronization with the IRQ 65 * @tx_lock: provides synchronization between multiple callers of qmp_send() 66 * @qdss_clk: QDSS clock hw struct 67 * @pd_data: genpd data 68 */ 69 struct qmp { 70 void __iomem *msgram; 71 struct device *dev; 72 73 struct mbox_client mbox_client; 74 struct mbox_chan *mbox_chan; 75 76 size_t offset; 77 size_t size; 78 79 wait_queue_head_t event; 80 81 struct mutex tx_lock; 82 83 struct clk_hw qdss_clk; 84 struct genpd_onecell_data pd_data; 85 struct qmp_cooling_device *cooling_devs; 86 }; 87 88 struct qmp_pd { 89 struct qmp *qmp; 90 struct generic_pm_domain pd; 91 }; 92 93 #define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd) 94 95 static void qmp_kick(struct qmp *qmp) 96 { 97 mbox_send_message(qmp->mbox_chan, NULL); 98 mbox_client_txdone(qmp->mbox_chan, 0); 99 } 100 101 static bool qmp_magic_valid(struct qmp *qmp) 102 { 103 return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC; 104 } 105 106 static bool qmp_link_acked(struct qmp *qmp) 107 { 108 return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP; 109 } 110 111 static bool qmp_mcore_channel_acked(struct qmp *qmp) 112 { 113 return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP; 114 } 115 116 static bool qmp_ucore_channel_up(struct qmp *qmp) 117 { 118 return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP; 119 } 120 121 static int qmp_open(struct qmp *qmp) 122 { 123 int ret; 124 u32 val; 125 126 if (!qmp_magic_valid(qmp)) { 127 dev_err(qmp->dev, "QMP magic doesn't match\n"); 128 return -EINVAL; 129 } 130 131 val = readl(qmp->msgram + QMP_DESC_VERSION); 132 if (val != QMP_VERSION) { 133 dev_err(qmp->dev, "unsupported QMP version %d\n", val); 134 return -EINVAL; 135 } 136 137 qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET); 138 qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE); 139 if (!qmp->size) { 140 dev_err(qmp->dev, "invalid mailbox size\n"); 141 return -EINVAL; 142 } 143 144 /* Ack remote core's link state */ 145 val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE); 146 writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK); 147 148 /* Set local core's link state to up */ 149 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 150 151 qmp_kick(qmp); 152 153 ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ); 154 if (!ret) { 155 dev_err(qmp->dev, "ucore didn't ack link\n"); 156 goto timeout_close_link; 157 } 158 159 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 160 161 qmp_kick(qmp); 162 163 ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ); 164 if (!ret) { 165 dev_err(qmp->dev, "ucore didn't open channel\n"); 166 goto timeout_close_channel; 167 } 168 169 /* Ack remote core's channel state */ 170 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK); 171 172 qmp_kick(qmp); 173 174 ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ); 175 if (!ret) { 176 dev_err(qmp->dev, "ucore didn't ack channel\n"); 177 goto timeout_close_channel; 178 } 179 180 return 0; 181 182 timeout_close_channel: 183 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 184 185 timeout_close_link: 186 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 187 qmp_kick(qmp); 188 189 return -ETIMEDOUT; 190 } 191 192 static void qmp_close(struct qmp *qmp) 193 { 194 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); 195 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); 196 qmp_kick(qmp); 197 } 198 199 static irqreturn_t qmp_intr(int irq, void *data) 200 { 201 struct qmp *qmp = data; 202 203 wake_up_all(&qmp->event); 204 205 return IRQ_HANDLED; 206 } 207 208 static bool qmp_message_empty(struct qmp *qmp) 209 { 210 return readl(qmp->msgram + qmp->offset) == 0; 211 } 212 213 /** 214 * qmp_send() - send a message to the AOSS 215 * @qmp: qmp context 216 * @data: message to be sent 217 * @len: length of the message 218 * 219 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message. 220 * @len must be a multiple of 4 and not longer than the mailbox size. Access is 221 * synchronized by this implementation. 222 * 223 * Return: 0 on success, negative errno on failure 224 */ 225 static int qmp_send(struct qmp *qmp, const void *data, size_t len) 226 { 227 long time_left; 228 size_t tlen; 229 int ret; 230 231 if (WARN_ON(len + sizeof(u32) > qmp->size)) 232 return -EINVAL; 233 234 if (WARN_ON(len % sizeof(u32))) 235 return -EINVAL; 236 237 mutex_lock(&qmp->tx_lock); 238 239 /* The message RAM only implements 32-bit accesses */ 240 __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), 241 data, len / sizeof(u32)); 242 writel(len, qmp->msgram + qmp->offset); 243 244 /* Read back len to confirm data written in message RAM */ 245 tlen = readl(qmp->msgram + qmp->offset); 246 qmp_kick(qmp); 247 248 time_left = wait_event_interruptible_timeout(qmp->event, 249 qmp_message_empty(qmp), HZ); 250 if (!time_left) { 251 dev_err(qmp->dev, "ucore did not ack channel\n"); 252 ret = -ETIMEDOUT; 253 254 /* Clear message from buffer */ 255 writel(0, qmp->msgram + qmp->offset); 256 } else { 257 ret = 0; 258 } 259 260 mutex_unlock(&qmp->tx_lock); 261 262 return ret; 263 } 264 265 static int qmp_qdss_clk_prepare(struct clk_hw *hw) 266 { 267 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}"; 268 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 269 270 return qmp_send(qmp, buf, sizeof(buf)); 271 } 272 273 static void qmp_qdss_clk_unprepare(struct clk_hw *hw) 274 { 275 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}"; 276 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); 277 278 qmp_send(qmp, buf, sizeof(buf)); 279 } 280 281 static const struct clk_ops qmp_qdss_clk_ops = { 282 .prepare = qmp_qdss_clk_prepare, 283 .unprepare = qmp_qdss_clk_unprepare, 284 }; 285 286 static int qmp_qdss_clk_add(struct qmp *qmp) 287 { 288 static const struct clk_init_data qdss_init = { 289 .ops = &qmp_qdss_clk_ops, 290 .name = "qdss", 291 }; 292 int ret; 293 294 qmp->qdss_clk.init = &qdss_init; 295 ret = clk_hw_register(qmp->dev, &qmp->qdss_clk); 296 if (ret < 0) { 297 dev_err(qmp->dev, "failed to register qdss clock\n"); 298 return ret; 299 } 300 301 ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get, 302 &qmp->qdss_clk); 303 if (ret < 0) { 304 dev_err(qmp->dev, "unable to register of clk hw provider\n"); 305 clk_hw_unregister(&qmp->qdss_clk); 306 } 307 308 return ret; 309 } 310 311 static void qmp_qdss_clk_remove(struct qmp *qmp) 312 { 313 of_clk_del_provider(qmp->dev->of_node); 314 clk_hw_unregister(&qmp->qdss_clk); 315 } 316 317 static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable) 318 { 319 char buf[QMP_MSG_LEN] = {}; 320 321 snprintf(buf, sizeof(buf), 322 "{class: image, res: load_state, name: %s, val: %s}", 323 res->pd.name, enable ? "on" : "off"); 324 return qmp_send(res->qmp, buf, sizeof(buf)); 325 } 326 327 static int qmp_pd_power_on(struct generic_pm_domain *domain) 328 { 329 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true); 330 } 331 332 static int qmp_pd_power_off(struct generic_pm_domain *domain) 333 { 334 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false); 335 } 336 337 static const char * const sdm845_resources[] = { 338 [AOSS_QMP_LS_CDSP] = "cdsp", 339 [AOSS_QMP_LS_LPASS] = "adsp", 340 [AOSS_QMP_LS_MODEM] = "modem", 341 [AOSS_QMP_LS_SLPI] = "slpi", 342 [AOSS_QMP_LS_SPSS] = "spss", 343 [AOSS_QMP_LS_VENUS] = "venus", 344 }; 345 346 static int qmp_pd_add(struct qmp *qmp) 347 { 348 struct genpd_onecell_data *data = &qmp->pd_data; 349 struct device *dev = qmp->dev; 350 struct qmp_pd *res; 351 size_t num = ARRAY_SIZE(sdm845_resources); 352 int ret; 353 int i; 354 355 res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL); 356 if (!res) 357 return -ENOMEM; 358 359 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains), 360 GFP_KERNEL); 361 if (!data->domains) 362 return -ENOMEM; 363 364 for (i = 0; i < num; i++) { 365 res[i].qmp = qmp; 366 res[i].pd.name = sdm845_resources[i]; 367 res[i].pd.power_on = qmp_pd_power_on; 368 res[i].pd.power_off = qmp_pd_power_off; 369 370 ret = pm_genpd_init(&res[i].pd, NULL, true); 371 if (ret < 0) { 372 dev_err(dev, "failed to init genpd\n"); 373 goto unroll_genpds; 374 } 375 376 data->domains[i] = &res[i].pd; 377 } 378 379 data->num_domains = i; 380 381 ret = of_genpd_add_provider_onecell(dev->of_node, data); 382 if (ret < 0) 383 goto unroll_genpds; 384 385 return 0; 386 387 unroll_genpds: 388 for (i--; i >= 0; i--) 389 pm_genpd_remove(data->domains[i]); 390 391 return ret; 392 } 393 394 static void qmp_pd_remove(struct qmp *qmp) 395 { 396 struct genpd_onecell_data *data = &qmp->pd_data; 397 struct device *dev = qmp->dev; 398 int i; 399 400 of_genpd_del_provider(dev->of_node); 401 402 for (i = 0; i < data->num_domains; i++) 403 pm_genpd_remove(data->domains[i]); 404 } 405 406 static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, 407 unsigned long *state) 408 { 409 *state = qmp_cdev_max_state; 410 return 0; 411 } 412 413 static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, 414 unsigned long *state) 415 { 416 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 417 418 *state = qmp_cdev->state; 419 return 0; 420 } 421 422 static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, 423 unsigned long state) 424 { 425 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 426 char buf[QMP_MSG_LEN] = {}; 427 bool cdev_state; 428 int ret; 429 430 /* Normalize state */ 431 cdev_state = !!state; 432 433 if (qmp_cdev->state == state) 434 return 0; 435 436 snprintf(buf, sizeof(buf), 437 "{class: volt_flr, event:zero_temp, res:%s, value:%s}", 438 qmp_cdev->name, 439 cdev_state ? "on" : "off"); 440 441 ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf)); 442 443 if (!ret) 444 qmp_cdev->state = cdev_state; 445 446 return ret; 447 } 448 449 static struct thermal_cooling_device_ops qmp_cooling_device_ops = { 450 .get_max_state = qmp_cdev_get_max_state, 451 .get_cur_state = qmp_cdev_get_cur_state, 452 .set_cur_state = qmp_cdev_set_cur_state, 453 }; 454 455 static int qmp_cooling_device_add(struct qmp *qmp, 456 struct qmp_cooling_device *qmp_cdev, 457 struct device_node *node) 458 { 459 char *cdev_name = (char *)node->name; 460 461 qmp_cdev->qmp = qmp; 462 qmp_cdev->state = !qmp_cdev_max_state; 463 qmp_cdev->name = cdev_name; 464 qmp_cdev->cdev = devm_thermal_of_cooling_device_register 465 (qmp->dev, node, 466 cdev_name, 467 qmp_cdev, &qmp_cooling_device_ops); 468 469 if (IS_ERR(qmp_cdev->cdev)) 470 dev_err(qmp->dev, "unable to register %s cooling device\n", 471 cdev_name); 472 473 return PTR_ERR_OR_ZERO(qmp_cdev->cdev); 474 } 475 476 static int qmp_cooling_devices_register(struct qmp *qmp) 477 { 478 struct device_node *np, *child; 479 int count = QMP_NUM_COOLING_RESOURCES; 480 int ret; 481 482 np = qmp->dev->of_node; 483 484 qmp->cooling_devs = devm_kcalloc(qmp->dev, count, 485 sizeof(*qmp->cooling_devs), 486 GFP_KERNEL); 487 488 if (!qmp->cooling_devs) 489 return -ENOMEM; 490 491 for_each_available_child_of_node(np, child) { 492 if (!of_find_property(child, "#cooling-cells", NULL)) 493 continue; 494 ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], 495 child); 496 if (ret) 497 goto unroll; 498 } 499 500 return 0; 501 502 unroll: 503 while (--count >= 0) 504 thermal_cooling_device_unregister 505 (qmp->cooling_devs[count].cdev); 506 507 return ret; 508 } 509 510 static void qmp_cooling_devices_remove(struct qmp *qmp) 511 { 512 int i; 513 514 for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) 515 thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); 516 } 517 518 static int qmp_probe(struct platform_device *pdev) 519 { 520 struct resource *res; 521 struct qmp *qmp; 522 int irq; 523 int ret; 524 525 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); 526 if (!qmp) 527 return -ENOMEM; 528 529 qmp->dev = &pdev->dev; 530 init_waitqueue_head(&qmp->event); 531 mutex_init(&qmp->tx_lock); 532 533 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 qmp->msgram = devm_ioremap_resource(&pdev->dev, res); 535 if (IS_ERR(qmp->msgram)) 536 return PTR_ERR(qmp->msgram); 537 538 qmp->mbox_client.dev = &pdev->dev; 539 qmp->mbox_client.knows_txdone = true; 540 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); 541 if (IS_ERR(qmp->mbox_chan)) { 542 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); 543 return PTR_ERR(qmp->mbox_chan); 544 } 545 546 irq = platform_get_irq(pdev, 0); 547 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, 548 "aoss-qmp", qmp); 549 if (ret < 0) { 550 dev_err(&pdev->dev, "failed to request interrupt\n"); 551 goto err_free_mbox; 552 } 553 554 ret = qmp_open(qmp); 555 if (ret < 0) 556 goto err_free_mbox; 557 558 ret = qmp_qdss_clk_add(qmp); 559 if (ret) 560 goto err_close_qmp; 561 562 ret = qmp_pd_add(qmp); 563 if (ret) 564 goto err_remove_qdss_clk; 565 566 ret = qmp_cooling_devices_register(qmp); 567 if (ret) 568 dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); 569 570 platform_set_drvdata(pdev, qmp); 571 572 return 0; 573 574 err_remove_qdss_clk: 575 qmp_qdss_clk_remove(qmp); 576 err_close_qmp: 577 qmp_close(qmp); 578 err_free_mbox: 579 mbox_free_channel(qmp->mbox_chan); 580 581 return ret; 582 } 583 584 static int qmp_remove(struct platform_device *pdev) 585 { 586 struct qmp *qmp = platform_get_drvdata(pdev); 587 588 qmp_qdss_clk_remove(qmp); 589 qmp_pd_remove(qmp); 590 qmp_cooling_devices_remove(qmp); 591 592 qmp_close(qmp); 593 mbox_free_channel(qmp->mbox_chan); 594 595 return 0; 596 } 597 598 static const struct of_device_id qmp_dt_match[] = { 599 { .compatible = "qcom,sc7180-aoss-qmp", }, 600 { .compatible = "qcom,sdm845-aoss-qmp", }, 601 { .compatible = "qcom,sm8150-aoss-qmp", }, 602 { .compatible = "qcom,sm8250-aoss-qmp", }, 603 {} 604 }; 605 MODULE_DEVICE_TABLE(of, qmp_dt_match); 606 607 static struct platform_driver qmp_driver = { 608 .driver = { 609 .name = "qcom_aoss_qmp", 610 .of_match_table = qmp_dt_match, 611 }, 612 .probe = qmp_probe, 613 .remove = qmp_remove, 614 }; 615 module_platform_driver(qmp_driver); 616 617 MODULE_DESCRIPTION("Qualcomm AOSS QMP driver"); 618 MODULE_LICENSE("GPL v2"); 619