// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1
/*
 * 64 bytes is enough to store each request and keeps the payload padded to
 * a multiple of 4 bytes, matching the 32-bit accesses used on message RAM.
 */
#define QMP_MSG_LEN			64
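
/*
 * Illustrative layout of a message in message RAM, as written by qmp_send()
 * below: a 32-bit length word followed by the NUL-padded payload. The remote
 * end clears the length word once it has consumed the message.
 *
 *	offset + 0x0:	<length = QMP_MSG_LEN>
 *	offset + 0x4:	"{class: clock, res: qdss, val: 1}\0\0..."
 */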

#define QMP_NUM_COOLING_RESOURCES	2

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @cooling_devs: thermal cooling devices
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct qmp_cooling_device *cooling_devs;
};

/* Ring the doorbell towards the AOSS to signal updated message RAM state */
static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}

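/*
 * Overview of the handshake implemented by qmp_open(): validate the shared
 * descriptor, acknowledge the remote (ucore) link state, then advertise the
 * local (mcore) link and channel state, ringing the doorbell and waiting up
 * to a second for the remote acknowledgment after each step.
 */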
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @fmt: format string for message to be sent
 * @...: arguments for the format string
 *
 * Transmit message to AOSS and wait for the AOSS to acknowledge the message.
 * The formatted message must not be longer than the mailbox size. Access is
 * synchronized by this implementation.
 *
 * Return: 0 on success, negative errno on failure
 */
int qmp_send(struct qmp *qmp, const char *fmt, ...)
{
	char buf[QMP_MSG_LEN];
	long time_left;
	va_list args;
	int len;
	int ret;

	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (WARN_ON(len >= sizeof(buf)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 buf, sizeof(buf) / sizeof(u32));
	writel(sizeof(buf), qmp->msgram + qmp->offset);

	/* Read back length to confirm data written in message RAM */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack message\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	mutex_unlock(&qmp->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qmp_send);
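
/*
 * Illustrative use of qmp_send() from a client driver (not part of this
 * file; the "ddr" class and "freq" variable are made-up placeholders):
 *
 *	ret = qmp_send(qmp, "{class: ddr, res: fixed, val: %d}", freq);
 *	if (ret)
 *		dev_err(dev, "failed to request ddr state\n");
 */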

static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf);
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf);
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}

static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = qmp_cdev_max_state;
	return 0;
}

static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;

	*state = qmp_cdev->state;
	return 0;
}

static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
	bool cdev_state;
	int ret;

	/* Normalize state */
	cdev_state = !!state;

	/* Compare against the normalized state so values > 1 don't resend */
	if (qmp_cdev->state == cdev_state)
		return 0;

	ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
		       qmp_cdev->name, cdev_state ? "on" : "off");
	if (!ret)
		qmp_cdev->state = cdev_state;

	return ret;
}

static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
	.get_max_state = qmp_cdev_get_max_state,
	.get_cur_state = qmp_cdev_get_cur_state,
	.set_cur_state = qmp_cdev_set_cur_state,
};

static int qmp_cooling_device_add(struct qmp *qmp,
				  struct qmp_cooling_device *qmp_cdev,
				  struct device_node *node)
{
	char *cdev_name = (char *)node->name;

	qmp_cdev->qmp = qmp;
	qmp_cdev->state = !qmp_cdev_max_state;
	qmp_cdev->name = cdev_name;
	qmp_cdev->cdev = devm_thermal_of_cooling_device_register(qmp->dev,
				node, cdev_name, qmp_cdev,
				&qmp_cooling_device_ops);

	if (IS_ERR(qmp_cdev->cdev))
		dev_err(qmp->dev, "unable to register %s cooling device\n",
			cdev_name);

	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
}

static int qmp_cooling_devices_register(struct qmp *qmp)
{
	struct device_node *np, *child;
	int count = 0;
	int ret;

	np = qmp->dev->of_node;

	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
					 sizeof(*qmp->cooling_devs),
					 GFP_KERNEL);
	if (!qmp->cooling_devs)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		if (!of_property_present(child, "#cooling-cells"))
			continue;
		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
					     child);
		if (ret) {
			of_node_put(child);
			goto unroll;
		}
	}

	/* NULL the pointer after freeing so the remove path can't touch it */
	if (!count) {
		devm_kfree(qmp->dev, qmp->cooling_devs);
		qmp->cooling_devs = NULL;
	}

	return 0;

unroll:
	while (--count >= 0)
		thermal_cooling_device_unregister(qmp->cooling_devs[count].cdev);
	devm_kfree(qmp->dev, qmp->cooling_devs);
	qmp->cooling_devs = NULL;

	return ret;
}

static void qmp_cooling_devices_remove(struct qmp *qmp)
{
	int i;

	/* No cooling devices were registered, or registration failed */
	if (!qmp->cooling_devs)
		return;

	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}

/**
 * qmp_get() - get a qmp handle from a device
 * @dev: client device pointer
 *
 * Return: handle to qmp device on success, ERR_PTR() on failure
 */
struct qmp *qmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct qmp *qmp;

	if (!dev || !dev->of_node)
		return ERR_PTR(-EINVAL);

	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	qmp = platform_get_drvdata(pdev);

	if (!qmp) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	return qmp;
}
EXPORT_SYMBOL(qmp_get);

/**
 * qmp_put() - release a qmp handle
 * @qmp: qmp handle obtained from qmp_get()
 */
void qmp_put(struct qmp *qmp)
{
	/*
	 * Match get_device() inside of_find_device_by_node() in
	 * qmp_get()
	 */
	if (!IS_ERR_OR_NULL(qmp))
		put_device(qmp->dev);
}
EXPORT_SYMBOL(qmp_put);
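
/*
 * Illustrative client lifecycle (assumes a "qcom,qmp" phandle in the client
 * device's DT node; "client_probe" is a made-up example function):
 *
 *	static int client_probe(struct platform_device *pdev)
 *	{
 *		struct qmp *qmp;
 *
 *		qmp = qmp_get(&pdev->dev);
 *		if (IS_ERR(qmp))
 *			return PTR_ERR(qmp);
 *
 *		// ... issue requests with qmp_send() ...
 *
 *		qmp_put(qmp);
 *		return 0;
 *	}
 */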

static int qmp_probe(struct platform_device *pdev)
{
	struct qmp *qmp;
	int irq;
	int ret;

	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = &pdev->dev;
	init_waitqueue_head(&qmp->event);
	mutex_init(&qmp->tx_lock);

	qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qmp->msgram))
		return PTR_ERR(qmp->msgram);

	qmp->mbox_client.dev = &pdev->dev;
	qmp->mbox_client.knows_txdone = true;
	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
	if (IS_ERR(qmp->mbox_chan)) {
		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
		return PTR_ERR(qmp->mbox_chan);
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
			       "aoss-qmp", qmp);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto err_free_mbox;
	}

	ret = qmp_open(qmp);
	if (ret < 0)
		goto err_free_mbox;

	ret = qmp_qdss_clk_add(qmp);
	if (ret)
		goto err_close_qmp;

	ret = qmp_cooling_devices_register(qmp);
	if (ret)
		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");

	platform_set_drvdata(pdev, qmp);

	return 0;

err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static int qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	qmp_qdss_clk_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);

	return 0;
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sc7280-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{ .compatible = "qcom,sm8350-aoss-qmp", },
	{ .compatible = "qcom,aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
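
/*
 * Illustrative device tree node matched by this driver (values approximate
 * the sdm845 node and are shown for context only):
 *
 *	aoss_qmp: power-controller@c300000 {
 *		compatible = "qcom,sdm845-aoss-qmp";
 *		reg = <0 0x0c300000 0 0x100000>;
 *		interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apss_shared 0>;
 *		#clock-cells = <0>;
 *	};
 */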

static struct platform_driver qmp_driver = {
	.driver = {
		.name = "qcom_aoss_qmp",
		.of_match_table = qmp_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qmp_probe,
	.remove = qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");