xref: /openbmc/linux/drivers/soc/qcom/qcom_aoss.c (revision 7cffcade)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough to store the requests and pads them to a 4-byte boundary */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @cooling_devs: thermal cooling devices
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct qmp_cooling_device *cooling_devs;
};

static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}

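/*
 * Open the QMP link and channel towards the AOP. A short description of the
 * handshake implemented below: validate the descriptor magic and version,
 * read the mcore mailbox offset and size, ack the remote (ucore) link state,
 * raise the local (mcore) link and channel state, and wait up to one second
 * for the remote side to acknowledge each transition.
 */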
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @data: message to be sent
 * @len: length of the message
 *
 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
 * synchronized by this implementation.
 *
 * Return: 0 on success, negative errno on failure
 */
int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
	long time_left;
	int ret;

	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
		return -EINVAL;

	if (WARN_ON(len + sizeof(u32) > qmp->size))
		return -EINVAL;

	if (WARN_ON(len % sizeof(u32)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 data, len / sizeof(u32));
	writel(len, qmp->msgram + qmp->offset);

	/* Read back len to ensure the data has been written to message RAM before the kick */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack channel\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	mutex_unlock(&qmp->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qmp_send);
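
/*
 * A minimal usage sketch (illustrative only, not compiled into this driver):
 * callers hand qmp_send() a NUL-padded, fixed-size buffer so that @len is a
 * multiple of 4 and does not exceed the mailbox size. The payload below is
 * the same qdss clock request used by qmp_qdss_clk_prepare() further down in
 * this file.
 */
#if 0
static int qmp_send_example(struct qmp *qmp)
{
	/* Zero-padded to QMP_MSG_LEN bytes; sizeof(buf) is a multiple of 4 */
	char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";

	return qmp_send(qmp, buf, sizeof(buf));
}
#endif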

static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf, sizeof(buf));
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf, sizeof(buf));
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}

static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = qmp_cdev_max_state;
	return 0;
}

static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;

	*state = qmp_cdev->state;
	return 0;
}

static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
	char buf[QMP_MSG_LEN] = {};
	bool cdev_state;
	int ret;

	/* Normalize state */
	cdev_state = !!state;

	if (qmp_cdev->state == cdev_state)
		return 0;

	snprintf(buf, sizeof(buf),
		 "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
		 qmp_cdev->name, cdev_state ? "on" : "off");

	ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));

	if (!ret)
		qmp_cdev->state = cdev_state;

	return ret;
}

static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
	.get_max_state = qmp_cdev_get_max_state,
	.get_cur_state = qmp_cdev_get_cur_state,
	.set_cur_state = qmp_cdev_set_cur_state,
};

static int qmp_cooling_device_add(struct qmp *qmp,
				  struct qmp_cooling_device *qmp_cdev,
				  struct device_node *node)
{
	char *cdev_name = (char *)node->name;

	qmp_cdev->qmp = qmp;
	qmp_cdev->state = !qmp_cdev_max_state;
	qmp_cdev->name = cdev_name;
	qmp_cdev->cdev = devm_thermal_of_cooling_device_register(qmp->dev, node,
				cdev_name, qmp_cdev, &qmp_cooling_device_ops);

	if (IS_ERR(qmp_cdev->cdev))
		dev_err(qmp->dev, "unable to register %s cooling device\n",
			cdev_name);

	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
}

static int qmp_cooling_devices_register(struct qmp *qmp)
{
	struct device_node *np, *child;
	int count = 0;
	int ret;

	np = qmp->dev->of_node;

	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
					 sizeof(*qmp->cooling_devs),
					 GFP_KERNEL);

	if (!qmp->cooling_devs)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		if (!of_find_property(child, "#cooling-cells", NULL))
			continue;
		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
					     child);
		if (ret) {
			of_node_put(child);
			goto unroll;
		}
	}

	if (!count)
		devm_kfree(qmp->dev, qmp->cooling_devs);

	return 0;

unroll:
	while (--count >= 0)
		thermal_cooling_device_unregister(qmp->cooling_devs[count].cdev);
	devm_kfree(qmp->dev, qmp->cooling_devs);

	return ret;
}

static void qmp_cooling_devices_remove(struct qmp *qmp)
{
	int i;

	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}

/**
 * qmp_get() - get a qmp handle from a device
 * @dev: client device pointer
 *
 * Return: handle to qmp device on success, ERR_PTR() on failure
 */
struct qmp *qmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct qmp *qmp;

	if (!dev || !dev->of_node)
		return ERR_PTR(-EINVAL);

	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	qmp = platform_get_drvdata(pdev);

	if (!qmp) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	return qmp;
}
EXPORT_SYMBOL(qmp_get);

/**
 * qmp_put() - release a qmp handle
 * @qmp: qmp handle obtained from qmp_get()
 */
void qmp_put(struct qmp *qmp)
{
	/*
	 * Match get_device() inside of_find_device_by_node() in
	 * qmp_get()
	 */
	if (!IS_ERR_OR_NULL(qmp))
		put_device(qmp->dev);
}
EXPORT_SYMBOL(qmp_put);
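
/*
 * A minimal client-side sketch (illustrative, not part of this driver): a
 * consumer whose device node carries a "qcom,qmp" phandle resolves the
 * handle with qmp_get(), sends its request and drops the reference with
 * qmp_put(). The payload reuses the qdss clock request from this file; a
 * real client would send its own resource-specific string.
 */
#if 0
static int qmp_client_example(struct device *dev)
{
	char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp;
	int ret;

	qmp = qmp_get(dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);

	ret = qmp_send(qmp, buf, sizeof(buf));

	qmp_put(qmp);

	return ret;
}
#endif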

static int qmp_probe(struct platform_device *pdev)
{
	struct qmp *qmp;
	int irq;
	int ret;

	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = &pdev->dev;
	init_waitqueue_head(&qmp->event);
	mutex_init(&qmp->tx_lock);

	qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qmp->msgram))
		return PTR_ERR(qmp->msgram);

	qmp->mbox_client.dev = &pdev->dev;
	qmp->mbox_client.knows_txdone = true;
	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
	if (IS_ERR(qmp->mbox_chan)) {
		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
		return PTR_ERR(qmp->mbox_chan);
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
			       "aoss-qmp", qmp);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto err_free_mbox;
	}

	ret = qmp_open(qmp);
	if (ret < 0)
		goto err_free_mbox;

	ret = qmp_qdss_clk_add(qmp);
	if (ret)
		goto err_close_qmp;

	ret = qmp_cooling_devices_register(qmp);
	if (ret)
		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");

	platform_set_drvdata(pdev, qmp);

	return 0;

err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static int qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	qmp_qdss_clk_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);

	return 0;
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sc7280-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{ .compatible = "qcom,sm8350-aoss-qmp", },
	{ .compatible = "qcom,aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);

static struct platform_driver qmp_driver = {
	.driver = {
		.name		= "qcom_aoss_qmp",
		.of_match_table	= qmp_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qmp_probe,
	.remove	= qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");