// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * APM X-Gene SoC Hardware Monitoring Driver
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *         Hoan Tran <hotran@apm.com>
 *
 * This driver provides the following features:
 *  - Retrieve CPU total power (uW)
 *  - Retrieve IO total power (uW)
 *  - Retrieve SoC temperature (milli-degree C) and alarm
 */
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <acpi/pcc.h>

/* SLIMpro message defines */
#define MSG_TYPE_DBG			0
#define MSG_TYPE_ERR			7
#define MSG_TYPE_PWRMGMT		9

#define MSG_TYPE(v)			(((v) & 0xF0000000) >> 28)
#define MSG_TYPE_SET(v)			(((v) << 28) & 0xF0000000)
#define MSG_SUBTYPE(v)			(((v) & 0x0F000000) >> 24)
#define MSG_SUBTYPE_SET(v)		(((v) << 24) & 0x0F000000)

#define DBG_SUBTYPE_SENSOR_READ		4
#define SENSOR_RD_MSG			0x04FFE902
#define SENSOR_RD_EN_ADDR(a)		((a) & 0x000FFFFF)
#define PMD_PWR_REG			0x20
#define PMD_PWR_MW_REG			0x26
#define SOC_PWR_REG			0x21
#define SOC_PWR_MW_REG			0x27
#define SOC_TEMP_REG			0x10

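/*
 * The SoC temperature is handled as a 9-bit two's-complement value:
 * TEMP_NEGATIVE_BIT (bit 8) is the sign bit consumed by sign_extend32()
 * below, so a raw reading of 0x1F6, for example, decodes to -10 degC.
 * A sample with SENSOR_INVALID_DATA (bit 15) set is rejected as -ENODATA.
 */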
#define TEMP_NEGATIVE_BIT		8
#define SENSOR_INVALID_DATA		BIT(15)

#define PWRMGMT_SUBTYPE_TPC		1
#define TPC_ALARM			2
#define TPC_GET_ALARM			3
#define TPC_CMD(v)			(((v) & 0x00FF0000) >> 16)
#define TPC_CMD_SET(v)			(((v) << 16) & 0x00FF0000)
#define TPC_EN_MSG(hndl, cmd, type) \
	(MSG_TYPE_SET(MSG_TYPE_PWRMGMT) | \
	MSG_SUBTYPE_SET(hndl) | TPC_CMD_SET(cmd) | type)
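
/*
 * For example, TPC_EN_MSG(PWRMGMT_SUBTYPE_TPC, TPC_GET_ALARM, 0) encodes to
 * 0x91030000 (type 9, subtype 1, command 3), while the canned sensor-read
 * request SENSOR_RD_MSG (0x04FFE902) carries MSG_TYPE_DBG with
 * MSG_SUBTYPE DBG_SUBTYPE_SENSOR_READ.
 */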

/* PCC defines */
#define PCC_SIGNATURE_MASK		0x50424300
#define PCCC_GENERATE_DB_INT		BIT(15)
#define PCCS_CMD_COMPLETE		BIT(0)
#define PCCS_SCI_DOORBEL		BIT(1)
#define PCCS_PLATFORM_NOTIFICATION	BIT(3)
/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands
 */
#define PCC_NUM_RETRIES			500

#define ASYNC_MSG_FIFO_SIZE		16
#define MBOX_OP_TIMEOUTMS		1000

#define WATT_TO_mWATT(x)		((x) * 1000)
#define mWATT_TO_uWATT(x)		((x) * 1000)
#define CELSIUS_TO_mCELSIUS(x)		((x) * 1000)

#define to_xgene_hwmon_dev(cl)		\
	container_of(cl, struct xgene_hwmon_dev, mbox_client)

enum xgene_hwmon_version {
	XGENE_HWMON_V1 = 0,
	XGENE_HWMON_V2 = 1,
};

struct slimpro_resp_msg {
	u32 msg;
	u32 param1;
	u32 param2;
} __packed;

struct xgene_hwmon_dev {
	struct device		*dev;
	struct mbox_chan	*mbox_chan;
	struct pcc_mbox_chan	*pcc_chan;
	struct mbox_client	mbox_client;
	int			mbox_idx;

	spinlock_t		kfifo_lock;
	struct mutex		rd_mutex;
	struct completion	rd_complete;
	int			resp_pending;
	struct slimpro_resp_msg sync_msg;

	struct work_struct	workq;
	struct kfifo_rec_ptr_1	async_msg_fifo;

	struct device		*hwmon_dev;
	bool			temp_critical_alarm;

	phys_addr_t		comm_base_addr;
	void			*pcc_comm_addr;
	u64			usecs_lat;
};

/*
 * This function tests and clears a bitmask then returns its old value
 */
static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
{
	u16 ret, val;

	val = le16_to_cpu(READ_ONCE(*addr));
	ret = val & mask;
	val &= ~mask;
	WRITE_ONCE(*addr, cpu_to_le16(val));

	return ret;
}

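/*
 * Issue a synchronous command over the PCC shared-memory channel: write the
 * subspace signature and command word, clear CMD_COMPLETE, copy the
 * three-word request, ring the doorbell, then wait for the rx callback to
 * post the reply into ctx->sync_msg.
 */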
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
	struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
	u32 *ptr = (void *)(generic_comm_base + 1);
	int rc, i;
	u16 val;

	mutex_lock(&ctx->rd_mutex);
	init_completion(&ctx->rd_complete);
	ctx->resp_pending = true;

	/* Write signature for subspace */
	WRITE_ONCE(generic_comm_base->signature,
		   cpu_to_le32(PCC_SIGNATURE_MASK | ctx->mbox_idx));

	/* Write to the shared command region */
	WRITE_ONCE(generic_comm_base->command,
		   cpu_to_le16(MSG_TYPE(msg[0]) | PCCC_GENERATE_DB_INT));

	/* Clear the CMD COMPLETE bit */
	val = le16_to_cpu(READ_ONCE(generic_comm_base->status));
	val &= ~PCCS_CMD_COMPLETE;
	WRITE_ONCE(generic_comm_base->status, cpu_to_le16(val));

	/* Copy the message to the PCC comm space */
	for (i = 0; i < sizeof(struct slimpro_resp_msg) / 4; i++)
		WRITE_ONCE(ptr[i], cpu_to_le32(msg[i]));

	/* Ring the doorbell */
	rc = mbox_send_message(ctx->mbox_chan, msg);
	if (rc < 0) {
		dev_err(ctx->dev, "Mailbox send error %d\n", rc);
		goto err;
	}
	if (!wait_for_completion_timeout(&ctx->rd_complete,
					 usecs_to_jiffies(ctx->usecs_lat))) {
		dev_err(ctx->dev, "Mailbox operation timed out\n");
		rc = -ETIMEDOUT;
		goto err;
	}

	/* Check for error message */
	if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
		rc = -EINVAL;
		goto err;
	}

	msg[0] = ctx->sync_msg.msg;
	msg[1] = ctx->sync_msg.param1;
	msg[2] = ctx->sync_msg.param2;

err:
	mbox_chan_txdone(ctx->mbox_chan, 0);
	ctx->resp_pending = false;
	mutex_unlock(&ctx->rd_mutex);
	return rc;
}

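/*
 * Issue a synchronous command over the SLIMpro mailbox (the device tree
 * path) and wait for the rx callback to post the reply into ctx->sync_msg.
 */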
static int xgene_hwmon_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
	int rc;

	mutex_lock(&ctx->rd_mutex);
	init_completion(&ctx->rd_complete);
	ctx->resp_pending = true;

	rc = mbox_send_message(ctx->mbox_chan, msg);
	if (rc < 0) {
		dev_err(ctx->dev, "Mailbox send error %d\n", rc);
		goto err;
	}

	if (!wait_for_completion_timeout(&ctx->rd_complete,
					 msecs_to_jiffies(MBOX_OP_TIMEOUTMS))) {
		dev_err(ctx->dev, "Mailbox operation timed out\n");
		rc = -ETIMEDOUT;
		goto err;
	}

	/* Check for error message */
	if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
		rc = -EINVAL;
		goto err;
	}

	msg[0] = ctx->sync_msg.msg;
	msg[1] = ctx->sync_msg.param1;
	msg[2] = ctx->sync_msg.param2;

err:
	ctx->resp_pending = false;
	mutex_unlock(&ctx->rd_mutex);
	return rc;
}

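/*
 * Read a single sensor register from the SLIMpro register map; the value is
 * only usable when SENSOR_INVALID_DATA is clear in the returned word.
 */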
static int xgene_hwmon_reg_map_rd(struct xgene_hwmon_dev *ctx, u32 addr,
				  u32 *data)
{
	u32 msg[3];
	int rc;

	msg[0] = SENSOR_RD_MSG;
	msg[1] = SENSOR_RD_EN_ADDR(addr);
	msg[2] = 0;

	if (acpi_disabled)
		rc = xgene_hwmon_rd(ctx, msg);
	else
		rc = xgene_hwmon_pcc_rd(ctx, msg);

	if (rc < 0)
		return rc;

	/*
	 * Check if sensor data is valid.
	 */
	if (msg[1] & SENSOR_INVALID_DATA)
		return -ENODATA;

	*data = msg[1];

	return rc;
}

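/*
 * On the PCC path the platform only signals that an event is pending, so
 * send a TPC "get alarm" consumer command to fetch the actual payload.
 */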
static int xgene_hwmon_get_notification_msg(struct xgene_hwmon_dev *ctx,
					    u32 *amsg)
{
	u32 msg[3];
	int rc;

	msg[0] = TPC_EN_MSG(PWRMGMT_SUBTYPE_TPC, TPC_GET_ALARM, 0);
	msg[1] = 0;
	msg[2] = 0;

	rc = xgene_hwmon_pcc_rd(ctx, msg);
	if (rc < 0)
		return rc;

	amsg[0] = msg[0];
	amsg[1] = msg[1];
	amsg[2] = msg[2];

	return rc;
}

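/*
 * CPU (PMD) power is split across two registers: whole watts in PMD_PWR_REG
 * and the milliwatt part in PMD_PWR_MW_REG, so readings of 15 and 250, for
 * example, combine to 15250 mW. IO/SoC power below follows the same scheme.
 */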
static int xgene_hwmon_get_cpu_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
{
	u32 watt, mwatt;
	int rc;

	rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_REG, &watt);
	if (rc < 0)
		return rc;

	rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_MW_REG, &mwatt);
	if (rc < 0)
		return rc;

	*val = WATT_TO_mWATT(watt) + mwatt;
	return 0;
}

static int xgene_hwmon_get_io_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
{
	u32 watt, mwatt;
	int rc;

	rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_REG, &watt);
	if (rc < 0)
		return rc;

	rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_MW_REG, &mwatt);
	if (rc < 0)
		return rc;

	*val = WATT_TO_mWATT(watt) + mwatt;
	return 0;
}

static int xgene_hwmon_get_temp(struct xgene_hwmon_dev *ctx, u32 *val)
{
	return xgene_hwmon_reg_map_rd(ctx, SOC_TEMP_REG, val);
}

/*
 * Sensor temperature/power functions
 */
static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	int rc, temp;
	u32 val;

	rc = xgene_hwmon_get_temp(ctx, &val);
	if (rc < 0)
		return rc;

	temp = sign_extend32(val, TEMP_NEGATIVE_BIT);

	return sysfs_emit(buf, "%d\n", CELSIUS_TO_mCELSIUS(temp));
}

static ssize_t temp1_label_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sysfs_emit(buf, "SoC Temperature\n");
}

static ssize_t temp1_critical_alarm_show(struct device *dev,
					 struct device_attribute *devattr,
					 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ctx->temp_critical_alarm);
}

static ssize_t power1_label_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "CPU power\n");
}

static ssize_t power2_label_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "IO power\n");
}

static ssize_t power1_input_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	u32 val;
	int rc;

	rc = xgene_hwmon_get_cpu_pwr(ctx, &val);
	if (rc < 0)
		return rc;

	return sysfs_emit(buf, "%u\n", mWATT_TO_uWATT(val));
}

static ssize_t power2_input_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
	u32 val;
	int rc;

	rc = xgene_hwmon_get_io_pwr(ctx, &val);
	if (rc < 0)
		return rc;

	return sysfs_emit(buf, "%u\n", mWATT_TO_uWATT(val));
}

static DEVICE_ATTR_RO(temp1_label);
static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RO(temp1_critical_alarm);
static DEVICE_ATTR_RO(power1_label);
static DEVICE_ATTR_RO(power1_input);
static DEVICE_ATTR_RO(power2_label);
static DEVICE_ATTR_RO(power2_input);

static struct attribute *xgene_hwmon_attrs[] = {
	&dev_attr_temp1_label.attr,
	&dev_attr_temp1_input.attr,
	&dev_attr_temp1_critical_alarm.attr,
	&dev_attr_power1_label.attr,
	&dev_attr_power1_input.attr,
	&dev_attr_power2_label.attr,
	&dev_attr_power2_input.attr,
	NULL,
};

ATTRIBUTE_GROUPS(xgene_hwmon);

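/*
 * Latch the critical temperature alarm state reported by the platform and
 * notify any pollers of the temp1_critical_alarm attribute.
 */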
static int xgene_hwmon_tpc_alarm(struct xgene_hwmon_dev *ctx,
				 struct slimpro_resp_msg *amsg)
{
	ctx->temp_critical_alarm = !!amsg->param2;
	sysfs_notify(&ctx->dev->kobj, NULL, "temp1_critical_alarm");

	return 0;
}

static void xgene_hwmon_process_pwrmsg(struct xgene_hwmon_dev *ctx,
				       struct slimpro_resp_msg *amsg)
{
	if ((MSG_SUBTYPE(amsg->msg) == PWRMGMT_SUBTYPE_TPC) &&
	    (TPC_CMD(amsg->msg) == TPC_ALARM))
		xgene_hwmon_tpc_alarm(ctx, amsg);
}

/*
 * This function is called to process the async work queue
 */
static void xgene_hwmon_evt_work(struct work_struct *work)
{
	struct slimpro_resp_msg amsg;
	struct xgene_hwmon_dev *ctx;
	int ret;

	ctx = container_of(work, struct xgene_hwmon_dev, workq);
	while (kfifo_out_spinlocked(&ctx->async_msg_fifo, &amsg,
				    sizeof(struct slimpro_resp_msg),
				    &ctx->kfifo_lock)) {
		/*
		 * If PCC, send a consumer command to the Platform to get info.
		 * If SLIMpro Mailbox, get the message from the specific FIFO.
		 */
		if (!acpi_disabled) {
			ret = xgene_hwmon_get_notification_msg(ctx,
							       (u32 *)&amsg);
			if (ret < 0)
				continue;
		}

		if (MSG_TYPE(amsg.msg) == MSG_TYPE_PWRMGMT)
			xgene_hwmon_process_pwrmsg(ctx, &amsg);
	}
}

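/*
 * Messages may arrive before probe has registered the hwmon device; park
 * them in the async FIFO so the bottom half can replay them later.
 */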
static int xgene_hwmon_rx_ready(struct xgene_hwmon_dev *ctx, void *msg)
{
	if (IS_ERR_OR_NULL(ctx->hwmon_dev) && !ctx->resp_pending) {
		/* Enqueue to the FIFO */
		kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
				    sizeof(struct slimpro_resp_msg),
				    &ctx->kfifo_lock);
		return -ENODEV;
	}

	return 0;
}

/*
 * This function is called when the SLIMpro Mailbox received a message
 */
static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
{
	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);

	/*
	 * While the driver registers with the mailbox framework, an interrupt
	 * can be pending before the probe function completes its
	 * initialization. If such a condition occurs, just queue up the
	 * message as the driver is not ready for servicing the callback.
	 */
	if (xgene_hwmon_rx_ready(ctx, msg) < 0)
		return;

	/*
	 * Response message format:
	 * msg[0] is the return code of the operation
	 * msg[1] is the first parameter word
	 * msg[2] is the second parameter word
	 *
	 * As the message only supports dword size, just assign it.
	 */

	/* Check for sync query */
	if (ctx->resp_pending &&
	    ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
	      TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
		ctx->sync_msg.msg = ((u32 *)msg)[0];
		ctx->sync_msg.param1 = ((u32 *)msg)[1];
		ctx->sync_msg.param2 = ((u32 *)msg)[2];

		/* Operation waiting for response */
		complete(&ctx->rd_complete);

		return;
	}

	/* Enqueue to the FIFO */
	kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
			    sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
	/* Schedule the bottom handler */
	schedule_work(&ctx->workq);
}

/*
 * This function is called when the PCC Mailbox received a message
 */
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
	struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
	struct slimpro_resp_msg amsg;

	/*
	 * While the driver registers with the mailbox framework, an interrupt
	 * can be pending before the probe function completes its
	 * initialization. If such a condition occurs, just queue up the
	 * message as the driver is not ready for servicing the callback.
	 */
	if (xgene_hwmon_rx_ready(ctx, &amsg) < 0)
		return;

	msg = generic_comm_base + 1;
	/* Check if platform sends interrupt */
	if (!xgene_word_tst_and_clr(&generic_comm_base->status,
				    PCCS_SCI_DOORBEL))
		return;

	/*
	 * Response message format:
	 * msg[0] is the return code of the operation
	 * msg[1] is the first parameter word
	 * msg[2] is the second parameter word
	 *
	 * As the message only supports dword size, just assign it.
	 */

	/* Check for sync query */
	if (ctx->resp_pending &&
	    ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
	     (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
	      MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
	      TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
		/* Check if platform completes command */
		if (xgene_word_tst_and_clr(&generic_comm_base->status,
					   PCCS_CMD_COMPLETE)) {
			ctx->sync_msg.msg = ((u32 *)msg)[0];
			ctx->sync_msg.param1 = ((u32 *)msg)[1];
			ctx->sync_msg.param2 = ((u32 *)msg)[2];

			/* Operation waiting for response */
			complete(&ctx->rd_complete);

			return;
		}
	}

	/*
	 * Platform notifies interrupt to OSPM.
	 * OSPM schedules a consumer command to get this information
	 * in a workqueue. Platform must wait until OSPM has issued
	 * a consumer command that serves this notification.
	 */

	/* Enqueue to the FIFO */
	kfifo_in_spinlocked(&ctx->async_msg_fifo, &amsg,
			    sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
	/* Schedule the bottom handler */
	schedule_work(&ctx->workq);
}

static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret) {
		dev_dbg(cl->dev, "TX did not complete: CMD sent:%x, ret:%d\n",
			*(u16 *)msg, ret);
	} else {
		dev_dbg(cl->dev, "TX completed. CMD sent:%x, ret:%d\n",
			*(u16 *)msg, ret);
	}
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
	{"APMC0D29", XGENE_HWMON_V1},
	{"APMC0D8A", XGENE_HWMON_V2},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
#endif

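/*
 * Probe: request either the SLIMpro mailbox channel (device tree boot) or a
 * PCC channel (ACPI boot), map the shared PCC communication region when
 * needed, then register the hwmon device and flush any queued messages.
 */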
static int xgene_hwmon_probe(struct platform_device *pdev)
{
	struct xgene_hwmon_dev *ctx;
	struct mbox_client *cl;
	int rc;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctx);
	cl = &ctx->mbox_client;

	spin_lock_init(&ctx->kfifo_lock);
	mutex_init(&ctx->rd_mutex);

	rc = kfifo_alloc(&ctx->async_msg_fifo,
			 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
			 GFP_KERNEL);
	if (rc)
		return -ENOMEM;

	INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);

	/* Request mailbox channel */
	cl->dev = &pdev->dev;
	cl->tx_done = xgene_hwmon_tx_done;
	cl->tx_block = false;
	cl->tx_tout = MBOX_OP_TIMEOUTMS;
	cl->knows_txdone = false;
	if (acpi_disabled) {
		cl->rx_callback = xgene_hwmon_rx_cb;
		ctx->mbox_chan = mbox_request_channel(cl, 0);
		if (IS_ERR(ctx->mbox_chan)) {
			dev_err(&pdev->dev,
				"SLIMpro mailbox channel request failed\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}
	} else {
		struct pcc_mbox_chan *pcc_chan;
		const struct acpi_device_id *acpi_id;
		int version;

		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id) {
			rc = -EINVAL;
			goto out_mbox_free;
		}

		version = (int)acpi_id->driver_data;

		if (device_property_read_u32(&pdev->dev, "pcc-channel",
					     &ctx->mbox_idx)) {
			dev_err(&pdev->dev, "no pcc-channel property\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}

		cl->rx_callback = xgene_hwmon_pcc_rx_cb;
		pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
		if (IS_ERR(pcc_chan)) {
			dev_err(&pdev->dev,
				"PCC channel request failed\n");
			rc = -ENODEV;
			goto out_mbox_free;
		}

		ctx->pcc_chan = pcc_chan;
		ctx->mbox_chan = pcc_chan->mchan;

		if (!ctx->mbox_chan->mbox->txdone_irq) {
			dev_err(&pdev->dev, "PCC IRQ not supported\n");
			rc = -ENODEV;
			goto out;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		ctx->comm_base_addr = pcc_chan->shmem_base_addr;
		if (ctx->comm_base_addr) {
			if (version == XGENE_HWMON_V2)
				ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
								  ctx->comm_base_addr,
								  pcc_chan->shmem_size);
			else
				ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
								   ctx->comm_base_addr,
								   pcc_chan->shmem_size,
								   MEMREMAP_WB);
		} else {
			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
			rc = -ENODEV;
			goto out;
		}

		if (!ctx->pcc_comm_addr) {
			dev_err(&pdev->dev,
				"Failed to map PCC comm region\n");
			rc = -ENOMEM;
			goto out;
		}

		/*
		 * pcc_chan->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
	}

	ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
							   "apm_xgene",
							   ctx,
							   xgene_hwmon_groups);
	if (IS_ERR(ctx->hwmon_dev)) {
		dev_err(&pdev->dev, "Failed to register HW monitor device\n");
		rc = PTR_ERR(ctx->hwmon_dev);
		goto out;
	}

	/*
	 * Schedule the bottom handler if there is a pending message.
	 */
	schedule_work(&ctx->workq);

	dev_info(&pdev->dev, "APM X-Gene SoC HW monitor driver registered\n");

	return 0;

out:
	if (acpi_disabled)
		mbox_free_channel(ctx->mbox_chan);
	else
		pcc_mbox_free_channel(ctx->pcc_chan);
out_mbox_free:
	kfifo_free(&ctx->async_msg_fifo);

	return rc;
}

static int xgene_hwmon_remove(struct platform_device *pdev)
{
	struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);

	cancel_work_sync(&ctx->workq);
	hwmon_device_unregister(ctx->hwmon_dev);
	kfifo_free(&ctx->async_msg_fifo);
	if (acpi_disabled)
		mbox_free_channel(ctx->mbox_chan);
	else
		pcc_mbox_free_channel(ctx->pcc_chan);

	return 0;
}

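/*
 * A minimal device tree node for this driver looks roughly like the sketch
 * below; the mailbox phandle and channel index are board specific:
 *
 *	hwmonslimpro {
 *		compatible = "apm,xgene-slimpro-hwmon";
 *		mboxes = <&mailboxslimpro 7>;
 *	};
 */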
static const struct of_device_id xgene_hwmon_of_match[] = {
	{.compatible = "apm,xgene-slimpro-hwmon"},
	{}
};
MODULE_DEVICE_TABLE(of, xgene_hwmon_of_match);

static struct platform_driver xgene_hwmon_driver = {
	.probe = xgene_hwmon_probe,
	.remove = xgene_hwmon_remove,
	.driver = {
		.name = "xgene-slimpro-hwmon",
		.of_match_table = xgene_hwmon_of_match,
		.acpi_match_table = ACPI_PTR(xgene_hwmon_acpi_match),
	},
};
module_platform_driver(xgene_hwmon_driver);

MODULE_DESCRIPTION("APM X-Gene SoC hardware monitor");
MODULE_LICENSE("GPL");