// SPDX-License-Identifier: GPL-2.0
/*
 * For transports using a shared memory structure.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>

#include <asm-generic/bug.h>

#include "common.h"

/*
 * The SCMI specification requires all parameters, message headers, return
 * arguments and any other protocol data to be expressed in little endian
 * format only.
 */
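/*
 * The structure below mirrors the shared memory transfer area layout used
 * by the SCMI transport; 'length' holds the size in bytes of msg_header
 * plus msg_payload.
 */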
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};

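/*
 * Wait (with a bounded timeout) for the platform to release the shared
 * memory area, then claim it and lay out a new command: channel flags,
 * total length (header plus payload), packed message header and, when
 * present, the TX payload.
 */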
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
		      struct scmi_xfer *xfer, struct scmi_chan_info *cinfo)
{
	ktime_t stop;

	/*
	 * Ideally the channel should be free by now; however, if the OS timed
	 * out a previous request and the platform kept processing it, wait
	 * until the platform releases the shared memory, otherwise we may end
	 * up overwriting its response with a new message payload, or
	 * vice-versa. Give up anyway after twice the expected channel timeout
	 * so as not to bail out on intermittent issues where the platform is
	 * occasionally a bit slower to answer.
	 *
	 * Note that after a timeout is detected we bail out and carry on, but
	 * the transport functionality is probably permanently compromised:
	 * this is just to ease debugging and avoid complete hangs on boot
	 * due to a misbehaving SCMI firmware.
	 */
	stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			 ktime_after(ktime_get(), stop));
	if (!(ioread32(&shmem->channel_status) &
	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
		WARN_ON_ONCE(1);
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel!\n");
		return;
	}

	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}

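/* Read back the raw 32-bit message header from the shared memory area. */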
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->msg_header);
}

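/*
 * Copy a synchronous response out of the shared memory area: the first
 * 32-bit word of the payload carries the status code, the remaining bytes
 * carry the return values.
 */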
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer)
{
	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len,
			     ioread32(&shmem->length) - 8);

	/* Take a copy to the rx buffer, skipping the 4-byte status word */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}

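/*
 * Copy a notification out of the shared memory area: notifications carry
 * no status word, so only the 4-byte message header is skipped.
 */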
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
			      size_t max_len, struct scmi_xfer *xfer)
{
	/* Skip only the length of header in shmem area i.e. 4 bytes */
	xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);

	/* Take a copy to the rx buffer */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}

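/* Mark the channel free again so that the shared memory area can be reused. */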
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}

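/*
 * Check whether the transfer being polled for has completed: the message
 * header token must match this xfer and the platform must have flagged the
 * channel as free (or in error).
 */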
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
		     struct scmi_xfer *xfer)
{
	u16 xfer_id;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&shmem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}