// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
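
/*
 * As a worked example of the layout above (illustrative values, not
 * mandated by anything in this file): a command with message id 0x7,
 * protocol id 0x14 and sequence number 1 packs as
 *
 *	FIELD_PREP(MSG_ID_MASK, 0x07)		= 0x00000007
 *	FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x14)	= 0x00005000
 *	FIELD_PREP(MSG_TOKEN_ID_MASK, 0x01)	= 0x00040000
 *						  ----------
 *	packed header				= 0x00045007
 *
 * MSG_TOKEN_MAX evaluates to 1024, since the token field is 10 bits wide.
 */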

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
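
/*
 * With the definitions above, the shared memory area decodes to the
 * following byte offsets (a reference sketch, derived from the 4-byte
 * __le32 fields):
 *
 *	0x00	reserved
 *	0x04	channel_status
 *	0x08	reserved1[2]
 *	0x10	flags
 *	0x14	length
 *	0x18	msg_header
 *	0x1c	msg_payload[]
 *
 * where length covers msg_header plus whatever payload follows it.
 */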

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	/* SCMI_ERR_MAX evaluates to -9, so bound by the map size instead */
	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
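
/*
 * A minimal usage sketch (hypothetical caller, shown only to illustrate
 * the mapping above):
 *
 *	int err = scmi_to_linux_errno(SCMI_ERR_SUPPORT);
 *
 * leaves err == -EOPNOTSUPP, while any status outside the table range
 * falls back to -EIO.
 */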

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy into the rx buffer */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
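
/*
 * Example of the length arithmetic above (illustrative numbers): if the
 * platform reports length == 16, that is 4 bytes of msg_header, 4 bytes
 * of status and 8 bytes of return payload, so rx.len is clamped to 8 and
 * the copy starts 4 bytes into msg_payload, just past the status word.
 */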

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/*
	 * Ideally the channel must be free by now. However, if the OS timed
	 * out a previous request and the platform continued to process it,
	 * wait until the platform releases the shared memory; otherwise we
	 * may end up overwriting its response with a new message payload or
	 * vice versa.
	 */
	spin_until_cond(ioread32(&mem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
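
/*
 * For instance (illustrative values), sending a command with a single
 * 4-byte parameter writes length = sizeof(msg_header) + 4 = 8, the packed
 * 32-bit header to msg_header, and the parameter itself to the first
 * 4 bytes of msg_payload.
 */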

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function does not sleep; it briefly holds a spinlock to maintain
 * integrity of the internal data structures.
 *
 * Return: valid message on success, else a corresponding ERR_PTR.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible.
	 * NOTE: we might escape with smp_mb() and no lock here,
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as fast as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u8 msg_type;
	u32 msg_hdr;
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	msg_hdr = ioread32(&mem->msg_header);
	msg_type = MSG_XTRACT_TYPE(msg_hdr);
	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	if (msg_type == MSG_TYPE_NOTIFICATION)
		return; /* Notifications not yet supported */

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);

	scmi_fetch_response(xfer, mem);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP)
		complete(xfer->async_done);
	else
		complete(&xfer->done);
}

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT in case of no response, or the
 *	corresponding error if the transmit itself fails.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq,
			    xfer->hdr.status);

	return ret;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT in case of no delayed response, or the
 *	corresponding error if the transmit itself fails.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;
	return ret;
}
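
/*
 * A sketch of how a protocol implementation might drive an asynchronous
 * command end to end (the SAMPLE_* identifiers and the function itself
 * are hypothetical, shown only for illustration):
 *
 *	static int scmi_sample_async_cmd(const struct scmi_handle *handle)
 *	{
 *		int ret;
 *		struct scmi_xfer *t;
 *
 *		ret = scmi_xfer_get_init(handle, SAMPLE_MSG_ID,
 *					 SAMPLE_PROTOCOL_ID, sizeof(u32),
 *					 0, &t);
 *		if (ret)
 *			return ret;
 *
 *		ret = scmi_do_xfer_with_response(handle, t);
 *
 *		scmi_xfer_put(handle, t);
 *		return ret;
 *	}
 */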

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released, or -EINVAL if a NULL handle was
 *	passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
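
/*
 * A sketch of the expected get/put pairing from a client driver's point
 * of view (sample_probe/sample_remove are hypothetical callbacks, shown
 * only for illustration):
 *
 *	static int sample_probe(struct platform_device *pdev)
 *	{
 *		struct scmi_handle *handle = scmi_handle_get(&pdev->dev);
 *
 *		if (!handle)
 *			return -EPROBE_DEFER;
 *		(use the handle, stash it for sample_remove)
 *		return 0;
 *	}
 *
 *	static int sample_remove(struct platform_device *pdev)
 *	{
 *		return scmi_handle_put(stashed_handle);
 *	}
 */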

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->tx_minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_mailbox_check(struct device_node *np, int idx)
{
	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
					  idx, NULL);
}
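
/*
 * The check above matches devicetree nodes along these lines (an
 * illustrative fragment rather than a complete binding example):
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		mboxes = <&mhu 0>, <&mhu 1>;
 *		mbox-names = "tx", "rx";
 *		shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *	};
 *
 * where index 0 is the Tx channel and index 1 the optional Rx channel,
 * mirroring the idx selection in scmi_mbox_chan_setup() below.
 */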

static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
				int prot_id, bool tx)
{
	int ret, idx;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;
	struct idr *idr;
	const char *desc = tx ? "Tx" : "Rx";

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple devices per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (scmi_mailbox_check(np, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	shmem = of_parse_phandle(np, "shmem", idx);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
		return -EADDRNOTAVAIL;
	}

	cinfo->chan = mbox_request_channel(cl, idx);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI %s mailbox\n",
				desc);
		return ret;
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_mbox_chan_setup(info, dev, prot_id, false);

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};

static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np, 0)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");