// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_protocol_instance  - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance in which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	/*
	 * Index the map directly: the old range check against SCMI_ERR_MAX
	 * wrongly excluded the last two error codes since SCMI_ERR_MAX is -9.
	 */
	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
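
/*
 * For example, a command completing with a platform status of
 * SCMI_ERR_ACCESS (-3) is translated for the Linux caller as
 * scmi_to_linux_errno(-3) == -EACCES, while any status outside the
 * known range collapses to -EIO.
 */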

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating messages.
 *
 * This function holds a spinlock to maintain the integrity of internal data
 * structures and does not sleep.
 *
 * Return: A valid xfer on success, or an ERR_PTR on failure.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
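
/*
 * For reference, the 32-bit message header handled here is laid out, as per
 * the masks in common.h, as:
 *
 *	| 31..28   | 27..18 | 17..10      | 9..8 | 7..0       |
 *	| reserved | token  | protocol id | type | message id |
 *
 * so MSG_XTRACT_TOKEN(msg_hdr) above recovers the same sequence number that
 * scmi_xfer_get() stored in xfer->hdr.seq on the transmit side.
 */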

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 if all goes well, -ETIMEDOUT in case of no response,
 *	or the corresponding transmit error.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 if all goes well, -ETIMEDOUT in case of no delayed response,
 *	or the corresponding transmit error.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;
	return ret;
}
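
/*
 * Illustrative sketch (not part of this driver): a protocol implementation
 * issuing a hypothetical asynchronous command would follow the same pattern
 * as the synchronous case, swapping in do_xfer_with_response(), e.g.:
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_ASYNC_CMD, sizeof(*msg), 0, &t);
 *	if (!ret) {
 *		...			(fill in t->tx.buf)
 *		ret = ph->xops->do_xfer_with_response(ph, t);
 *		ph->xops->xfer_put(ph, t);
 *	}
 *
 * where MY_ASYNC_CMD and msg are placeholders, not real protocol symbols.
 */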

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv  - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv  - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};

/**
 * scmi_revision_area_get  - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *	   instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resource management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is first put
 *	   using @scmi_protocol_put() before releasing the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}
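
/*
 * Illustrative sketch (hypothetical values): a protocol descriptor, as
 * consumed above, is registered from its own module roughly as:
 *
 *	static const struct scmi_protocol scmi_fake = {
 *		.id = 0x99,
 *		.owner = THIS_MODULE,
 *		.instance_init = &scmi_fake_init,
 *		.ops = &fake_proto_ops,
 *	};
 *
 * where 0x99, scmi_fake_init and fake_proto_ops are made-up names; the
 * instance_init callback typically uses ph->xops->version_get() and
 * ph->set_priv() to set up its per-instance state.
 */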

/**
 * scmi_get_protocol_instance  - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

/**
 * scmi_protocol_acquire  - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release  - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resource de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, the protocol hold will be
 * automatically released, and the protocol possibly de-initialized on last
 * user, once the SCMI driver owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}
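
/*
 * Illustrative sketch (not part of this driver): a typical SCMI client
 * driver gets hold of a protocol from its probe() along these lines, here
 * using the Clock protocol as an example:
 *
 *	static int scmi_clocks_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *clk_ops;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *					SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *		...
 *	}
 *
 * The hold is dropped automatically on driver unbind, or explicitly via
 * handle->devm_protocol_put().
 */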

static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained by calling
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}

static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
	info->users++;
	return &info->handle;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol
 * library. scmi_handle_put must be balanced with successful scmi_handle_get.
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = scmi_handle_get_from_info_unlocked(info);
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol
 * library. scmi_handle_put must be balanced with successful scmi_handle_get.
 *
 * Return: 0 if successfully released, -EINVAL if a NULL handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
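
/*
 * Illustrative note (conceptual flow, not literal call sites): the SCMI bus
 * code balances these two helpers around the lifetime of each scmi_device,
 * roughly:
 *
 *	handle = scmi_handle_get(&sdev->dev);	(at device setup)
 *	...
 *	scmi_handle_put(handle);		(at device teardown)
 *
 * so info->users counts the scmi_devices currently bound to this instance.
 */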

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* Check if already allocated, used for multiple devices per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device  - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocol for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @prot_id and @name: if the device does not exist yet, it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi_device, or NULL on failure.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices  - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

/**
 * scmi_protocol_device_request  - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper lets an SCMI driver request specific devices identified by the
 * @id_table to be created for each active SCMI instance.
 *
 * The requested device name must not already exist for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * @scmi_requested_devices, then a matching device is created for each already
 * active SCMI instance (if any).
 *
 * This way the requested device is created straight away for all the already
 * initialized (probed) SCMI instances (handles) and it remains also annotated
 * as pending creation if the requesting SCMI driver was loaded before some
 * SCMI instance and related transports were available: when such a late
 * instance is probed, its probe will take care to scan the list of pending
 * requested devices and create those on its own (see
 * @scmi_create_protocol_devices and its enclosing loop).
 *
 * Return: 0 on Success
 */
int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
	int ret = 0;
	unsigned int id = 0;
	struct list_head *head, *phead = NULL;
	struct scmi_requested_dev *rdev;
	struct scmi_info *info;

	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	/*
	 * Search for the matching protocol rdev list and then search it for
	 * any existing device with the same name: fail if any duplicate is
	 * found.
	 */
	mutex_lock(&scmi_requested_devices_mtx);
	idr_for_each_entry(&scmi_requested_devices, head, id) {
		if (!phead) {
			/* A list found registered in the IDR is never empty */
			rdev = list_first_entry(head, struct scmi_requested_dev,
						node);
			if (rdev->id_table->protocol_id ==
			    id_table->protocol_id)
				phead = head;
		}
		list_for_each_entry(rdev, head, node) {
			if (!strcmp(rdev->id_table->name, id_table->name)) {
				pr_err("Ignoring duplicate request [%d] %s\n",
				       rdev->id_table->protocol_id,
				       rdev->id_table->name);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/*
	 * No duplicate found for the requested id_table, so let's create a new
	 * requested device entry for this new valid request.
	 */
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		ret = -ENOMEM;
		goto out;
	}
	rdev->id_table = id_table;

	/*
	 * Append the new requested device table descriptor to the head of the
	 * related protocol list, creating such a head if not already present.
	 */
	if (!phead) {
		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
		if (!phead) {
			kfree(rdev);
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(phead);

		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
				id_table->protocol_id,
				id_table->protocol_id + 1, GFP_KERNEL);
		if (ret != id_table->protocol_id) {
			pr_err("Failed to save SCMI device - ret:%d\n", ret);
			kfree(rdev);
			kfree(phead);
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
	}
	list_add(&rdev->node, phead);

	/*
	 * Now effectively create and initialize the requested device for every
	 * already initialized SCMI instance which has registered the requested
	 * protocol as a valid active one: i.e. defined in DT and supported by
	 * current platform FW.
	 */
	mutex_lock(&scmi_list_mutex);
	list_for_each_entry(info, &scmi_list, node) {
		struct device_node *child;

		child = idr_find(&info->active_protocols,
				 id_table->protocol_id);
		if (child) {
			struct scmi_device *sdev;

			sdev = scmi_get_protocol_device(child, info,
							id_table->protocol_id,
							id_table->name);
			/* Set handle if not already set: device existed */
			if (sdev && !sdev->handle)
				sdev->handle =
					scmi_handle_get_from_info_unlocked(info);
		} else {
			dev_err(info->dev,
				"Failed. SCMI protocol %d not active.\n",
				id_table->protocol_id);
		}
	}
	mutex_unlock(&scmi_list_mutex);

out:
	mutex_unlock(&scmi_requested_devices_mtx);

	return ret;
}
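
/*
 * Illustrative sketch (not part of this driver): such requests originate
 * from SCMI drivers registering an id_table on the SCMI bus, e.g.:
 *
 *	static const struct scmi_device_id scmi_id_table[] = {
 *		{ SCMI_PROTOCOL_CLOCK, "clocks" },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(scmi, scmi_id_table);
 *
 * with the bus core requesting a matching device at driver registration
 * time; the "clocks" name mirrors the real clock driver, shown here purely
 * as an example.
 */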

/**
 * scmi_protocol_device_unrequest  - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * A helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver requests
 * them, but they are destroyed only on SCMI core unloading/unbinding.
 *
 * The current SCMI transport layer uses such devices as internal references,
 * and as such they could be shared by multiple drivers using the same
 * transport, so they cannot be safely destroyed until the whole SCMI stack
 * is removed (unless adding the further burden of refcounting).
 */
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
	struct list_head *phead;

	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
	if (phead) {
		struct scmi_requested_dev *victim, *tmp;

		list_for_each_entry_safe(victim, tmp, phead, node) {
			if (!strcmp(victim->id_table->name, id_table->name)) {
				list_del(&victim->node);
				kfree(victim);
				break;
			}
		}

		if (list_empty(phead)) {
			idr_remove(&scmi_requested_devices,
				   id_table->protocol_id);
			kfree(phead);
		}
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't ever be released/deinitialized until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}
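
/*
 * For reference, the protocol child nodes scanned in scmi_probe() come from
 * a DT layout along these lines (addresses purely illustrative):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			...
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *			};
 *		};
 *	};
 *
 * where each protocol child's "reg" is the SCMI protocol id to activate.
 */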

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);

static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");