1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Message Protocol driver
4  *
5  * SCMI Message Protocol is used between the System Control Processor(SCP)
6  * and the Application Processors(AP). The Message Handling Unit(MHU)
7  * provides a mechanism for inter-processor communication between SCP's
8  * Cortex M3 and AP.
9  *
10  * SCP offers control and management of the core/cluster power states,
11  * various power domain DVFS including the core/cluster, certain system
12  * clocks configuration, thermal sensors and many others.
13  *
14  * Copyright (C) 2018-2021 ARM Ltd.
15  */
16 
17 #include <linux/bitmap.h>
18 #include <linux/device.h>
19 #include <linux/export.h>
20 #include <linux/idr.h>
21 #include <linux/io.h>
22 #include <linux/kernel.h>
23 #include <linux/ktime.h>
24 #include <linux/hashtable.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/of_address.h>
28 #include <linux/of_device.h>
29 #include <linux/processor.h>
30 #include <linux/refcount.h>
31 #include <linux/slab.h>
32 
33 #include "common.h"
34 #include "notify.h"
35 
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/scmi.h>
38 
39 enum scmi_error_codes {
40 	SCMI_SUCCESS = 0,	/* Success */
41 	SCMI_ERR_SUPPORT = -1,	/* Not supported */
42 	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
43 	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
44 	SCMI_ERR_ENTRY = -4,	/* Not found */
45 	SCMI_ERR_RANGE = -5,	/* Value out of range */
46 	SCMI_ERR_BUSY = -6,	/* Device busy */
47 	SCMI_ERR_COMMS = -7,	/* Communication Error */
48 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
49 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
50 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
51 };
52 
53 /* List of all SCMI devices active in system */
54 static LIST_HEAD(scmi_list);
55 /* Protection for the entire list */
56 static DEFINE_MUTEX(scmi_list_mutex);
57 /* Track the unique id for the transfers for debug & profiling purpose */
58 static atomic_t transfer_last_id;
59 
60 static DEFINE_IDR(scmi_requested_devices);
61 static DEFINE_MUTEX(scmi_requested_devices_mtx);
62 
63 struct scmi_requested_dev {
64 	const struct scmi_device_id *id_table;
65 	struct list_head node;
66 };
67 
68 /**
69  * struct scmi_xfers_info - Structure to manage transfer information
70  *
71  * @xfer_alloc_table: Bitmap table for allocated messages.
72  *	Index of this bitmap table is also used for message
73  *	sequence identifier.
74  * @xfer_lock: Protection for message allocation
75  * @max_msg: Maximum number of messages that can be pending
76  * @free_xfers: A free list for available to use xfers. It is initialized with
77  *		a number of xfers equal to the maximum allowed in-flight
78  *		messages.
79  * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
80  *		   currently in-flight messages.
81  */
82 struct scmi_xfers_info {
83 	unsigned long *xfer_alloc_table;
84 	spinlock_t xfer_lock;
85 	int max_msg;
86 	struct hlist_head free_xfers;
87 	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
88 };
89 
90 /**
91  * struct scmi_protocol_instance  - Describe an initialized protocol instance.
92  * @handle: Reference to the SCMI handle associated to this protocol instance.
93  * @proto: A reference to the protocol descriptor.
94  * @gid: A reference for per-protocol devres management.
95  * @users: A refcount to track effective users of this protocol.
96  * @priv: Reference for optional protocol private data.
97  * @ph: An embedded protocol handle that will be passed down to protocol
98  *	initialization code to identify this instance.
99  *
100  * Each protocol is initialized independently once for each SCMI platform in
101  * which is defined by DT and implemented by the SCMI server fw.
102  */
103 struct scmi_protocol_instance {
104 	const struct scmi_handle	*handle;
105 	const struct scmi_protocol	*proto;
106 	void				*gid;
107 	refcount_t			users;
108 	void				*priv;
109 	struct scmi_protocol_handle	ph;
110 };
111 
112 #define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
113 
114 /**
115  * struct scmi_info - Structure representing a SCMI instance
116  *
117  * @dev: Device pointer
118  * @desc: SoC description for this instance
119  * @version: SCMI revision information containing protocol version,
120  *	implementation version and (sub-)vendor identification.
121  * @handle: Instance of SCMI handle to send to clients
122  * @tx_minfo: Universal Transmit Message management info
123  * @rx_minfo: Universal Receive Message management info
124  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
125  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
126  * @protocols: IDR for protocols' instance descriptors initialized for
127  *	       this SCMI instance: populated on protocol's first attempted
128  *	       usage.
129  * @protocols_mtx: A mutex to protect protocols instances initialization.
130  * @protocols_imp: List of protocols implemented, currently maximum of
131  *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
132  * @active_protocols: IDR storing device_nodes for protocols actually defined
133  *		      in the DT and confirmed as implemented by fw.
134  * @atomic_threshold: Optional system wide DT-configured threshold, expressed
135  *		      in microseconds, for atomic operations.
136  *		      Only SCMI synchronous commands reported by the platform
137  *		      to have an execution latency lesser-equal to the threshold
138  *		      should be considered for atomic mode operation: such
139  *		      decision is finally left up to the SCMI drivers.
140  * @notify_priv: Pointer to private data structure specific to notifications.
141  * @node: List head
142  * @users: Number of users of this instance
143  */
144 struct scmi_info {
145 	struct device *dev;
146 	const struct scmi_desc *desc;
147 	struct scmi_revision_info version;
148 	struct scmi_handle handle;
149 	struct scmi_xfers_info tx_minfo;
150 	struct scmi_xfers_info rx_minfo;
151 	struct idr tx_idr;
152 	struct idr rx_idr;
153 	struct idr protocols;
154 	/* Ensure mutual exclusive access to protocols instance array */
155 	struct mutex protocols_mtx;
156 	u8 *protocols_imp;
157 	struct idr active_protocols;
158 	unsigned int atomic_threshold;
159 	void *notify_priv;
160 	struct list_head node;
161 	int users;
162 };
163 
164 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
165 
166 static const int scmi_linux_errmap[] = {
167 	/* better than switch case as long as return value is continuous */
168 	0,			/* SCMI_SUCCESS */
169 	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
170 	-EINVAL,		/* SCMI_ERR_PARAM */
171 	-EACCES,		/* SCMI_ERR_ACCESS */
172 	-ENOENT,		/* SCMI_ERR_ENTRY */
173 	-ERANGE,		/* SCMI_ERR_RANGE */
174 	-EBUSY,			/* SCMI_ERR_BUSY */
175 	-ECOMM,			/* SCMI_ERR_COMMS */
176 	-EIO,			/* SCMI_ERR_GENERIC */
177 	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
178 	-EPROTO,		/* SCMI_ERR_PROTOCOL */
179 };
180 
181 static inline int scmi_to_linux_errno(int errno)
182 {
183 	int err_idx = -errno;
184 
185 	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
186 		return scmi_linux_errmap[err_idx];
187 	return -EIO;
188 }
189 
190 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
191 					 void *priv)
192 {
193 	struct scmi_info *info = handle_to_scmi_info(handle);
194 
195 	info->notify_priv = priv;
196 	/* Ensure updated protocol private date are visible */
197 	smp_wmb();
198 }
199 
200 void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
201 {
202 	struct scmi_info *info = handle_to_scmi_info(handle);
203 
204 	/* Ensure protocols_private_data has been updated */
205 	smp_rmb();
206 	return info->notify_priv;
207 }
208 
209 /**
210  * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
211  *
212  * @minfo: Pointer to Tx/Rx Message management info based on channel type
213  * @xfer: The xfer to act upon
214  *
215  * Pick the next unused monotonically increasing token and set it into
216  * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
217  * reuse of freshly completed or timed-out xfers, thus mitigating the risk
218  * of incorrect association of a late and expired xfer with a live in-flight
219  * transaction, both happening to re-use the same token identifier.
220  *
221  * Since platform is NOT required to answer our request in-order we should
222  * account for a few rare but possible scenarios:
223  *
224  *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
225  *    using find_next_zero_bit() starting from candidate next_token bit
226  *
227  *  - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
228  *    are plenty of free tokens at start, so try a second pass using
229  *    find_next_zero_bit() and starting from 0.
230  *
231  *  X = used in-flight
232  *
233  * Normal
234  * ------
235  *
236  *		|- xfer_id picked
237  *   -----------+----------------------------------------------------------
238  *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
239  *   ----------------------------------------------------------------------
240  *		^
241  *		|- next_token
242  *
243  * Out-of-order pending at start
244  * -----------------------------
245  *
246  *	  |- xfer_id picked, last_token fixed
247  *   -----+----------------------------------------------------------------
248  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
249  *   ----------------------------------------------------------------------
250  *    ^
251  *    |- next_token
252  *
253  *
254  * Out-of-order pending at end
255  * ---------------------------
256  *
257  *	  |- xfer_id picked, last_token fixed
258  *   -----+----------------------------------------------------------------
259  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
260  *   ----------------------------------------------------------------------
261  *								^
262  *								|- next_token
263  *
264  * Context: Assumes to be called with @xfer_lock already acquired.
265  *
266  * Return: 0 on Success or error
267  */
268 static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
269 			       struct scmi_xfer *xfer)
270 {
271 	unsigned long xfer_id, next_token;
272 
273 	/*
274 	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
275 	 * using the pre-allocated transfer_id as a base.
276 	 * Note that the global transfer_id is shared across all message types
277 	 * so there could be holes in the allocated set of monotonic sequence
278 	 * numbers, but that is going to limit the effectiveness of the
279 	 * mitigation only in very rare limit conditions.
280 	 */
281 	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
282 
283 	/* Pick the next available xfer_id >= next_token */
284 	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
285 				     MSG_TOKEN_MAX, next_token);
286 	if (xfer_id == MSG_TOKEN_MAX) {
287 		/*
288 		 * After heavily out-of-order responses, there are no free
289 		 * tokens ahead, but only at start of xfer_alloc_table so
290 		 * try again from the beginning.
291 		 */
292 		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
293 					     MSG_TOKEN_MAX, 0);
294 		/*
295 		 * Something is wrong if we got here since there can be a
296 		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
297 		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
298 		 */
299 		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
300 			return -ENOMEM;
301 	}
302 
303 	/* Update +/- last_token accordingly if we skipped some hole */
304 	if (xfer_id != next_token)
305 		atomic_add((int)(xfer_id - next_token), &transfer_last_id);
306 
307 	/* Set in-flight */
308 	set_bit(xfer_id, minfo->xfer_alloc_table);
309 	xfer->hdr.seq = (u16)xfer_id;
310 
311 	return 0;
312 }
313 
314 /**
315  * scmi_xfer_token_clear  - Release the token
316  *
317  * @minfo: Pointer to Tx/Rx Message management info based on channel type
318  * @xfer: The xfer to act upon
319  */
320 static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
321 					 struct scmi_xfer *xfer)
322 {
323 	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
324 }
325 
326 /**
327  * scmi_xfer_get() - Allocate one message
328  *
329  * @handle: Pointer to SCMI entity handle
330  * @minfo: Pointer to Tx/Rx Message management info based on channel type
331  * @set_pending: If true a monotonic token is picked and the xfer is added to
332  *		 the pending hash table.
333  *
334  * Helper function which is used by various message functions that are
335  * exposed to clients of this driver for allocating a message traffic event.
336  *
337  * Picks an xfer from the free list @free_xfers (if any available) and, if
338  * required, sets a monotonically increasing token and stores the inflight xfer
339  * into the @pending_xfers hashtable for later retrieval.
340  *
341  * The successfully initialized xfer is refcounted.
342  *
343  * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
344  *	    @free_xfers.
345  *
346  * Return: 0 if all went fine, else corresponding error.
347  */
348 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
349 				       struct scmi_xfers_info *minfo,
350 				       bool set_pending)
351 {
352 	int ret;
353 	unsigned long flags;
354 	struct scmi_xfer *xfer;
355 
356 	spin_lock_irqsave(&minfo->xfer_lock, flags);
357 	if (hlist_empty(&minfo->free_xfers)) {
358 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
359 		return ERR_PTR(-ENOMEM);
360 	}
361 
362 	/* grab an xfer from the free_list */
363 	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
364 	hlist_del_init(&xfer->node);
365 
366 	/*
367 	 * Allocate transfer_id early so that can be used also as base for
368 	 * monotonic sequence number generation if needed.
369 	 */
370 	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
371 
372 	if (set_pending) {
373 		/* Pick and set monotonic token */
374 		ret = scmi_xfer_token_set(minfo, xfer);
375 		if (!ret) {
376 			hash_add(minfo->pending_xfers, &xfer->node,
377 				 xfer->hdr.seq);
378 			xfer->pending = true;
379 		} else {
380 			dev_err(handle->dev,
381 				"Failed to get monotonic token %d\n", ret);
382 			hlist_add_head(&xfer->node, &minfo->free_xfers);
383 			xfer = ERR_PTR(ret);
384 		}
385 	}
386 
387 	if (!IS_ERR(xfer)) {
388 		refcount_set(&xfer->users, 1);
389 		atomic_set(&xfer->busy, SCMI_XFER_FREE);
390 	}
391 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
392 
393 	return xfer;
394 }
395 
396 /**
397  * __scmi_xfer_put() - Release a message
398  *
399  * @minfo: Pointer to Tx/Rx Message management info based on channel type
400  * @xfer: message that was reserved by scmi_xfer_get
401  *
402  * After refcount check, possibly release an xfer, clearing the token slot,
403  * removing xfer from @pending_xfers and putting it back into free_xfers.
404  *
405  * This holds a spinlock to maintain integrity of internal data structures.
406  */
407 static void
408 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
409 {
410 	unsigned long flags;
411 
412 	spin_lock_irqsave(&minfo->xfer_lock, flags);
413 	if (refcount_dec_and_test(&xfer->users)) {
414 		if (xfer->pending) {
415 			scmi_xfer_token_clear(minfo, xfer);
416 			hash_del(&xfer->node);
417 			xfer->pending = false;
418 		}
419 		hlist_add_head(&xfer->node, &minfo->free_xfers);
420 	}
421 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
422 }
423 
424 /**
425  * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
426  *
427  * @minfo: Pointer to Tx/Rx Message management info based on channel type
428  * @xfer_id: Token ID to lookup in @pending_xfers
429  *
430  * Refcounting is untouched.
431  *
432  * Context: Assumes to be called with @xfer_lock already acquired.
433  *
434  * Return: A valid xfer on Success or error otherwise
435  */
436 static struct scmi_xfer *
437 scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
438 {
439 	struct scmi_xfer *xfer = NULL;
440 
441 	if (test_bit(xfer_id, minfo->xfer_alloc_table))
442 		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
443 
444 	return xfer ?: ERR_PTR(-EINVAL);
445 }
446 
447 /**
448  * scmi_msg_response_validate  - Validate message type against state of related
449  * xfer
450  *
451  * @cinfo: A reference to the channel descriptor.
452  * @msg_type: Message type to check
453  * @xfer: A reference to the xfer to validate against @msg_type
454  *
455  * This function checks if @msg_type is congruent with the current state of
456  * a pending @xfer; if an asynchronous delayed response is received before the
457  * related synchronous response (Out-of-Order Delayed Response) the missing
458  * synchronous response is assumed to be OK and completed, carrying on with the
459  * Delayed Response: this is done to address the case in which the underlying
460  * SCMI transport can deliver such out-of-order responses.
461  *
462  * Context: Assumes to be called with xfer->lock already acquired.
463  *
464  * Return: 0 on Success, error otherwise
465  */
466 static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
467 					     u8 msg_type,
468 					     struct scmi_xfer *xfer)
469 {
470 	/*
471 	 * Even if a response was indeed expected on this slot at this point,
472 	 * a buggy platform could wrongly reply feeding us an unexpected
473 	 * delayed response we're not prepared to handle: bail-out safely
474 	 * blaming firmware.
475 	 */
476 	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
477 		dev_err(cinfo->dev,
478 			"Delayed Response for %d not expected! Buggy F/W ?\n",
479 			xfer->hdr.seq);
480 		return -EINVAL;
481 	}
482 
483 	switch (xfer->state) {
484 	case SCMI_XFER_SENT_OK:
485 		if (msg_type == MSG_TYPE_DELAYED_RESP) {
486 			/*
487 			 * Delayed Response expected but delivered earlier.
488 			 * Assume message RESPONSE was OK and skip state.
489 			 */
490 			xfer->hdr.status = SCMI_SUCCESS;
491 			xfer->state = SCMI_XFER_RESP_OK;
492 			complete(&xfer->done);
493 			dev_warn(cinfo->dev,
494 				 "Received valid OoO Delayed Response for %d\n",
495 				 xfer->hdr.seq);
496 		}
497 		break;
498 	case SCMI_XFER_RESP_OK:
499 		if (msg_type != MSG_TYPE_DELAYED_RESP)
500 			return -EINVAL;
501 		break;
502 	case SCMI_XFER_DRESP_OK:
503 		/* No further message expected once in SCMI_XFER_DRESP_OK */
504 		return -EINVAL;
505 	}
506 
507 	return 0;
508 }
509 
510 /**
511  * scmi_xfer_state_update  - Update xfer state
512  *
513  * @xfer: A reference to the xfer to update
514  * @msg_type: Type of message being processed.
515  *
516  * Note that this message is assumed to have been already successfully validated
517  * by @scmi_msg_response_validate(), so here we just update the state.
518  *
519  * Context: Assumes to be called on an xfer exclusively acquired using the
520  *	    busy flag.
521  */
522 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
523 {
524 	xfer->hdr.type = msg_type;
525 
526 	/* Unknown command types were already discarded earlier */
527 	if (xfer->hdr.type == MSG_TYPE_COMMAND)
528 		xfer->state = SCMI_XFER_RESP_OK;
529 	else
530 		xfer->state = SCMI_XFER_DRESP_OK;
531 }
532 
533 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
534 {
535 	int ret;
536 
537 	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
538 
539 	return ret == SCMI_XFER_FREE;
540 }
541 
542 /**
543  * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
544  *
545  * @cinfo: A reference to the channel descriptor.
546  * @msg_hdr: A message header to use as lookup key
547  *
548  * When a valid xfer is found for the sequence number embedded in the provided
549  * msg_hdr, reference counting is properly updated and exclusive access to this
550  * xfer is granted till released with @scmi_xfer_command_release.
551  *
552  * Return: A valid @xfer on Success or error otherwise.
553  */
554 static inline struct scmi_xfer *
555 scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
556 {
557 	int ret;
558 	unsigned long flags;
559 	struct scmi_xfer *xfer;
560 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
561 	struct scmi_xfers_info *minfo = &info->tx_minfo;
562 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
563 	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
564 
565 	/* Are we even expecting this? */
566 	spin_lock_irqsave(&minfo->xfer_lock, flags);
567 	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
568 	if (IS_ERR(xfer)) {
569 		dev_err(cinfo->dev,
570 			"Message for %d type %d is not expected!\n",
571 			xfer_id, msg_type);
572 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
573 		return xfer;
574 	}
575 	refcount_inc(&xfer->users);
576 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
577 
578 	spin_lock_irqsave(&xfer->lock, flags);
579 	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
580 	/*
581 	 * If a pending xfer was found which was also in a congruent state with
582 	 * the received message, acquire exclusive access to it setting the busy
583 	 * flag.
584 	 * Spins only on the rare limit condition of concurrent reception of
585 	 * RESP and DRESP for the same xfer.
586 	 */
587 	if (!ret) {
588 		spin_until_cond(scmi_xfer_acquired(xfer));
589 		scmi_xfer_state_update(xfer, msg_type);
590 	}
591 	spin_unlock_irqrestore(&xfer->lock, flags);
592 
593 	if (ret) {
594 		dev_err(cinfo->dev,
595 			"Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
596 			msg_type, xfer_id, msg_hdr, xfer->state);
597 		/* On error the refcount incremented above has to be dropped */
598 		__scmi_xfer_put(minfo, xfer);
599 		xfer = ERR_PTR(-EINVAL);
600 	}
601 
602 	return xfer;
603 }
604 
605 static inline void scmi_xfer_command_release(struct scmi_info *info,
606 					     struct scmi_xfer *xfer)
607 {
608 	atomic_set(&xfer->busy, SCMI_XFER_FREE);
609 	__scmi_xfer_put(&info->tx_minfo, xfer);
610 }
611 
612 static inline void scmi_clear_channel(struct scmi_info *info,
613 				      struct scmi_chan_info *cinfo)
614 {
615 	if (info->desc->ops->clear_channel)
616 		info->desc->ops->clear_channel(cinfo);
617 }
618 
619 static inline bool is_polling_required(struct scmi_chan_info *cinfo,
620 				       struct scmi_info *info)
621 {
622 	return cinfo->no_completion_irq || info->desc->force_polling;
623 }
624 
625 static inline bool is_transport_polling_capable(struct scmi_info *info)
626 {
627 	return info->desc->ops->poll_done ||
628 		info->desc->sync_cmds_completed_on_ret;
629 }
630 
631 static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
632 				      struct scmi_info *info)
633 {
634 	return is_polling_required(cinfo, info) &&
635 		is_transport_polling_capable(info);
636 }
637 
638 static void scmi_handle_notification(struct scmi_chan_info *cinfo,
639 				     u32 msg_hdr, void *priv)
640 {
641 	struct scmi_xfer *xfer;
642 	struct device *dev = cinfo->dev;
643 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
644 	struct scmi_xfers_info *minfo = &info->rx_minfo;
645 	ktime_t ts;
646 
647 	ts = ktime_get_boottime();
648 	xfer = scmi_xfer_get(cinfo->handle, minfo, false);
649 	if (IS_ERR(xfer)) {
650 		dev_err(dev, "failed to get free message slot (%ld)\n",
651 			PTR_ERR(xfer));
652 		scmi_clear_channel(info, cinfo);
653 		return;
654 	}
655 
656 	unpack_scmi_header(msg_hdr, &xfer->hdr);
657 	if (priv)
658 		/* Ensure order between xfer->priv store and following ops */
659 		smp_store_mb(xfer->priv, priv);
660 	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
661 					    xfer);
662 	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
663 		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
664 
665 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
666 			   xfer->hdr.protocol_id, xfer->hdr.seq,
667 			   MSG_TYPE_NOTIFICATION);
668 
669 	__scmi_xfer_put(minfo, xfer);
670 
671 	scmi_clear_channel(info, cinfo);
672 }
673 
674 static void scmi_handle_response(struct scmi_chan_info *cinfo,
675 				 u32 msg_hdr, void *priv)
676 {
677 	struct scmi_xfer *xfer;
678 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
679 
680 	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
681 	if (IS_ERR(xfer)) {
682 		scmi_clear_channel(info, cinfo);
683 		return;
684 	}
685 
686 	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
687 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
688 		xfer->rx.len = info->desc->max_msg_size;
689 
690 	if (priv)
691 		/* Ensure order between xfer->priv store and following ops */
692 		smp_store_mb(xfer->priv, priv);
693 	info->desc->ops->fetch_response(cinfo, xfer);
694 
695 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
696 			   xfer->hdr.protocol_id, xfer->hdr.seq,
697 			   xfer->hdr.type);
698 
699 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
700 		scmi_clear_channel(info, cinfo);
701 		complete(xfer->async_done);
702 	} else {
703 		complete(&xfer->done);
704 	}
705 
706 	scmi_xfer_command_release(info, xfer);
707 }
708 
709 /**
710  * scmi_rx_callback() - callback for receiving messages
711  *
712  * @cinfo: SCMI channel info
713  * @msg_hdr: Message header
714  * @priv: Transport specific private data.
715  *
716  * Processes one received message to appropriate transfer information and
717  * signals completion of the transfer.
718  *
719  * NOTE: This function will be invoked in IRQ context, hence should be
720  * as optimal as possible.
721  */
722 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
723 {
724 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
725 
726 	switch (msg_type) {
727 	case MSG_TYPE_NOTIFICATION:
728 		scmi_handle_notification(cinfo, msg_hdr, priv);
729 		break;
730 	case MSG_TYPE_COMMAND:
731 	case MSG_TYPE_DELAYED_RESP:
732 		scmi_handle_response(cinfo, msg_hdr, priv);
733 		break;
734 	default:
735 		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
736 		break;
737 	}
738 }
739 
740 /**
741  * xfer_put() - Release a transmit message
742  *
743  * @ph: Pointer to SCMI protocol handle
744  * @xfer: message that was reserved by xfer_get_init
745  */
746 static void xfer_put(const struct scmi_protocol_handle *ph,
747 		     struct scmi_xfer *xfer)
748 {
749 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
750 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
751 
752 	__scmi_xfer_put(&info->tx_minfo, xfer);
753 }
754 
755 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
756 				      struct scmi_xfer *xfer, ktime_t stop)
757 {
758 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
759 
760 	/*
761 	 * Poll also on xfer->done so that polling can be forcibly terminated
762 	 * in case of out-of-order receptions of delayed responses
763 	 */
764 	return info->desc->ops->poll_done(cinfo, xfer) ||
765 	       try_wait_for_completion(&xfer->done) ||
766 	       ktime_after(ktime_get(), stop);
767 }
768 
769 /**
770  * scmi_wait_for_message_response  - An helper to group all the possible ways of
771  * waiting for a synchronous message response.
772  *
773  * @cinfo: SCMI channel info
774  * @xfer: Reference to the transfer being waited for.
775  *
776  * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
777  * configuration flags like xfer->hdr.poll_completion.
778  *
779  * Return: 0 on Success, error otherwise.
780  */
781 static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
782 					  struct scmi_xfer *xfer)
783 {
784 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
785 	struct device *dev = info->dev;
786 	int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
787 
788 	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
789 				      xfer->hdr.protocol_id, xfer->hdr.seq,
790 				      timeout_ms,
791 				      xfer->hdr.poll_completion);
792 
793 	if (xfer->hdr.poll_completion) {
794 		/*
795 		 * Real polling is needed only if transport has NOT declared
796 		 * itself to support synchronous commands replies.
797 		 */
798 		if (!info->desc->sync_cmds_completed_on_ret) {
799 			/*
800 			 * Poll on xfer using transport provided .poll_done();
801 			 * assumes no completion interrupt was available.
802 			 */
803 			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
804 
805 			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
806 								  xfer, stop));
807 			if (ktime_after(ktime_get(), stop)) {
808 				dev_err(dev,
809 					"timed out in resp(caller: %pS) - polling\n",
810 					(void *)_RET_IP_);
811 				ret = -ETIMEDOUT;
812 			}
813 		}
814 
815 		if (!ret) {
816 			unsigned long flags;
817 
818 			/*
819 			 * Do not fetch_response if an out-of-order delayed
820 			 * response is being processed.
821 			 */
822 			spin_lock_irqsave(&xfer->lock, flags);
823 			if (xfer->state == SCMI_XFER_SENT_OK) {
824 				info->desc->ops->fetch_response(cinfo, xfer);
825 				xfer->state = SCMI_XFER_RESP_OK;
826 			}
827 			spin_unlock_irqrestore(&xfer->lock, flags);
828 		}
829 	} else {
830 		/* And we wait for the response. */
831 		if (!wait_for_completion_timeout(&xfer->done,
832 						 msecs_to_jiffies(timeout_ms))) {
833 			dev_err(dev, "timed out in resp(caller: %pS)\n",
834 				(void *)_RET_IP_);
835 			ret = -ETIMEDOUT;
836 		}
837 	}
838 
839 	return ret;
840 }
841 
842 /**
843  * do_xfer() - Do one transfer
844  *
845  * @ph: Pointer to SCMI protocol handle
846  * @xfer: Transfer to initiate and wait for response
847  *
848  * Return: -ETIMEDOUT in case of no response, if transmit error,
849  *	return corresponding error, else if all goes well,
850  *	return 0.
851  */
852 static int do_xfer(const struct scmi_protocol_handle *ph,
853 		   struct scmi_xfer *xfer)
854 {
855 	int ret;
856 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
857 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
858 	struct device *dev = info->dev;
859 	struct scmi_chan_info *cinfo;
860 
861 	/* Check for polling request on custom command xfers at first */
862 	if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
863 		dev_warn_once(dev,
864 			      "Polling mode is not supported by transport.\n");
865 		return -EINVAL;
866 	}
867 
868 	cinfo = idr_find(&info->tx_idr, pi->proto->id);
869 	if (unlikely(!cinfo))
870 		return -EINVAL;
871 
872 	/* True ONLY if also supported by transport. */
873 	if (is_polling_enabled(cinfo, info))
874 		xfer->hdr.poll_completion = true;
875 
876 	/*
877 	 * Initialise protocol id now from protocol handle to avoid it being
878 	 * overridden by mistake (or malice) by the protocol code mangling with
879 	 * the scmi_xfer structure prior to this.
880 	 */
881 	xfer->hdr.protocol_id = pi->proto->id;
882 	reinit_completion(&xfer->done);
883 
884 	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
885 			      xfer->hdr.protocol_id, xfer->hdr.seq,
886 			      xfer->hdr.poll_completion);
887 
888 	xfer->state = SCMI_XFER_SENT_OK;
889 	/*
890 	 * Even though spinlocking is not needed here since no race is possible
891 	 * on xfer->state due to the monotonically increasing tokens allocation,
892 	 * we must anyway ensure xfer->state initialization is not re-ordered
893 	 * after the .send_message() to be sure that on the RX path an early
894 	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
895 	 */
896 	smp_mb();
897 
898 	ret = info->desc->ops->send_message(cinfo, xfer);
899 	if (ret < 0) {
900 		dev_dbg(dev, "Failed to send message %d\n", ret);
901 		return ret;
902 	}
903 
904 	ret = scmi_wait_for_message_response(cinfo, xfer);
905 	if (!ret && xfer->hdr.status)
906 		ret = scmi_to_linux_errno(xfer->hdr.status);
907 
908 	if (info->desc->ops->mark_txdone)
909 		info->desc->ops->mark_txdone(cinfo, ret, xfer);
910 
911 	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
912 			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
913 
914 	return ret;
915 }
916 
917 static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
918 			      struct scmi_xfer *xfer)
919 {
920 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
921 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
922 
923 	xfer->rx.len = info->desc->max_msg_size;
924 }
925 
926 #define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
927 
928 /**
929  * do_xfer_with_response() - Do one transfer and wait until the delayed
930  *	response is received
931  *
932  * @ph: Pointer to SCMI protocol handle
933  * @xfer: Transfer to initiate and wait for response
934  *
935  * Using asynchronous commands in atomic/polling mode should be avoided since
936  * it could cause long busy-waiting here, so ignore polling for the delayed
937  * response and WARN if it was requested for this command transaction since
938  * upper layers should refrain from issuing such kind of requests.
939  *
940  * The only other option would have been to refrain from using any asynchronous
941  * command even if made available, when an atomic transport is detected, and
942  * instead forcibly use the synchronous version (thing that can be easily
943  * attained at the protocol layer), but this would also have led to longer
944  * stalls of the channel for synchronous commands and possibly timeouts.
945  * (in other words there is usually a good reason if a platform provides an
946  *  asynchronous version of a command and we should prefer to use it...just not
947  *  when using atomic/polling mode)
948  *
949  * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
950  *	return corresponding error, else if all goes well, return 0.
951  */
952 static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
953 				 struct scmi_xfer *xfer)
954 {
955 	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
956 	DECLARE_COMPLETION_ONSTACK(async_response);
957 
958 	xfer->async_done = &async_response;
959 
960 	/*
961 	 * Delayed responses should not be polled, so an async command should
962 	 * not have been used when requiring an atomic/poll context; WARN and
963 	 * perform instead a sleeping wait.
964 	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
965 	 */
966 	WARN_ON_ONCE(xfer->hdr.poll_completion);
967 
968 	ret = do_xfer(ph, xfer);
969 	if (!ret) {
970 		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
971 			dev_err(ph->dev,
972 				"timed out in delayed resp(caller: %pS)\n",
973 				(void *)_RET_IP_);
974 			ret = -ETIMEDOUT;
975 		} else if (xfer->hdr.status) {
976 			ret = scmi_to_linux_errno(xfer->hdr.status);
977 		}
978 	}
979 
980 	xfer->async_done = NULL;
981 	return ret;
982 }
983 
984 /**
985  * xfer_get_init() - Allocate and initialise one message for transmit
986  *
987  * @ph: Pointer to SCMI protocol handle
988  * @msg_id: Message identifier
989  * @tx_size: transmit message size
990  * @rx_size: receive message size
991  * @p: pointer to the allocated and initialised message
992  *
993  * This function allocates the message using @scmi_xfer_get and
994  * initialise the header.
995  *
996  * Return: 0 if all went fine with @p pointing to message, else
997  *	corresponding error.
998  */
999 static int xfer_get_init(const struct scmi_protocol_handle *ph,
1000 			 u8 msg_id, size_t tx_size, size_t rx_size,
1001 			 struct scmi_xfer **p)
1002 {
1003 	int ret;
1004 	struct scmi_xfer *xfer;
1005 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1006 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1007 	struct scmi_xfers_info *minfo = &info->tx_minfo;
1008 	struct device *dev = info->dev;
1009 
1010 	/* Ensure we have sane transfer sizes */
1011 	if (rx_size > info->desc->max_msg_size ||
1012 	    tx_size > info->desc->max_msg_size)
1013 		return -ERANGE;
1014 
1015 	xfer = scmi_xfer_get(pi->handle, minfo, true);
1016 	if (IS_ERR(xfer)) {
1017 		ret = PTR_ERR(xfer);
1018 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
1019 		return ret;
1020 	}
1021 
1022 	xfer->tx.len = tx_size;
1023 	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1024 	xfer->hdr.type = MSG_TYPE_COMMAND;
1025 	xfer->hdr.id = msg_id;
1026 	xfer->hdr.poll_completion = false;
1027 
1028 	*p = xfer;
1029 
1030 	return 0;
1031 }
1032 
1033 /**
1034  * version_get() - command to get the revision of the SCMI entity
1035  *
1036  * @ph: Pointer to SCMI protocol handle
1037  * @version: Holds returned version of protocol.
1038  *
1039  * Updates the SCMI information in the internal data structure.
1040  *
1041  * Return: 0 if all went fine, else return appropriate error.
1042  */
1043 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1044 {
1045 	int ret;
1046 	__le32 *rev_info;
1047 	struct scmi_xfer *t;
1048 
1049 	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1050 	if (ret)
1051 		return ret;
1052 
1053 	ret = do_xfer(ph, t);
1054 	if (!ret) {
1055 		rev_info = t->rx.buf;
1056 		*version = le32_to_cpu(*rev_info);
1057 	}
1058 
1059 	xfer_put(ph, t);
1060 	return ret;
1061 }
1062 
1063 /**
1064  * scmi_set_protocol_priv  - Set protocol specific data at init time
1065  *
1066  * @ph: A reference to the protocol handle.
1067  * @priv: The private data to set.
1068  *
1069  * Return: 0 on Success
1070  */
1071 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1072 				  void *priv)
1073 {
1074 	struct scmi_protocol_instance *pi = ph_to_pi(ph);
1075 
1076 	pi->priv = priv;
1077 
1078 	return 0;
1079 }
1080 
1081 /**
1082  * scmi_get_protocol_priv  - Set protocol specific data at init time
1083  *
1084  * @ph: A reference to the protocol handle.
1085  *
1086  * Return: Protocol private data if any was set.
1087  */
1088 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1089 {
1090 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1091 
1092 	return pi->priv;
1093 }
1094 
1095 static const struct scmi_xfer_ops xfer_ops = {
1096 	.version_get = version_get,
1097 	.xfer_get_init = xfer_get_init,
1098 	.reset_rx_to_maxsz = reset_rx_to_maxsz,
1099 	.do_xfer = do_xfer,
1100 	.do_xfer_with_response = do_xfer_with_response,
1101 	.xfer_put = xfer_put,
1102 };
1103 
1104 /**
1105  * scmi_revision_area_get  - Retrieve version memory area.
1106  *
1107  * @ph: A reference to the protocol handle.
1108  *
1109  * A helper to grab the version memory area reference during SCMI Base protocol
1110  * initialization.
1111  *
1112  * Return: A reference to the version memory area associated to the SCMI
1113  *	   instance underlying this protocol handle.
1114  */
1115 struct scmi_revision_info *
1116 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1117 {
1118 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1119 
1120 	return pi->handle->version;
1121 }
1122 
1123 /**
1124  * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
1125  * instance descriptor.
1126  * @info: The reference to the related SCMI instance.
1127  * @proto: The protocol descriptor.
1128  *
1129  * Allocate a new protocol instance descriptor, using the provided @proto
1130  * description, against the specified SCMI instance @info, and initialize it;
1131  * all resources management is handled via a dedicated per-protocol devres
1132  * group.
1133  *
1134  * Context: Assumes to be called with @protocols_mtx already acquired.
1135  * Return: A reference to a freshly allocated and initialized protocol instance
1136  *	   or ERR_PTR on failure. On failure the @proto reference is at first
1137  *	   put using @scmi_protocol_put() before releasing all the devres group.
1138  */
1139 static struct scmi_protocol_instance *
1140 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1141 				  const struct scmi_protocol *proto)
1142 {
1143 	int ret = -ENOMEM;
1144 	void *gid;
1145 	struct scmi_protocol_instance *pi;
1146 	const struct scmi_handle *handle = &info->handle;
1147 
1148 	/* Protocol specific devres group */
1149 	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1150 	if (!gid) {
1151 		scmi_protocol_put(proto->id);
1152 		goto out;
1153 	}
1154 
1155 	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1156 	if (!pi)
1157 		goto clean;
1158 
1159 	pi->gid = gid;
1160 	pi->proto = proto;
1161 	pi->handle = handle;
1162 	pi->ph.dev = handle->dev;
1163 	pi->ph.xops = &xfer_ops;
1164 	pi->ph.set_priv = scmi_set_protocol_priv;
1165 	pi->ph.get_priv = scmi_get_protocol_priv;
1166 	refcount_set(&pi->users, 1);
1167 	/* proto->init is assured NON NULL by scmi_protocol_register */
1168 	ret = pi->proto->instance_init(&pi->ph);
1169 	if (ret)
1170 		goto clean;
1171 
1172 	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1173 			GFP_KERNEL);
1174 	if (ret != proto->id)
1175 		goto clean;
1176 
1177 	/*
1178 	 * Warn but ignore events registration errors since we do not want
1179 	 * to skip whole protocols if their notifications are messed up.
1180 	 */
1181 	if (pi->proto->events) {
1182 		ret = scmi_register_protocol_events(handle, pi->proto->id,
1183 						    &pi->ph,
1184 						    pi->proto->events);
1185 		if (ret)
1186 			dev_warn(handle->dev,
1187 				 "Protocol:%X - Events Registration Failed - err:%d\n",
1188 				 pi->proto->id, ret);
1189 	}
1190 
1191 	devres_close_group(handle->dev, pi->gid);
1192 	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1193 
1194 	return pi;
1195 
1196 clean:
1197 	/* Take care to put the protocol module's owner before releasing all */
1198 	scmi_protocol_put(proto->id);
1199 	devres_release_group(handle->dev, gid);
1200 out:
1201 	return ERR_PTR(ret);
1202 }
1203 
1204 /**
1205  * scmi_get_protocol_instance  - Protocol initialization helper.
1206  * @handle: A reference to the SCMI platform instance.
1207  * @protocol_id: The protocol being requested.
1208  *
1209  * In case the required protocol has never been requested before for this
1210  * instance, allocate and initialize all the needed structures while handling
1211  * resource allocation with a dedicated per-protocol devres subgroup.
1212  *
1213  * Return: A reference to an initialized protocol instance or error on failure:
1214  *	   in particular returns -EPROBE_DEFER when the desired protocol could
1215  *	   NOT be found.
1216  */
1217 static struct scmi_protocol_instance * __must_check
1218 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1219 {
1220 	struct scmi_protocol_instance *pi;
1221 	struct scmi_info *info = handle_to_scmi_info(handle);
1222 
1223 	mutex_lock(&info->protocols_mtx);
1224 	pi = idr_find(&info->protocols, protocol_id);
1225 
1226 	if (pi) {
1227 		refcount_inc(&pi->users);
1228 	} else {
1229 		const struct scmi_protocol *proto;
1230 
1231 		/* Fails if protocol not registered on bus */
1232 		proto = scmi_protocol_get(protocol_id);
1233 		if (proto)
1234 			pi = scmi_alloc_init_protocol_instance(info, proto);
1235 		else
1236 			pi = ERR_PTR(-EPROBE_DEFER);
1237 	}
1238 	mutex_unlock(&info->protocols_mtx);
1239 
1240 	return pi;
1241 }
1242 
1243 /**
1244  * scmi_protocol_acquire  - Protocol acquire
1245  * @handle: A reference to the SCMI platform instance.
1246  * @protocol_id: The protocol being requested.
1247  *
1248  * Register a new user for the requested protocol on the specified SCMI
1249  * platform instance, possibly triggering its initialization on first user.
1250  *
1251  * Return: 0 if protocol was acquired successfully.
1252  */
1253 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1254 {
1255 	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1256 }
1257 
1258 /**
1259  * scmi_protocol_release  - Protocol de-initialization helper.
1260  * @handle: A reference to the SCMI platform instance.
1261  * @protocol_id: The protocol being requested.
1262  *
1263  * Remove one user for the specified protocol and triggers de-initialization
1264  * and resources de-allocation once the last user has gone.
1265  */
1266 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1267 {
1268 	struct scmi_info *info = handle_to_scmi_info(handle);
1269 	struct scmi_protocol_instance *pi;
1270 
1271 	mutex_lock(&info->protocols_mtx);
1272 	pi = idr_find(&info->protocols, protocol_id);
1273 	if (WARN_ON(!pi))
1274 		goto out;
1275 
1276 	if (refcount_dec_and_test(&pi->users)) {
1277 		void *gid = pi->gid;
1278 
1279 		if (pi->proto->events)
1280 			scmi_deregister_protocol_events(handle, protocol_id);
1281 
1282 		if (pi->proto->instance_deinit)
1283 			pi->proto->instance_deinit(&pi->ph);
1284 
1285 		idr_remove(&info->protocols, protocol_id);
1286 
1287 		scmi_protocol_put(protocol_id);
1288 
1289 		devres_release_group(handle->dev, gid);
1290 		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1291 			protocol_id);
1292 	}
1293 
1294 out:
1295 	mutex_unlock(&info->protocols_mtx);
1296 }
1297 
1298 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1299 				     u8 *prot_imp)
1300 {
1301 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1302 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1303 
1304 	info->protocols_imp = prot_imp;
1305 }
1306 
1307 static bool
1308 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1309 {
1310 	int i;
1311 	struct scmi_info *info = handle_to_scmi_info(handle);
1312 
1313 	if (!info->protocols_imp)
1314 		return false;
1315 
1316 	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
1317 		if (info->protocols_imp[i] == prot_id)
1318 			return true;
1319 	return false;
1320 }
1321 
1322 struct scmi_protocol_devres {
1323 	const struct scmi_handle *handle;
1324 	u8 protocol_id;
1325 };
1326 
1327 static void scmi_devm_release_protocol(struct device *dev, void *res)
1328 {
1329 	struct scmi_protocol_devres *dres = res;
1330 
1331 	scmi_protocol_release(dres->handle, dres->protocol_id);
1332 }
1333 
1334 /**
1335  * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
1336  * @sdev: A reference to an scmi_device whose embedded struct device is to
1337  *	  be used for devres accounting.
1338  * @protocol_id: The protocol being requested.
1339  * @ph: A pointer reference used to pass back the associated protocol handle.
1340  *
1341  * Get hold of a protocol accounting for its usage, eventually triggering its
1342  * initialization, and returning the protocol specific operations and related
1343  * protocol handle which will be used as first argument in most of the
1344  * protocols operations methods.
1345  * Being a devres based managed method, protocol hold will be automatically
1346  * released, and possibly de-initialized on last user, once the SCMI driver
1347  * owning the scmi_device is unbound from it.
1348  *
1349  * Return: A reference to the requested protocol operations or error.
1350  *	   Must be checked for errors by caller.
1351  */
1352 static const void __must_check *
1353 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
1354 		       struct scmi_protocol_handle **ph)
1355 {
1356 	struct scmi_protocol_instance *pi;
1357 	struct scmi_protocol_devres *dres;
1358 	struct scmi_handle *handle = sdev->handle;
1359 
1360 	if (!ph)
1361 		return ERR_PTR(-EINVAL);
1362 
1363 	dres = devres_alloc(scmi_devm_release_protocol,
1364 			    sizeof(*dres), GFP_KERNEL);
1365 	if (!dres)
1366 		return ERR_PTR(-ENOMEM);
1367 
1368 	pi = scmi_get_protocol_instance(handle, protocol_id);
1369 	if (IS_ERR(pi)) {
1370 		devres_free(dres);
1371 		return pi;
1372 	}
1373 
1374 	dres->handle = handle;
1375 	dres->protocol_id = protocol_id;
1376 	devres_add(&sdev->dev, dres);
1377 
1378 	*ph = &pi->ph;
1379 
1380 	return pi->proto->ops;
1381 }
1382 
1383 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
1384 {
1385 	struct scmi_protocol_devres *dres = res;
1386 
1387 	if (WARN_ON(!dres || !data))
1388 		return 0;
1389 
1390 	return dres->protocol_id == *((u8 *)data);
1391 }
1392 
1393 /**
1394  * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
1395  * @sdev: A reference to an scmi_device whose embedded struct device is to
1396  *	  be used for devres accounting.
1397  * @protocol_id: The protocol being requested.
1398  *
1399  * Explicitly release a protocol hold previously obtained calling the above
1400  * @scmi_devm_protocol_get.
1401  */
1402 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
1403 {
1404 	int ret;
1405 
1406 	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
1407 			     scmi_devm_protocol_match, &protocol_id);
1408 	WARN_ON(ret);
1409 }
1410 
1411 /**
1412  * scmi_is_transport_atomic  - Method to check if underlying transport for an
1413  * SCMI instance is configured as atomic.
1414  *
1415  * @handle: A reference to the SCMI platform instance.
1416  * @atomic_threshold: An optional return value for the system wide currently
1417  *		      configured threshold for atomic operations.
1418  *
1419  * Return: True if transport is configured as atomic
1420  */
1421 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
1422 				     unsigned int *atomic_threshold)
1423 {
1424 	bool ret;
1425 	struct scmi_info *info = handle_to_scmi_info(handle);
1426 
1427 	ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
1428 	if (ret && atomic_threshold)
1429 		*atomic_threshold = info->atomic_threshold;
1430 
1431 	return ret;
1432 }
1433 
1434 static inline
1435 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
1436 {
1437 	info->users++;
1438 	return &info->handle;
1439 }
1440 
1441 /**
1442  * scmi_handle_get() - Get the SCMI handle for a device
1443  *
1444  * @dev: pointer to device for which we want SCMI handle
1445  *
1446  * NOTE: The function does not track individual clients of the framework
1447  * and is expected to be maintained by caller of SCMI protocol library.
1448  * scmi_handle_put must be balanced with successful scmi_handle_get
1449  *
1450  * Return: pointer to handle if successful, NULL on error
1451  */
1452 struct scmi_handle *scmi_handle_get(struct device *dev)
1453 {
1454 	struct list_head *p;
1455 	struct scmi_info *info;
1456 	struct scmi_handle *handle = NULL;
1457 
1458 	mutex_lock(&scmi_list_mutex);
1459 	list_for_each(p, &scmi_list) {
1460 		info = list_entry(p, struct scmi_info, node);
1461 		if (dev->parent == info->dev) {
1462 			handle = scmi_handle_get_from_info_unlocked(info);
1463 			break;
1464 		}
1465 	}
1466 	mutex_unlock(&scmi_list_mutex);
1467 
1468 	return handle;
1469 }
1470 
1471 /**
1472  * scmi_handle_put() - Release the handle acquired by scmi_handle_get
1473  *
1474  * @handle: handle acquired by scmi_handle_get
1475  *
1476  * NOTE: The function does not track individual clients of the framework
1477  * and is expected to be maintained by caller of SCMI protocol library.
1478  * scmi_handle_put must be balanced with successful scmi_handle_get
1479  *
1480  * Return: 0 is successfully released
1481  *	if null was passed, it returns -EINVAL;
1482  */
1483 int scmi_handle_put(const struct scmi_handle *handle)
1484 {
1485 	struct scmi_info *info;
1486 
1487 	if (!handle)
1488 		return -EINVAL;
1489 
1490 	info = handle_to_scmi_info(handle);
1491 	mutex_lock(&scmi_list_mutex);
1492 	if (!WARN_ON(!info->users))
1493 		info->users--;
1494 	mutex_unlock(&scmi_list_mutex);
1495 
1496 	return 0;
1497 }
1498 
1499 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1500 				 struct scmi_xfers_info *info)
1501 {
1502 	int i;
1503 	struct scmi_xfer *xfer;
1504 	struct device *dev = sinfo->dev;
1505 	const struct scmi_desc *desc = sinfo->desc;
1506 
1507 	/* Pre-allocated messages, no more than what hdr.seq can support */
1508 	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
1509 		dev_err(dev,
1510 			"Invalid maximum messages %d, not in range [1 - %lu]\n",
1511 			info->max_msg, MSG_TOKEN_MAX);
1512 		return -EINVAL;
1513 	}
1514 
1515 	hash_init(info->pending_xfers);
1516 
1517 	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
1518 	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
1519 					      sizeof(long), GFP_KERNEL);
1520 	if (!info->xfer_alloc_table)
1521 		return -ENOMEM;
1522 
1523 	/*
1524 	 * Preallocate a number of xfers equal to max inflight messages,
1525 	 * pre-initialize the buffer pointer to pre-allocated buffers and
1526 	 * attach all of them to the free list
1527 	 */
1528 	INIT_HLIST_HEAD(&info->free_xfers);
1529 	for (i = 0; i < info->max_msg; i++) {
1530 		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
1531 		if (!xfer)
1532 			return -ENOMEM;
1533 
1534 		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
1535 					    GFP_KERNEL);
1536 		if (!xfer->rx.buf)
1537 			return -ENOMEM;
1538 
1539 		xfer->tx.buf = xfer->rx.buf;
1540 		init_completion(&xfer->done);
1541 		spin_lock_init(&xfer->lock);
1542 
1543 		/* Add initialized xfer to the free list */
1544 		hlist_add_head(&xfer->node, &info->free_xfers);
1545 	}
1546 
1547 	spin_lock_init(&info->xfer_lock);
1548 
1549 	return 0;
1550 }
1551 
1552 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
1553 {
1554 	const struct scmi_desc *desc = sinfo->desc;
1555 
1556 	if (!desc->ops->get_max_msg) {
1557 		sinfo->tx_minfo.max_msg = desc->max_msg;
1558 		sinfo->rx_minfo.max_msg = desc->max_msg;
1559 	} else {
1560 		struct scmi_chan_info *base_cinfo;
1561 
1562 		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
1563 		if (!base_cinfo)
1564 			return -EINVAL;
1565 		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
1566 
1567 		/* RX channel is optional so can be skipped */
1568 		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
1569 		if (base_cinfo)
1570 			sinfo->rx_minfo.max_msg =
1571 				desc->ops->get_max_msg(base_cinfo);
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 static int scmi_xfer_info_init(struct scmi_info *sinfo)
1578 {
1579 	int ret;
1580 
1581 	ret = scmi_channels_max_msg_configure(sinfo);
1582 	if (ret)
1583 		return ret;
1584 
1585 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1586 	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1587 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1588 
1589 	return ret;
1590 }
1591 
1592 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1593 			   int prot_id, bool tx)
1594 {
1595 	int ret, idx;
1596 	struct scmi_chan_info *cinfo;
1597 	struct idr *idr;
1598 
1599 	/* Transmit channel is first entry i.e. index 0 */
1600 	idx = tx ? 0 : 1;
1601 	idr = tx ? &info->tx_idr : &info->rx_idr;
1602 
1603 	/* check if already allocated, used for multiple device per protocol */
1604 	cinfo = idr_find(idr, prot_id);
1605 	if (cinfo)
1606 		return 0;
1607 
1608 	if (!info->desc->ops->chan_available(dev, idx)) {
1609 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
1610 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
1611 			return -EINVAL;
1612 		goto idr_alloc;
1613 	}
1614 
1615 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
1616 	if (!cinfo)
1617 		return -ENOMEM;
1618 
1619 	cinfo->dev = dev;
1620 
1621 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
1622 	if (ret)
1623 		return ret;
1624 
1625 	if (tx && is_polling_required(cinfo, info)) {
1626 		if (is_transport_polling_capable(info))
1627 			dev_info(dev,
1628 				 "Enabled polling mode TX channel - prot_id:%d\n",
1629 				 prot_id);
1630 		else
1631 			dev_warn(dev,
1632 				 "Polling mode NOT supported by transport.\n");
1633 	}
1634 
1635 idr_alloc:
1636 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
1637 	if (ret != prot_id) {
1638 		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
1639 		return ret;
1640 	}
1641 
1642 	cinfo->handle = &info->handle;
1643 	return 0;
1644 }
1645 
1646 static inline int
1647 scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
1648 {
1649 	int ret = scmi_chan_setup(info, dev, prot_id, true);
1650 
1651 	if (!ret) /* Rx is optional, hence no error check */
1652 		scmi_chan_setup(info, dev, prot_id, false);
1653 
1654 	return ret;
1655 }
1656 
1657 /**
1658  * scmi_get_protocol_device  - Helper to get/create an SCMI device.
1659  *
1660  * @np: A device node representing a valid active protocols for the referred
1661  * SCMI instance.
1662  * @info: The referred SCMI instance for which we are getting/creating this
1663  * device.
1664  * @prot_id: The protocol ID.
1665  * @name: The device name.
1666  *
1667  * Referring to the specific SCMI instance identified by @info, this helper
1668  * takes care to return a properly initialized device matching the requested
1669  * @proto_id and @name: if device was still not existent it is created as a
1670  * child of the specified SCMI instance @info and its transport properly
1671  * initialized as usual.
1672  *
1673  * Return: A properly initialized scmi device, NULL otherwise.
1674  */
1675 static inline struct scmi_device *
1676 scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
1677 			 int prot_id, const char *name)
1678 {
1679 	struct scmi_device *sdev;
1680 
1681 	/* Already created for this parent SCMI instance ? */
1682 	sdev = scmi_child_dev_find(info->dev, prot_id, name);
1683 	if (sdev)
1684 		return sdev;
1685 
1686 	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
1687 
1688 	sdev = scmi_device_create(np, info->dev, prot_id, name);
1689 	if (!sdev) {
1690 		dev_err(info->dev, "failed to create %d protocol device\n",
1691 			prot_id);
1692 		return NULL;
1693 	}
1694 
1695 	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
1696 		dev_err(&sdev->dev, "failed to setup transport\n");
1697 		scmi_device_destroy(sdev);
1698 		return NULL;
1699 	}
1700 
1701 	return sdev;
1702 }
1703 
1704 static inline void
1705 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
1706 			    int prot_id, const char *name)
1707 {
1708 	struct scmi_device *sdev;
1709 
1710 	sdev = scmi_get_protocol_device(np, info, prot_id, name);
1711 	if (!sdev)
1712 		return;
1713 
1714 	/* setup handle now as the transport is ready */
1715 	scmi_set_handle(sdev);
1716 }
1717 
1718 /**
1719  * scmi_create_protocol_devices  - Create devices for all pending requests for
1720  * this SCMI instance.
1721  *
1722  * @np: The device node describing the protocol
1723  * @info: The SCMI instance descriptor
1724  * @prot_id: The protocol ID
1725  *
1726  * All devices previously requested for this instance (if any) are found and
1727  * created by scanning the proper @&scmi_requested_devices entry.
1728  */
1729 static void scmi_create_protocol_devices(struct device_node *np,
1730 					 struct scmi_info *info, int prot_id)
1731 {
1732 	struct list_head *phead;
1733 
1734 	mutex_lock(&scmi_requested_devices_mtx);
1735 	phead = idr_find(&scmi_requested_devices, prot_id);
1736 	if (phead) {
1737 		struct scmi_requested_dev *rdev;
1738 
1739 		list_for_each_entry(rdev, phead, node)
1740 			scmi_create_protocol_device(np, info, prot_id,
1741 						    rdev->id_table->name);
1742 	}
1743 	mutex_unlock(&scmi_requested_devices_mtx);
1744 }
1745 
1746 /**
1747  * scmi_protocol_device_request  - Helper to request a device
1748  *
1749  * @id_table: A protocol/name pair descriptor for the device to be created.
1750  *
1751  * This helper let an SCMI driver request specific devices identified by the
1752  * @id_table to be created for each active SCMI instance.
1753  *
1754  * The requested device name MUST NOT be already existent for any protocol;
1755  * at first the freshly requested @id_table is annotated in the IDR table
1756  * @scmi_requested_devices, then a matching device is created for each already
1757  * active SCMI instance. (if any)
1758  *
1759  * This way the requested device is created straight-away for all the already
1760  * initialized(probed) SCMI instances (handles) and it remains also annotated
1761  * as pending creation if the requesting SCMI driver was loaded before some
1762  * SCMI instance and related transports were available: when such late instance
1763  * is probed, its probe will take care to scan the list of pending requested
1764  * devices and create those on its own (see @scmi_create_protocol_devices and
1765  * its enclosing loop)
1766  *
1767  * Return: 0 on Success
1768  */
1769 int scmi_protocol_device_request(const struct scmi_device_id *id_table)
1770 {
1771 	int ret = 0;
1772 	unsigned int id = 0;
1773 	struct list_head *head, *phead = NULL;
1774 	struct scmi_requested_dev *rdev;
1775 	struct scmi_info *info;
1776 
1777 	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1778 		 id_table->name, id_table->protocol_id);
1779 
1780 	/*
1781 	 * Search for the matching protocol rdev list and then search
1782 	 * of any existent equally named device...fails if any duplicate found.
1783 	 */
1784 	mutex_lock(&scmi_requested_devices_mtx);
1785 	idr_for_each_entry(&scmi_requested_devices, head, id) {
1786 		if (!phead) {
1787 			/* A list found registered in the IDR is never empty */
1788 			rdev = list_first_entry(head, struct scmi_requested_dev,
1789 						node);
1790 			if (rdev->id_table->protocol_id ==
1791 			    id_table->protocol_id)
1792 				phead = head;
1793 		}
1794 		list_for_each_entry(rdev, head, node) {
1795 			if (!strcmp(rdev->id_table->name, id_table->name)) {
1796 				pr_err("Ignoring duplicate request [%d] %s\n",
1797 				       rdev->id_table->protocol_id,
1798 				       rdev->id_table->name);
1799 				ret = -EINVAL;
1800 				goto out;
1801 			}
1802 		}
1803 	}
1804 
1805 	/*
1806 	 * No duplicate found for requested id_table, so let's create a new
1807 	 * requested device entry for this new valid request.
1808 	 */
1809 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1810 	if (!rdev) {
1811 		ret = -ENOMEM;
1812 		goto out;
1813 	}
1814 	rdev->id_table = id_table;
1815 
1816 	/*
1817 	 * Append the new requested device table descriptor to the head of the
1818 	 * related protocol list, eventually creating such head if not already
1819 	 * there.
1820 	 */
1821 	if (!phead) {
1822 		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1823 		if (!phead) {
1824 			kfree(rdev);
1825 			ret = -ENOMEM;
1826 			goto out;
1827 		}
1828 		INIT_LIST_HEAD(phead);
1829 
1830 		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1831 				id_table->protocol_id,
1832 				id_table->protocol_id + 1, GFP_KERNEL);
1833 		if (ret != id_table->protocol_id) {
1834 			pr_err("Failed to save SCMI device - ret:%d\n", ret);
1835 			kfree(rdev);
1836 			kfree(phead);
1837 			ret = -EINVAL;
1838 			goto out;
1839 		}
1840 		ret = 0;
1841 	}
1842 	list_add(&rdev->node, phead);
1843 
1844 	/*
1845 	 * Now effectively create and initialize the requested device for every
1846 	 * already initialized SCMI instance which has registered the requested
1847 	 * protocol as a valid active one: i.e. defined in DT and supported by
1848 	 * current platform FW.
1849 	 */
1850 	mutex_lock(&scmi_list_mutex);
1851 	list_for_each_entry(info, &scmi_list, node) {
1852 		struct device_node *child;
1853 
1854 		child = idr_find(&info->active_protocols,
1855 				 id_table->protocol_id);
1856 		if (child) {
1857 			struct scmi_device *sdev;
1858 
1859 			sdev = scmi_get_protocol_device(child, info,
1860 							id_table->protocol_id,
1861 							id_table->name);
1862 			/* Set handle if not already set: device existed */
1863 			if (sdev && !sdev->handle)
1864 				sdev->handle =
1865 					scmi_handle_get_from_info_unlocked(info);
1866 		} else {
1867 			dev_err(info->dev,
1868 				"Failed. SCMI protocol %d not active.\n",
1869 				id_table->protocol_id);
1870 		}
1871 	}
1872 	mutex_unlock(&scmi_list_mutex);
1873 
1874 out:
1875 	mutex_unlock(&scmi_requested_devices_mtx);
1876 
1877 	return ret;
1878 }
1879 
1880 /**
1881  * scmi_protocol_device_unrequest  - Helper to unrequest a device
1882  *
1883  * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1884  *
1885  * An helper to let an SCMI driver release its request about devices; note that
1886  * devices are created and initialized once the first SCMI driver request them
1887  * but they destroyed only on SCMI core unloading/unbinding.
1888  *
1889  * The current SCMI transport layer uses such devices as internal references and
1890  * as such they could be shared as same transport between multiple drivers so
1891  * that cannot be safely destroyed till the whole SCMI stack is removed.
1892  * (unless adding further burden of refcounting.)
1893  */
1894 void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
1895 {
1896 	struct list_head *phead;
1897 
1898 	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1899 		 id_table->name, id_table->protocol_id);
1900 
1901 	mutex_lock(&scmi_requested_devices_mtx);
1902 	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1903 	if (phead) {
1904 		struct scmi_requested_dev *victim, *tmp;
1905 
1906 		list_for_each_entry_safe(victim, tmp, phead, node) {
1907 			if (!strcmp(victim->id_table->name, id_table->name)) {
1908 				list_del(&victim->node);
1909 				kfree(victim);
1910 				break;
1911 			}
1912 		}
1913 
1914 		if (list_empty(phead)) {
1915 			idr_remove(&scmi_requested_devices,
1916 				   id_table->protocol_id);
1917 			kfree(phead);
1918 		}
1919 	}
1920 	mutex_unlock(&scmi_requested_devices_mtx);
1921 }
1922 
1923 static int scmi_cleanup_txrx_channels(struct scmi_info *info)
1924 {
1925 	int ret;
1926 	struct idr *idr = &info->tx_idr;
1927 
1928 	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1929 	idr_destroy(&info->tx_idr);
1930 
1931 	idr = &info->rx_idr;
1932 	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1933 	idr_destroy(&info->rx_idr);
1934 
1935 	return ret;
1936 }
1937 
1938 static int scmi_probe(struct platform_device *pdev)
1939 {
1940 	int ret;
1941 	struct scmi_handle *handle;
1942 	const struct scmi_desc *desc;
1943 	struct scmi_info *info;
1944 	struct device *dev = &pdev->dev;
1945 	struct device_node *child, *np = dev->of_node;
1946 
1947 	desc = of_device_get_match_data(dev);
1948 	if (!desc)
1949 		return -EINVAL;
1950 
1951 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1952 	if (!info)
1953 		return -ENOMEM;
1954 
1955 	info->dev = dev;
1956 	info->desc = desc;
1957 	INIT_LIST_HEAD(&info->node);
1958 	idr_init(&info->protocols);
1959 	mutex_init(&info->protocols_mtx);
1960 	idr_init(&info->active_protocols);
1961 
1962 	platform_set_drvdata(pdev, info);
1963 	idr_init(&info->tx_idr);
1964 	idr_init(&info->rx_idr);
1965 
1966 	handle = &info->handle;
1967 	handle->dev = info->dev;
1968 	handle->version = &info->version;
1969 	handle->devm_protocol_get = scmi_devm_protocol_get;
1970 	handle->devm_protocol_put = scmi_devm_protocol_put;
1971 
1972 	/* System wide atomic threshold for atomic ops .. if any */
1973 	if (!of_property_read_u32(np, "atomic-threshold-us",
1974 				  &info->atomic_threshold))
1975 		dev_info(dev,
1976 			 "SCMI System wide atomic threshold set to %d us\n",
1977 			 info->atomic_threshold);
1978 	handle->is_transport_atomic = scmi_is_transport_atomic;
1979 
1980 	if (desc->ops->link_supplier) {
1981 		ret = desc->ops->link_supplier(dev);
1982 		if (ret)
1983 			return ret;
1984 	}
1985 
1986 	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
1987 	if (ret)
1988 		return ret;
1989 
1990 	ret = scmi_xfer_info_init(info);
1991 	if (ret)
1992 		goto clear_txrx_setup;
1993 
1994 	if (scmi_notification_init(handle))
1995 		dev_err(dev, "SCMI Notifications NOT available.\n");
1996 
1997 	if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
1998 		dev_err(dev,
1999 			"Transport is not polling capable. Atomic mode not supported.\n");
2000 
2001 	/*
2002 	 * Trigger SCMI Base protocol initialization.
2003 	 * It's mandatory and won't be ever released/deinit until the
2004 	 * SCMI stack is shutdown/unloaded as a whole.
2005 	 */
2006 	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2007 	if (ret) {
2008 		dev_err(dev, "unable to communicate with SCMI\n");
2009 		goto notification_exit;
2010 	}
2011 
2012 	mutex_lock(&scmi_list_mutex);
2013 	list_add_tail(&info->node, &scmi_list);
2014 	mutex_unlock(&scmi_list_mutex);
2015 
2016 	for_each_available_child_of_node(np, child) {
2017 		u32 prot_id;
2018 
2019 		if (of_property_read_u32(child, "reg", &prot_id))
2020 			continue;
2021 
2022 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2023 			dev_err(dev, "Out of range protocol %d\n", prot_id);
2024 
2025 		if (!scmi_is_protocol_implemented(handle, prot_id)) {
2026 			dev_err(dev, "SCMI protocol %d not implemented\n",
2027 				prot_id);
2028 			continue;
2029 		}
2030 
2031 		/*
2032 		 * Save this valid DT protocol descriptor amongst
2033 		 * @active_protocols for this SCMI instance/
2034 		 */
2035 		ret = idr_alloc(&info->active_protocols, child,
2036 				prot_id, prot_id + 1, GFP_KERNEL);
2037 		if (ret != prot_id) {
2038 			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2039 				prot_id);
2040 			continue;
2041 		}
2042 
2043 		of_node_get(child);
2044 		scmi_create_protocol_devices(child, info, prot_id);
2045 	}
2046 
2047 	return 0;
2048 
2049 notification_exit:
2050 	scmi_notification_exit(&info->handle);
2051 clear_txrx_setup:
2052 	scmi_cleanup_txrx_channels(info);
2053 	return ret;
2054 }
2055 
2056 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
2057 {
2058 	idr_remove(idr, id);
2059 }
2060 
2061 static int scmi_remove(struct platform_device *pdev)
2062 {
2063 	int ret = 0, id;
2064 	struct scmi_info *info = platform_get_drvdata(pdev);
2065 	struct device_node *child;
2066 
2067 	mutex_lock(&scmi_list_mutex);
2068 	if (info->users)
2069 		ret = -EBUSY;
2070 	else
2071 		list_del(&info->node);
2072 	mutex_unlock(&scmi_list_mutex);
2073 
2074 	if (ret)
2075 		return ret;
2076 
2077 	scmi_notification_exit(&info->handle);
2078 
2079 	mutex_lock(&info->protocols_mtx);
2080 	idr_destroy(&info->protocols);
2081 	mutex_unlock(&info->protocols_mtx);
2082 
2083 	idr_for_each_entry(&info->active_protocols, child, id)
2084 		of_node_put(child);
2085 	idr_destroy(&info->active_protocols);
2086 
2087 	/* Safe to free channels since no more users */
2088 	return scmi_cleanup_txrx_channels(info);
2089 }
2090 
2091 static ssize_t protocol_version_show(struct device *dev,
2092 				     struct device_attribute *attr, char *buf)
2093 {
2094 	struct scmi_info *info = dev_get_drvdata(dev);
2095 
2096 	return sprintf(buf, "%u.%u\n", info->version.major_ver,
2097 		       info->version.minor_ver);
2098 }
2099 static DEVICE_ATTR_RO(protocol_version);
2100 
2101 static ssize_t firmware_version_show(struct device *dev,
2102 				     struct device_attribute *attr, char *buf)
2103 {
2104 	struct scmi_info *info = dev_get_drvdata(dev);
2105 
2106 	return sprintf(buf, "0x%x\n", info->version.impl_ver);
2107 }
2108 static DEVICE_ATTR_RO(firmware_version);
2109 
2110 static ssize_t vendor_id_show(struct device *dev,
2111 			      struct device_attribute *attr, char *buf)
2112 {
2113 	struct scmi_info *info = dev_get_drvdata(dev);
2114 
2115 	return sprintf(buf, "%s\n", info->version.vendor_id);
2116 }
2117 static DEVICE_ATTR_RO(vendor_id);
2118 
2119 static ssize_t sub_vendor_id_show(struct device *dev,
2120 				  struct device_attribute *attr, char *buf)
2121 {
2122 	struct scmi_info *info = dev_get_drvdata(dev);
2123 
2124 	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2125 }
2126 static DEVICE_ATTR_RO(sub_vendor_id);
2127 
2128 static struct attribute *versions_attrs[] = {
2129 	&dev_attr_firmware_version.attr,
2130 	&dev_attr_protocol_version.attr,
2131 	&dev_attr_vendor_id.attr,
2132 	&dev_attr_sub_vendor_id.attr,
2133 	NULL,
2134 };
2135 ATTRIBUTE_GROUPS(versions);
2136 
2137 /* Each compatible listed below must have descriptor associated with it */
2138 static const struct of_device_id scmi_of_match[] = {
2139 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2140 	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2141 #endif
2142 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2143 	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2144 #endif
2145 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2146 	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2147 #endif
2148 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2149 	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2150 #endif
2151 	{ /* Sentinel */ },
2152 };
2153 
2154 MODULE_DEVICE_TABLE(of, scmi_of_match);
2155 
2156 static struct platform_driver scmi_driver = {
2157 	.driver = {
2158 		   .name = "arm-scmi",
2159 		   .of_match_table = scmi_of_match,
2160 		   .dev_groups = versions_groups,
2161 		   },
2162 	.probe = scmi_probe,
2163 	.remove = scmi_remove,
2164 };
2165 
2166 /**
2167  * __scmi_transports_setup  - Common helper to call transport-specific
2168  * .init/.exit code if provided.
2169  *
2170  * @init: A flag to distinguish between init and exit.
2171  *
2172  * Note that, if provided, we invoke .init/.exit functions for all the
2173  * transports currently compiled in.
2174  *
2175  * Return: 0 on Success.
2176  */
2177 static inline int __scmi_transports_setup(bool init)
2178 {
2179 	int ret = 0;
2180 	const struct of_device_id *trans;
2181 
2182 	for (trans = scmi_of_match; trans->data; trans++) {
2183 		const struct scmi_desc *tdesc = trans->data;
2184 
2185 		if ((init && !tdesc->transport_init) ||
2186 		    (!init && !tdesc->transport_exit))
2187 			continue;
2188 
2189 		if (init)
2190 			ret = tdesc->transport_init();
2191 		else
2192 			tdesc->transport_exit();
2193 
2194 		if (ret) {
2195 			pr_err("SCMI transport %s FAILED initialization!\n",
2196 			       trans->compatible);
2197 			break;
2198 		}
2199 	}
2200 
2201 	return ret;
2202 }
2203 
2204 static int __init scmi_transports_init(void)
2205 {
2206 	return __scmi_transports_setup(true);
2207 }
2208 
2209 static void __exit scmi_transports_exit(void)
2210 {
2211 	__scmi_transports_setup(false);
2212 }
2213 
2214 static int __init scmi_driver_init(void)
2215 {
2216 	int ret;
2217 
2218 	/* Bail out if no SCMI transport was configured */
2219 	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
2220 		return -EINVAL;
2221 
2222 	scmi_bus_init();
2223 
2224 	/* Initialize any compiled-in transport which provided an init/exit */
2225 	ret = scmi_transports_init();
2226 	if (ret)
2227 		return ret;
2228 
2229 	scmi_base_register();
2230 
2231 	scmi_clock_register();
2232 	scmi_perf_register();
2233 	scmi_power_register();
2234 	scmi_reset_register();
2235 	scmi_sensors_register();
2236 	scmi_voltage_register();
2237 	scmi_system_register();
2238 
2239 	return platform_driver_register(&scmi_driver);
2240 }
2241 subsys_initcall(scmi_driver_init);
2242 
2243 static void __exit scmi_driver_exit(void)
2244 {
2245 	scmi_base_unregister();
2246 
2247 	scmi_clock_unregister();
2248 	scmi_perf_unregister();
2249 	scmi_power_unregister();
2250 	scmi_reset_unregister();
2251 	scmi_sensors_unregister();
2252 	scmi_voltage_unregister();
2253 	scmi_system_unregister();
2254 
2255 	scmi_bus_exit();
2256 
2257 	scmi_transports_exit();
2258 
2259 	platform_driver_unregister(&scmi_driver);
2260 }
2261 module_exit(scmi_driver_exit);
2262 
2263 MODULE_ALIAS("platform:arm-scmi");
2264 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2265 MODULE_DESCRIPTION("ARM SCMI protocol driver");
2266 MODULE_LICENSE("GPL v2");
2267