1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Message Protocol driver
4  *
5  * SCMI Message Protocol is used between the System Control Processor(SCP)
6  * and the Application Processors(AP). The Message Handling Unit(MHU)
7  * provides a mechanism for inter-processor communication between SCP's
8  * Cortex M3 and AP.
9  *
10  * SCP offers control and management of the core/cluster power states,
11  * various power domain DVFS including the core/cluster, certain system
12  * clocks configuration, thermal sensors and many others.
13  *
14  * Copyright (C) 2018-2021 ARM Ltd.
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/bitmap.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/idr.h>
24 #include <linux/io.h>
25 #include <linux/io-64-nonatomic-hi-lo.h>
26 #include <linux/kernel.h>
27 #include <linux/ktime.h>
28 #include <linux/hashtable.h>
29 #include <linux/list.h>
30 #include <linux/module.h>
31 #include <linux/of.h>
32 #include <linux/platform_device.h>
33 #include <linux/processor.h>
34 #include <linux/refcount.h>
35 #include <linux/slab.h>
36 
37 #include "common.h"
38 #include "notify.h"
39 
40 #include "raw_mode.h"
41 
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/scmi.h>
44 
45 static DEFINE_IDA(scmi_id);
46 
47 static DEFINE_IDR(scmi_protocols);
48 static DEFINE_SPINLOCK(protocol_lock);
49 
50 /* List of all SCMI devices active in system */
51 static LIST_HEAD(scmi_list);
52 /* Protection for the entire list */
53 static DEFINE_MUTEX(scmi_list_mutex);
54 /* Track the unique id for the transfers for debug & profiling purpose */
55 static atomic_t transfer_last_id;
56 
57 static struct dentry *scmi_top_dentry;
58 
59 /**
60  * struct scmi_xfers_info - Structure to manage transfer information
61  *
62  * @xfer_alloc_table: Bitmap table for allocated messages.
63  *	Index of this bitmap table is also used for message
64  *	sequence identifier.
65  * @xfer_lock: Protection for message allocation
66  * @max_msg: Maximum number of messages that can be pending
67  * @free_xfers: A free list for available to use xfers. It is initialized with
68  *		a number of xfers equal to the maximum allowed in-flight
69  *		messages.
70  * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
71  *		   currently in-flight messages.
72  */
73 struct scmi_xfers_info {
74 	unsigned long *xfer_alloc_table;
75 	spinlock_t xfer_lock;
76 	int max_msg;
77 	struct hlist_head free_xfers;
78 	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
79 };
80 
81 /**
82  * struct scmi_protocol_instance  - Describe an initialized protocol instance.
83  * @handle: Reference to the SCMI handle associated to this protocol instance.
84  * @proto: A reference to the protocol descriptor.
85  * @gid: A reference for per-protocol devres management.
86  * @users: A refcount to track effective users of this protocol.
87  * @priv: Reference for optional protocol private data.
88  * @ph: An embedded protocol handle that will be passed down to protocol
89  *	initialization code to identify this instance.
90  *
91  * Each protocol is initialized independently once for each SCMI platform in
92  * which is defined by DT and implemented by the SCMI server fw.
93  */
94 struct scmi_protocol_instance {
95 	const struct scmi_handle	*handle;
96 	const struct scmi_protocol	*proto;
97 	void				*gid;
98 	refcount_t			users;
99 	void				*priv;
100 	struct scmi_protocol_handle	ph;
101 };
102 
103 #define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
104 
105 /**
106  * struct scmi_debug_info  - Debug common info
107  * @top_dentry: A reference to the top debugfs dentry
108  * @name: Name of this SCMI instance
109  * @type: Type of this SCMI instance
110  * @is_atomic: Flag to state if the transport of this instance is atomic
111  */
112 struct scmi_debug_info {
113 	struct dentry *top_dentry;
114 	const char *name;
115 	const char *type;
116 	bool is_atomic;
117 };
118 
119 /**
120  * struct scmi_info - Structure representing a SCMI instance
121  *
122  * @id: A sequence number starting from zero identifying this instance
123  * @dev: Device pointer
124  * @desc: SoC description for this instance
125  * @version: SCMI revision information containing protocol version,
126  *	implementation version and (sub-)vendor identification.
127  * @handle: Instance of SCMI handle to send to clients
128  * @tx_minfo: Universal Transmit Message management info
129  * @rx_minfo: Universal Receive Message management info
130  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
131  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
132  * @protocols: IDR for protocols' instance descriptors initialized for
133  *	       this SCMI instance: populated on protocol's first attempted
134  *	       usage.
135  * @protocols_mtx: A mutex to protect protocols instances initialization.
136  * @protocols_imp: List of protocols implemented, currently maximum of
137  *		   scmi_revision_info.num_protocols elements allocated by the
138  *		   base protocol
139  * @active_protocols: IDR storing device_nodes for protocols actually defined
140  *		      in the DT and confirmed as implemented by fw.
141  * @atomic_threshold: Optional system wide DT-configured threshold, expressed
142  *		      in microseconds, for atomic operations.
143  *		      Only SCMI synchronous commands reported by the platform
144  *		      to have an execution latency lesser-equal to the threshold
145  *		      should be considered for atomic mode operation: such
146  *		      decision is finally left up to the SCMI drivers.
147  * @notify_priv: Pointer to private data structure specific to notifications.
148  * @node: List head
149  * @users: Number of users of this instance
150  * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
151  * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
152  *		bus
153  * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
154  * @dbg: A pointer to debugfs related data (if any)
155  * @raw: An opaque reference handle used by SCMI Raw mode.
156  */
157 struct scmi_info {
158 	int id;
159 	struct device *dev;
160 	const struct scmi_desc *desc;
161 	struct scmi_revision_info version;
162 	struct scmi_handle handle;
163 	struct scmi_xfers_info tx_minfo;
164 	struct scmi_xfers_info rx_minfo;
165 	struct idr tx_idr;
166 	struct idr rx_idr;
167 	struct idr protocols;
168 	/* Ensure mutual exclusive access to protocols instance array */
169 	struct mutex protocols_mtx;
170 	u8 *protocols_imp;
171 	struct idr active_protocols;
172 	unsigned int atomic_threshold;
173 	void *notify_priv;
174 	struct list_head node;
175 	int users;
176 	struct notifier_block bus_nb;
177 	struct notifier_block dev_req_nb;
178 	/* Serialize device creation process for this instance */
179 	struct mutex devreq_mtx;
180 	struct scmi_debug_info *dbg;
181 	void *raw;
182 };
183 
184 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
185 #define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
186 #define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)
187 
188 static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
189 {
190 	const struct scmi_protocol *proto;
191 
192 	proto = idr_find(&scmi_protocols, protocol_id);
193 	if (!proto || !try_module_get(proto->owner)) {
194 		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
195 		return NULL;
196 	}
197 
198 	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
199 
200 	return proto;
201 }
202 
203 static void scmi_protocol_put(int protocol_id)
204 {
205 	const struct scmi_protocol *proto;
206 
207 	proto = idr_find(&scmi_protocols, protocol_id);
208 	if (proto)
209 		module_put(proto->owner);
210 }
211 
212 int scmi_protocol_register(const struct scmi_protocol *proto)
213 {
214 	int ret;
215 
216 	if (!proto) {
217 		pr_err("invalid protocol\n");
218 		return -EINVAL;
219 	}
220 
221 	if (!proto->instance_init) {
222 		pr_err("missing init for protocol 0x%x\n", proto->id);
223 		return -EINVAL;
224 	}
225 
226 	spin_lock(&protocol_lock);
227 	ret = idr_alloc(&scmi_protocols, (void *)proto,
228 			proto->id, proto->id + 1, GFP_ATOMIC);
229 	spin_unlock(&protocol_lock);
230 	if (ret != proto->id) {
231 		pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
232 		       proto->id, ret);
233 		return ret;
234 	}
235 
236 	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
237 
238 	return 0;
239 }
240 EXPORT_SYMBOL_GPL(scmi_protocol_register);
241 
242 void scmi_protocol_unregister(const struct scmi_protocol *proto)
243 {
244 	spin_lock(&protocol_lock);
245 	idr_remove(&scmi_protocols, proto->id);
246 	spin_unlock(&protocol_lock);
247 
248 	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
249 }
250 EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
251 
252 /**
253  * scmi_create_protocol_devices  - Create devices for all pending requests for
254  * this SCMI instance.
255  *
256  * @np: The device node describing the protocol
257  * @info: The SCMI instance descriptor
258  * @prot_id: The protocol ID
259  * @name: The optional name of the device to be created: if not provided this
260  *	  call will lead to the creation of all the devices currently requested
261  *	  for the specified protocol.
262  */
263 static void scmi_create_protocol_devices(struct device_node *np,
264 					 struct scmi_info *info,
265 					 int prot_id, const char *name)
266 {
267 	struct scmi_device *sdev;
268 
269 	mutex_lock(&info->devreq_mtx);
270 	sdev = scmi_device_create(np, info->dev, prot_id, name);
271 	if (name && !sdev)
272 		dev_err(info->dev,
273 			"failed to create device for protocol 0x%X (%s)\n",
274 			prot_id, name);
275 	mutex_unlock(&info->devreq_mtx);
276 }
277 
278 static void scmi_destroy_protocol_devices(struct scmi_info *info,
279 					  int prot_id, const char *name)
280 {
281 	mutex_lock(&info->devreq_mtx);
282 	scmi_device_destroy(info->dev, prot_id, name);
283 	mutex_unlock(&info->devreq_mtx);
284 }
285 
286 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
287 					 void *priv)
288 {
289 	struct scmi_info *info = handle_to_scmi_info(handle);
290 
291 	info->notify_priv = priv;
292 	/* Ensure updated protocol private date are visible */
293 	smp_wmb();
294 }
295 
296 void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
297 {
298 	struct scmi_info *info = handle_to_scmi_info(handle);
299 
300 	/* Ensure protocols_private_data has been updated */
301 	smp_rmb();
302 	return info->notify_priv;
303 }
304 
305 /**
306  * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
307  *
308  * @minfo: Pointer to Tx/Rx Message management info based on channel type
309  * @xfer: The xfer to act upon
310  *
311  * Pick the next unused monotonically increasing token and set it into
312  * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
313  * reuse of freshly completed or timed-out xfers, thus mitigating the risk
314  * of incorrect association of a late and expired xfer with a live in-flight
315  * transaction, both happening to re-use the same token identifier.
316  *
317  * Since platform is NOT required to answer our request in-order we should
318  * account for a few rare but possible scenarios:
319  *
320  *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
321  *    using find_next_zero_bit() starting from candidate next_token bit
322  *
323  *  - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
324  *    are plenty of free tokens at start, so try a second pass using
325  *    find_next_zero_bit() and starting from 0.
326  *
327  *  X = used in-flight
328  *
329  * Normal
330  * ------
331  *
332  *		|- xfer_id picked
333  *   -----------+----------------------------------------------------------
334  *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
335  *   ----------------------------------------------------------------------
336  *		^
337  *		|- next_token
338  *
339  * Out-of-order pending at start
340  * -----------------------------
341  *
342  *	  |- xfer_id picked, last_token fixed
343  *   -----+----------------------------------------------------------------
344  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
345  *   ----------------------------------------------------------------------
346  *    ^
347  *    |- next_token
348  *
349  *
350  * Out-of-order pending at end
351  * ---------------------------
352  *
353  *	  |- xfer_id picked, last_token fixed
354  *   -----+----------------------------------------------------------------
355  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
356  *   ----------------------------------------------------------------------
357  *								^
358  *								|- next_token
359  *
360  * Context: Assumes to be called with @xfer_lock already acquired.
361  *
362  * Return: 0 on Success or error
363  */
364 static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
365 			       struct scmi_xfer *xfer)
366 {
367 	unsigned long xfer_id, next_token;
368 
369 	/*
370 	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
371 	 * using the pre-allocated transfer_id as a base.
372 	 * Note that the global transfer_id is shared across all message types
373 	 * so there could be holes in the allocated set of monotonic sequence
374 	 * numbers, but that is going to limit the effectiveness of the
375 	 * mitigation only in very rare limit conditions.
376 	 */
377 	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
378 
379 	/* Pick the next available xfer_id >= next_token */
380 	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
381 				     MSG_TOKEN_MAX, next_token);
382 	if (xfer_id == MSG_TOKEN_MAX) {
383 		/*
384 		 * After heavily out-of-order responses, there are no free
385 		 * tokens ahead, but only at start of xfer_alloc_table so
386 		 * try again from the beginning.
387 		 */
388 		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
389 					     MSG_TOKEN_MAX, 0);
390 		/*
391 		 * Something is wrong if we got here since there can be a
392 		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
393 		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
394 		 */
395 		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
396 			return -ENOMEM;
397 	}
398 
399 	/* Update +/- last_token accordingly if we skipped some hole */
400 	if (xfer_id != next_token)
401 		atomic_add((int)(xfer_id - next_token), &transfer_last_id);
402 
403 	xfer->hdr.seq = (u16)xfer_id;
404 
405 	return 0;
406 }
407 
408 /**
409  * scmi_xfer_token_clear  - Release the token
410  *
411  * @minfo: Pointer to Tx/Rx Message management info based on channel type
412  * @xfer: The xfer to act upon
413  */
414 static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
415 					 struct scmi_xfer *xfer)
416 {
417 	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
418 }
419 
420 /**
421  * scmi_xfer_inflight_register_unlocked  - Register the xfer as in-flight
422  *
423  * @xfer: The xfer to register
424  * @minfo: Pointer to Tx/Rx Message management info based on channel type
425  *
426  * Note that this helper assumes that the xfer to be registered as in-flight
427  * had been built using an xfer sequence number which still corresponds to a
428  * free slot in the xfer_alloc_table.
429  *
430  * Context: Assumes to be called with @xfer_lock already acquired.
431  */
432 static inline void
433 scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
434 				     struct scmi_xfers_info *minfo)
435 {
436 	/* Set in-flight */
437 	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
438 	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
439 	xfer->pending = true;
440 }
441 
442 /**
443  * scmi_xfer_inflight_register  - Try to register an xfer as in-flight
444  *
445  * @xfer: The xfer to register
446  * @minfo: Pointer to Tx/Rx Message management info based on channel type
447  *
448  * Note that this helper does NOT assume anything about the sequence number
449  * that was baked into the provided xfer, so it checks at first if it can
450  * be mapped to a free slot and fails with an error if another xfer with the
451  * same sequence number is currently still registered as in-flight.
452  *
453  * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
454  *	   could not rbe mapped to a free slot in the xfer_alloc_table.
455  */
456 static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
457 				       struct scmi_xfers_info *minfo)
458 {
459 	int ret = 0;
460 	unsigned long flags;
461 
462 	spin_lock_irqsave(&minfo->xfer_lock, flags);
463 	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
464 		scmi_xfer_inflight_register_unlocked(xfer, minfo);
465 	else
466 		ret = -EBUSY;
467 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
468 
469 	return ret;
470 }
471 
472 /**
473  * scmi_xfer_raw_inflight_register  - An helper to register the given xfer as in
474  * flight on the TX channel, if possible.
475  *
476  * @handle: Pointer to SCMI entity handle
477  * @xfer: The xfer to register
478  *
479  * Return: 0 on Success, error otherwise
480  */
481 int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
482 				    struct scmi_xfer *xfer)
483 {
484 	struct scmi_info *info = handle_to_scmi_info(handle);
485 
486 	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
487 }
488 
489 /**
490  * scmi_xfer_pending_set  - Pick a proper sequence number and mark the xfer
491  * as pending in-flight
492  *
493  * @xfer: The xfer to act upon
494  * @minfo: Pointer to Tx/Rx Message management info based on channel type
495  *
496  * Return: 0 on Success or error otherwise
497  */
498 static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
499 					struct scmi_xfers_info *minfo)
500 {
501 	int ret;
502 	unsigned long flags;
503 
504 	spin_lock_irqsave(&minfo->xfer_lock, flags);
505 	/* Set a new monotonic token as the xfer sequence number */
506 	ret = scmi_xfer_token_set(minfo, xfer);
507 	if (!ret)
508 		scmi_xfer_inflight_register_unlocked(xfer, minfo);
509 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
510 
511 	return ret;
512 }
513 
514 /**
515  * scmi_xfer_get() - Allocate one message
516  *
517  * @handle: Pointer to SCMI entity handle
518  * @minfo: Pointer to Tx/Rx Message management info based on channel type
519  *
520  * Helper function which is used by various message functions that are
521  * exposed to clients of this driver for allocating a message traffic event.
522  *
523  * Picks an xfer from the free list @free_xfers (if any available) and perform
524  * a basic initialization.
525  *
526  * Note that, at this point, still no sequence number is assigned to the
527  * allocated xfer, nor it is registered as a pending transaction.
528  *
529  * The successfully initialized xfer is refcounted.
530  *
531  * Context: Holds @xfer_lock while manipulating @free_xfers.
532  *
533  * Return: An initialized xfer if all went fine, else pointer error.
534  */
535 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
536 				       struct scmi_xfers_info *minfo)
537 {
538 	unsigned long flags;
539 	struct scmi_xfer *xfer;
540 
541 	spin_lock_irqsave(&minfo->xfer_lock, flags);
542 	if (hlist_empty(&minfo->free_xfers)) {
543 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
544 		return ERR_PTR(-ENOMEM);
545 	}
546 
547 	/* grab an xfer from the free_list */
548 	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
549 	hlist_del_init(&xfer->node);
550 
551 	/*
552 	 * Allocate transfer_id early so that can be used also as base for
553 	 * monotonic sequence number generation if needed.
554 	 */
555 	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
556 
557 	refcount_set(&xfer->users, 1);
558 	atomic_set(&xfer->busy, SCMI_XFER_FREE);
559 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
560 
561 	return xfer;
562 }
563 
564 /**
565  * scmi_xfer_raw_get  - Helper to get a bare free xfer from the TX channel
566  *
567  * @handle: Pointer to SCMI entity handle
568  *
569  * Note that xfer is taken from the TX channel structures.
570  *
571  * Return: A valid xfer on Success, or an error-pointer otherwise
572  */
573 struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
574 {
575 	struct scmi_xfer *xfer;
576 	struct scmi_info *info = handle_to_scmi_info(handle);
577 
578 	xfer = scmi_xfer_get(handle, &info->tx_minfo);
579 	if (!IS_ERR(xfer))
580 		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
581 
582 	return xfer;
583 }
584 
585 /**
586  * scmi_xfer_raw_channel_get  - Helper to get a reference to the proper channel
587  * to use for a specific protocol_id Raw transaction.
588  *
589  * @handle: Pointer to SCMI entity handle
590  * @protocol_id: Identifier of the protocol
591  *
592  * Note that in a regular SCMI stack, usually, a protocol has to be defined in
593  * the DT to have an associated channel and be usable; but in Raw mode any
594  * protocol in range is allowed, re-using the Base channel, so as to enable
595  * fuzzing on any protocol without the need of a fully compiled DT.
596  *
597  * Return: A reference to the channel to use, or an ERR_PTR
598  */
599 struct scmi_chan_info *
600 scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
601 {
602 	struct scmi_chan_info *cinfo;
603 	struct scmi_info *info = handle_to_scmi_info(handle);
604 
605 	cinfo = idr_find(&info->tx_idr, protocol_id);
606 	if (!cinfo) {
607 		if (protocol_id == SCMI_PROTOCOL_BASE)
608 			return ERR_PTR(-EINVAL);
609 		/* Use Base channel for protocols not defined for DT */
610 		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
611 		if (!cinfo)
612 			return ERR_PTR(-EINVAL);
613 		dev_warn_once(handle->dev,
614 			      "Using Base channel for protocol 0x%X\n",
615 			      protocol_id);
616 	}
617 
618 	return cinfo;
619 }
620 
621 /**
622  * __scmi_xfer_put() - Release a message
623  *
624  * @minfo: Pointer to Tx/Rx Message management info based on channel type
625  * @xfer: message that was reserved by scmi_xfer_get
626  *
627  * After refcount check, possibly release an xfer, clearing the token slot,
628  * removing xfer from @pending_xfers and putting it back into free_xfers.
629  *
630  * This holds a spinlock to maintain integrity of internal data structures.
631  */
632 static void
633 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
634 {
635 	unsigned long flags;
636 
637 	spin_lock_irqsave(&minfo->xfer_lock, flags);
638 	if (refcount_dec_and_test(&xfer->users)) {
639 		if (xfer->pending) {
640 			scmi_xfer_token_clear(minfo, xfer);
641 			hash_del(&xfer->node);
642 			xfer->pending = false;
643 		}
644 		hlist_add_head(&xfer->node, &minfo->free_xfers);
645 	}
646 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
647 }
648 
649 /**
650  * scmi_xfer_raw_put  - Release an xfer that was taken by @scmi_xfer_raw_get
651  *
652  * @handle: Pointer to SCMI entity handle
653  * @xfer: A reference to the xfer to put
654  *
655  * Note that as with other xfer_put() handlers the xfer is really effectively
656  * released only if there are no more users on the system.
657  */
658 void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
659 {
660 	struct scmi_info *info = handle_to_scmi_info(handle);
661 
662 	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
663 	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
664 	return __scmi_xfer_put(&info->tx_minfo, xfer);
665 }
666 
667 /**
668  * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
669  *
670  * @minfo: Pointer to Tx/Rx Message management info based on channel type
671  * @xfer_id: Token ID to lookup in @pending_xfers
672  *
673  * Refcounting is untouched.
674  *
675  * Context: Assumes to be called with @xfer_lock already acquired.
676  *
677  * Return: A valid xfer on Success or error otherwise
678  */
679 static struct scmi_xfer *
680 scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
681 {
682 	struct scmi_xfer *xfer = NULL;
683 
684 	if (test_bit(xfer_id, minfo->xfer_alloc_table))
685 		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
686 
687 	return xfer ?: ERR_PTR(-EINVAL);
688 }
689 
690 /**
691  * scmi_msg_response_validate  - Validate message type against state of related
692  * xfer
693  *
694  * @cinfo: A reference to the channel descriptor.
695  * @msg_type: Message type to check
696  * @xfer: A reference to the xfer to validate against @msg_type
697  *
698  * This function checks if @msg_type is congruent with the current state of
699  * a pending @xfer; if an asynchronous delayed response is received before the
700  * related synchronous response (Out-of-Order Delayed Response) the missing
701  * synchronous response is assumed to be OK and completed, carrying on with the
702  * Delayed Response: this is done to address the case in which the underlying
703  * SCMI transport can deliver such out-of-order responses.
704  *
705  * Context: Assumes to be called with xfer->lock already acquired.
706  *
707  * Return: 0 on Success, error otherwise
708  */
709 static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
710 					     u8 msg_type,
711 					     struct scmi_xfer *xfer)
712 {
713 	/*
714 	 * Even if a response was indeed expected on this slot at this point,
715 	 * a buggy platform could wrongly reply feeding us an unexpected
716 	 * delayed response we're not prepared to handle: bail-out safely
717 	 * blaming firmware.
718 	 */
719 	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
720 		dev_err(cinfo->dev,
721 			"Delayed Response for %d not expected! Buggy F/W ?\n",
722 			xfer->hdr.seq);
723 		return -EINVAL;
724 	}
725 
726 	switch (xfer->state) {
727 	case SCMI_XFER_SENT_OK:
728 		if (msg_type == MSG_TYPE_DELAYED_RESP) {
729 			/*
730 			 * Delayed Response expected but delivered earlier.
731 			 * Assume message RESPONSE was OK and skip state.
732 			 */
733 			xfer->hdr.status = SCMI_SUCCESS;
734 			xfer->state = SCMI_XFER_RESP_OK;
735 			complete(&xfer->done);
736 			dev_warn(cinfo->dev,
737 				 "Received valid OoO Delayed Response for %d\n",
738 				 xfer->hdr.seq);
739 		}
740 		break;
741 	case SCMI_XFER_RESP_OK:
742 		if (msg_type != MSG_TYPE_DELAYED_RESP)
743 			return -EINVAL;
744 		break;
745 	case SCMI_XFER_DRESP_OK:
746 		/* No further message expected once in SCMI_XFER_DRESP_OK */
747 		return -EINVAL;
748 	}
749 
750 	return 0;
751 }
752 
753 /**
754  * scmi_xfer_state_update  - Update xfer state
755  *
756  * @xfer: A reference to the xfer to update
757  * @msg_type: Type of message being processed.
758  *
759  * Note that this message is assumed to have been already successfully validated
760  * by @scmi_msg_response_validate(), so here we just update the state.
761  *
762  * Context: Assumes to be called on an xfer exclusively acquired using the
763  *	    busy flag.
764  */
765 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
766 {
767 	xfer->hdr.type = msg_type;
768 
769 	/* Unknown command types were already discarded earlier */
770 	if (xfer->hdr.type == MSG_TYPE_COMMAND)
771 		xfer->state = SCMI_XFER_RESP_OK;
772 	else
773 		xfer->state = SCMI_XFER_DRESP_OK;
774 }
775 
776 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
777 {
778 	int ret;
779 
780 	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
781 
782 	return ret == SCMI_XFER_FREE;
783 }
784 
785 /**
786  * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
787  *
788  * @cinfo: A reference to the channel descriptor.
789  * @msg_hdr: A message header to use as lookup key
790  *
791  * When a valid xfer is found for the sequence number embedded in the provided
792  * msg_hdr, reference counting is properly updated and exclusive access to this
793  * xfer is granted till released with @scmi_xfer_command_release.
794  *
795  * Return: A valid @xfer on Success or error otherwise.
796  */
797 static inline struct scmi_xfer *
798 scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
799 {
800 	int ret;
801 	unsigned long flags;
802 	struct scmi_xfer *xfer;
803 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
804 	struct scmi_xfers_info *minfo = &info->tx_minfo;
805 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
806 	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
807 
808 	/* Are we even expecting this? */
809 	spin_lock_irqsave(&minfo->xfer_lock, flags);
810 	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
811 	if (IS_ERR(xfer)) {
812 		dev_err(cinfo->dev,
813 			"Message for %d type %d is not expected!\n",
814 			xfer_id, msg_type);
815 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
816 		return xfer;
817 	}
818 	refcount_inc(&xfer->users);
819 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
820 
821 	spin_lock_irqsave(&xfer->lock, flags);
822 	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
823 	/*
824 	 * If a pending xfer was found which was also in a congruent state with
825 	 * the received message, acquire exclusive access to it setting the busy
826 	 * flag.
827 	 * Spins only on the rare limit condition of concurrent reception of
828 	 * RESP and DRESP for the same xfer.
829 	 */
830 	if (!ret) {
831 		spin_until_cond(scmi_xfer_acquired(xfer));
832 		scmi_xfer_state_update(xfer, msg_type);
833 	}
834 	spin_unlock_irqrestore(&xfer->lock, flags);
835 
836 	if (ret) {
837 		dev_err(cinfo->dev,
838 			"Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
839 			msg_type, xfer_id, msg_hdr, xfer->state);
840 		/* On error the refcount incremented above has to be dropped */
841 		__scmi_xfer_put(minfo, xfer);
842 		xfer = ERR_PTR(-EINVAL);
843 	}
844 
845 	return xfer;
846 }
847 
848 static inline void scmi_xfer_command_release(struct scmi_info *info,
849 					     struct scmi_xfer *xfer)
850 {
851 	atomic_set(&xfer->busy, SCMI_XFER_FREE);
852 	__scmi_xfer_put(&info->tx_minfo, xfer);
853 }
854 
855 static inline void scmi_clear_channel(struct scmi_info *info,
856 				      struct scmi_chan_info *cinfo)
857 {
858 	if (!cinfo->is_p2a) {
859 		dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
860 		return;
861 	}
862 
863 	if (info->desc->ops->clear_channel)
864 		info->desc->ops->clear_channel(cinfo);
865 }
866 
867 static void scmi_handle_notification(struct scmi_chan_info *cinfo,
868 				     u32 msg_hdr, void *priv)
869 {
870 	struct scmi_xfer *xfer;
871 	struct device *dev = cinfo->dev;
872 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
873 	struct scmi_xfers_info *minfo = &info->rx_minfo;
874 	ktime_t ts;
875 
876 	ts = ktime_get_boottime();
877 	xfer = scmi_xfer_get(cinfo->handle, minfo);
878 	if (IS_ERR(xfer)) {
879 		dev_err(dev, "failed to get free message slot (%ld)\n",
880 			PTR_ERR(xfer));
881 		scmi_clear_channel(info, cinfo);
882 		return;
883 	}
884 
885 	unpack_scmi_header(msg_hdr, &xfer->hdr);
886 	if (priv)
887 		/* Ensure order between xfer->priv store and following ops */
888 		smp_store_mb(xfer->priv, priv);
889 	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
890 					    xfer);
891 
892 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
893 			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
894 			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
895 
896 	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
897 		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
898 
899 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
900 			   xfer->hdr.protocol_id, xfer->hdr.seq,
901 			   MSG_TYPE_NOTIFICATION);
902 
903 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
904 		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
905 		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
906 					cinfo->id);
907 	}
908 
909 	__scmi_xfer_put(minfo, xfer);
910 
911 	scmi_clear_channel(info, cinfo);
912 }
913 
914 static void scmi_handle_response(struct scmi_chan_info *cinfo,
915 				 u32 msg_hdr, void *priv)
916 {
917 	struct scmi_xfer *xfer;
918 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
919 
920 	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
921 	if (IS_ERR(xfer)) {
922 		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
923 			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
924 
925 		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
926 			scmi_clear_channel(info, cinfo);
927 		return;
928 	}
929 
930 	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
931 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
932 		xfer->rx.len = info->desc->max_msg_size;
933 
934 	if (priv)
935 		/* Ensure order between xfer->priv store and following ops */
936 		smp_store_mb(xfer->priv, priv);
937 	info->desc->ops->fetch_response(cinfo, xfer);
938 
939 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
940 			    xfer->hdr.id,
941 			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
942 			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
943 			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
944 			    xfer->hdr.seq, xfer->hdr.status,
945 			    xfer->rx.buf, xfer->rx.len);
946 
947 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
948 			   xfer->hdr.protocol_id, xfer->hdr.seq,
949 			   xfer->hdr.type);
950 
951 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
952 		scmi_clear_channel(info, cinfo);
953 		complete(xfer->async_done);
954 	} else {
955 		complete(&xfer->done);
956 	}
957 
958 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
959 		/*
960 		 * When in polling mode avoid to queue the Raw xfer on the IRQ
961 		 * RX path since it will be already queued at the end of the TX
962 		 * poll loop.
963 		 */
964 		if (!xfer->hdr.poll_completion)
965 			scmi_raw_message_report(info->raw, xfer,
966 						SCMI_RAW_REPLY_QUEUE,
967 						cinfo->id);
968 	}
969 
970 	scmi_xfer_command_release(info, xfer);
971 }
972 
973 /**
974  * scmi_rx_callback() - callback for receiving messages
975  *
976  * @cinfo: SCMI channel info
977  * @msg_hdr: Message header
978  * @priv: Transport specific private data.
979  *
980  * Processes one received message to appropriate transfer information and
981  * signals completion of the transfer.
982  *
983  * NOTE: This function will be invoked in IRQ context, hence should be
984  * as optimal as possible.
985  */
986 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
987 {
988 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
989 
990 	switch (msg_type) {
991 	case MSG_TYPE_NOTIFICATION:
992 		scmi_handle_notification(cinfo, msg_hdr, priv);
993 		break;
994 	case MSG_TYPE_COMMAND:
995 	case MSG_TYPE_DELAYED_RESP:
996 		scmi_handle_response(cinfo, msg_hdr, priv);
997 		break;
998 	default:
999 		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
1000 		break;
1001 	}
1002 }
1003 
1004 /**
1005  * xfer_put() - Release a transmit message
1006  *
1007  * @ph: Pointer to SCMI protocol handle
1008  * @xfer: message that was reserved by xfer_get_init
1009  */
1010 static void xfer_put(const struct scmi_protocol_handle *ph,
1011 		     struct scmi_xfer *xfer)
1012 {
1013 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1014 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1015 
1016 	__scmi_xfer_put(&info->tx_minfo, xfer);
1017 }
1018 
1019 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
1020 				      struct scmi_xfer *xfer, ktime_t stop)
1021 {
1022 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1023 
1024 	/*
1025 	 * Poll also on xfer->done so that polling can be forcibly terminated
1026 	 * in case of out-of-order receptions of delayed responses
1027 	 */
1028 	return info->desc->ops->poll_done(cinfo, xfer) ||
1029 	       try_wait_for_completion(&xfer->done) ||
1030 	       ktime_after(ktime_get(), stop);
1031 }
1032 
1033 static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
1034 			       struct scmi_chan_info *cinfo,
1035 			       struct scmi_xfer *xfer, unsigned int timeout_ms)
1036 {
1037 	int ret = 0;
1038 
1039 	if (xfer->hdr.poll_completion) {
1040 		/*
1041 		 * Real polling is needed only if transport has NOT declared
1042 		 * itself to support synchronous commands replies.
1043 		 */
1044 		if (!desc->sync_cmds_completed_on_ret) {
1045 			/*
1046 			 * Poll on xfer using transport provided .poll_done();
1047 			 * assumes no completion interrupt was available.
1048 			 */
1049 			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
1050 
1051 			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
1052 								  xfer, stop));
1053 			if (ktime_after(ktime_get(), stop)) {
1054 				dev_err(dev,
1055 					"timed out in resp(caller: %pS) - polling\n",
1056 					(void *)_RET_IP_);
1057 				ret = -ETIMEDOUT;
1058 			}
1059 		}
1060 
1061 		if (!ret) {
1062 			unsigned long flags;
1063 			struct scmi_info *info =
1064 				handle_to_scmi_info(cinfo->handle);
1065 
1066 			/*
1067 			 * Do not fetch_response if an out-of-order delayed
1068 			 * response is being processed.
1069 			 */
1070 			spin_lock_irqsave(&xfer->lock, flags);
1071 			if (xfer->state == SCMI_XFER_SENT_OK) {
1072 				desc->ops->fetch_response(cinfo, xfer);
1073 				xfer->state = SCMI_XFER_RESP_OK;
1074 			}
1075 			spin_unlock_irqrestore(&xfer->lock, flags);
1076 
1077 			/* Trace polled replies. */
1078 			trace_scmi_msg_dump(info->id, cinfo->id,
1079 					    xfer->hdr.protocol_id, xfer->hdr.id,
1080 					    !SCMI_XFER_IS_RAW(xfer) ?
1081 					    "RESP" : "resp",
1082 					    xfer->hdr.seq, xfer->hdr.status,
1083 					    xfer->rx.buf, xfer->rx.len);
1084 
1085 			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1086 				struct scmi_info *info =
1087 					handle_to_scmi_info(cinfo->handle);
1088 
1089 				scmi_raw_message_report(info->raw, xfer,
1090 							SCMI_RAW_REPLY_QUEUE,
1091 							cinfo->id);
1092 			}
1093 		}
1094 	} else {
1095 		/* And we wait for the response. */
1096 		if (!wait_for_completion_timeout(&xfer->done,
1097 						 msecs_to_jiffies(timeout_ms))) {
1098 			dev_err(dev, "timed out in resp(caller: %pS)\n",
1099 				(void *)_RET_IP_);
1100 			ret = -ETIMEDOUT;
1101 		}
1102 	}
1103 
1104 	return ret;
1105 }
1106 
1107 /**
1108  * scmi_wait_for_message_response  - An helper to group all the possible ways of
1109  * waiting for a synchronous message response.
1110  *
1111  * @cinfo: SCMI channel info
1112  * @xfer: Reference to the transfer being waited for.
1113  *
1114  * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1115  * configuration flags like xfer->hdr.poll_completion.
1116  *
1117  * Return: 0 on Success, error otherwise.
1118  */
1119 static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
1120 					  struct scmi_xfer *xfer)
1121 {
1122 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1123 	struct device *dev = info->dev;
1124 
1125 	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1126 				      xfer->hdr.protocol_id, xfer->hdr.seq,
1127 				      info->desc->max_rx_timeout_ms,
1128 				      xfer->hdr.poll_completion);
1129 
1130 	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1131 				   info->desc->max_rx_timeout_ms);
1132 }
1133 
1134 /**
1135  * scmi_xfer_raw_wait_for_message_response  - An helper to wait for a message
1136  * reply to an xfer raw request on a specific channel for the required timeout.
1137  *
1138  * @cinfo: SCMI channel info
1139  * @xfer: Reference to the transfer being waited for.
1140  * @timeout_ms: The maximum timeout in milliseconds
1141  *
1142  * Return: 0 on Success, error otherwise.
1143  */
1144 int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
1145 					    struct scmi_xfer *xfer,
1146 					    unsigned int timeout_ms)
1147 {
1148 	int ret;
1149 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1150 	struct device *dev = info->dev;
1151 
1152 	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1153 	if (ret)
1154 		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1155 			pack_scmi_header(&xfer->hdr));
1156 
1157 	return ret;
1158 }
1159 
1160 /**
1161  * do_xfer() - Do one transfer
1162  *
1163  * @ph: Pointer to SCMI protocol handle
1164  * @xfer: Transfer to initiate and wait for response
1165  *
1166  * Return: -ETIMEDOUT in case of no response, if transmit error,
1167  *	return corresponding error, else if all goes well,
1168  *	return 0.
1169  */
1170 static int do_xfer(const struct scmi_protocol_handle *ph,
1171 		   struct scmi_xfer *xfer)
1172 {
1173 	int ret;
1174 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1175 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1176 	struct device *dev = info->dev;
1177 	struct scmi_chan_info *cinfo;
1178 
1179 	/* Check for polling request on custom command xfers at first */
1180 	if (xfer->hdr.poll_completion &&
1181 	    !is_transport_polling_capable(info->desc)) {
1182 		dev_warn_once(dev,
1183 			      "Polling mode is not supported by transport.\n");
1184 		return -EINVAL;
1185 	}
1186 
1187 	cinfo = idr_find(&info->tx_idr, pi->proto->id);
1188 	if (unlikely(!cinfo))
1189 		return -EINVAL;
1190 
1191 	/* True ONLY if also supported by transport. */
1192 	if (is_polling_enabled(cinfo, info->desc))
1193 		xfer->hdr.poll_completion = true;
1194 
1195 	/*
1196 	 * Initialise protocol id now from protocol handle to avoid it being
1197 	 * overridden by mistake (or malice) by the protocol code mangling with
1198 	 * the scmi_xfer structure prior to this.
1199 	 */
1200 	xfer->hdr.protocol_id = pi->proto->id;
1201 	reinit_completion(&xfer->done);
1202 
1203 	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1204 			      xfer->hdr.protocol_id, xfer->hdr.seq,
1205 			      xfer->hdr.poll_completion);
1206 
1207 	/* Clear any stale status */
1208 	xfer->hdr.status = SCMI_SUCCESS;
1209 	xfer->state = SCMI_XFER_SENT_OK;
1210 	/*
1211 	 * Even though spinlocking is not needed here since no race is possible
1212 	 * on xfer->state due to the monotonically increasing tokens allocation,
1213 	 * we must anyway ensure xfer->state initialization is not re-ordered
1214 	 * after the .send_message() to be sure that on the RX path an early
1215 	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1216 	 */
1217 	smp_mb();
1218 
1219 	ret = info->desc->ops->send_message(cinfo, xfer);
1220 	if (ret < 0) {
1221 		dev_dbg(dev, "Failed to send message %d\n", ret);
1222 		return ret;
1223 	}
1224 
1225 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1226 			    xfer->hdr.id, "CMND", xfer->hdr.seq,
1227 			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1228 
1229 	ret = scmi_wait_for_message_response(cinfo, xfer);
1230 	if (!ret && xfer->hdr.status)
1231 		ret = scmi_to_linux_errno(xfer->hdr.status);
1232 
1233 	if (info->desc->ops->mark_txdone)
1234 		info->desc->ops->mark_txdone(cinfo, ret, xfer);
1235 
1236 	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1237 			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
1238 
1239 	return ret;
1240 }
1241 
1242 static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
1243 			      struct scmi_xfer *xfer)
1244 {
1245 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1246 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1247 
1248 	xfer->rx.len = info->desc->max_msg_size;
1249 }
1250 
1251 /**
1252  * do_xfer_with_response() - Do one transfer and wait until the delayed
1253  *	response is received
1254  *
1255  * @ph: Pointer to SCMI protocol handle
1256  * @xfer: Transfer to initiate and wait for response
1257  *
1258  * Using asynchronous commands in atomic/polling mode should be avoided since
1259  * it could cause long busy-waiting here, so ignore polling for the delayed
1260  * response and WARN if it was requested for this command transaction since
1261  * upper layers should refrain from issuing such kind of requests.
1262  *
1263  * The only other option would have been to refrain from using any asynchronous
1264  * command even if made available, when an atomic transport is detected, and
1265  * instead forcibly use the synchronous version (thing that can be easily
1266  * attained at the protocol layer), but this would also have led to longer
1267  * stalls of the channel for synchronous commands and possibly timeouts.
1268  * (in other words there is usually a good reason if a platform provides an
1269  *  asynchronous version of a command and we should prefer to use it...just not
1270  *  when using atomic/polling mode)
1271  *
1272  * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1273  *	return corresponding error, else if all goes well, return 0.
1274  */
1275 static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
1276 				 struct scmi_xfer *xfer)
1277 {
1278 	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
1279 	DECLARE_COMPLETION_ONSTACK(async_response);
1280 
1281 	xfer->async_done = &async_response;
1282 
1283 	/*
1284 	 * Delayed responses should not be polled, so an async command should
1285 	 * not have been used when requiring an atomic/poll context; WARN and
1286 	 * perform instead a sleeping wait.
1287 	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
1288 	 */
1289 	WARN_ON_ONCE(xfer->hdr.poll_completion);
1290 
1291 	ret = do_xfer(ph, xfer);
1292 	if (!ret) {
1293 		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1294 			dev_err(ph->dev,
1295 				"timed out in delayed resp(caller: %pS)\n",
1296 				(void *)_RET_IP_);
1297 			ret = -ETIMEDOUT;
1298 		} else if (xfer->hdr.status) {
1299 			ret = scmi_to_linux_errno(xfer->hdr.status);
1300 		}
1301 	}
1302 
1303 	xfer->async_done = NULL;
1304 	return ret;
1305 }
1306 
1307 /**
1308  * xfer_get_init() - Allocate and initialise one message for transmit
1309  *
1310  * @ph: Pointer to SCMI protocol handle
1311  * @msg_id: Message identifier
1312  * @tx_size: transmit message size
1313  * @rx_size: receive message size
1314  * @p: pointer to the allocated and initialised message
1315  *
1316  * This function allocates the message using @scmi_xfer_get and
1317  * initialise the header.
1318  *
1319  * Return: 0 if all went fine with @p pointing to message, else
1320  *	corresponding error.
1321  */
1322 static int xfer_get_init(const struct scmi_protocol_handle *ph,
1323 			 u8 msg_id, size_t tx_size, size_t rx_size,
1324 			 struct scmi_xfer **p)
1325 {
1326 	int ret;
1327 	struct scmi_xfer *xfer;
1328 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1329 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1330 	struct scmi_xfers_info *minfo = &info->tx_minfo;
1331 	struct device *dev = info->dev;
1332 
1333 	/* Ensure we have sane transfer sizes */
1334 	if (rx_size > info->desc->max_msg_size ||
1335 	    tx_size > info->desc->max_msg_size)
1336 		return -ERANGE;
1337 
1338 	xfer = scmi_xfer_get(pi->handle, minfo);
1339 	if (IS_ERR(xfer)) {
1340 		ret = PTR_ERR(xfer);
1341 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
1342 		return ret;
1343 	}
1344 
1345 	/* Pick a sequence number and register this xfer as in-flight */
1346 	ret = scmi_xfer_pending_set(xfer, minfo);
1347 	if (ret) {
1348 		dev_err(pi->handle->dev,
1349 			"Failed to get monotonic token %d\n", ret);
1350 		__scmi_xfer_put(minfo, xfer);
1351 		return ret;
1352 	}
1353 
1354 	xfer->tx.len = tx_size;
1355 	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1356 	xfer->hdr.type = MSG_TYPE_COMMAND;
1357 	xfer->hdr.id = msg_id;
1358 	xfer->hdr.poll_completion = false;
1359 
1360 	*p = xfer;
1361 
1362 	return 0;
1363 }
1364 
1365 /**
1366  * version_get() - command to get the revision of the SCMI entity
1367  *
1368  * @ph: Pointer to SCMI protocol handle
1369  * @version: Holds returned version of protocol.
1370  *
1371  * Updates the SCMI information in the internal data structure.
1372  *
1373  * Return: 0 if all went fine, else return appropriate error.
1374  */
1375 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1376 {
1377 	int ret;
1378 	__le32 *rev_info;
1379 	struct scmi_xfer *t;
1380 
1381 	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1382 	if (ret)
1383 		return ret;
1384 
1385 	ret = do_xfer(ph, t);
1386 	if (!ret) {
1387 		rev_info = t->rx.buf;
1388 		*version = le32_to_cpu(*rev_info);
1389 	}
1390 
1391 	xfer_put(ph, t);
1392 	return ret;
1393 }
1394 
1395 /**
1396  * scmi_set_protocol_priv  - Set protocol specific data at init time
1397  *
1398  * @ph: A reference to the protocol handle.
1399  * @priv: The private data to set.
1400  *
1401  * Return: 0 on Success
1402  */
1403 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1404 				  void *priv)
1405 {
1406 	struct scmi_protocol_instance *pi = ph_to_pi(ph);
1407 
1408 	pi->priv = priv;
1409 
1410 	return 0;
1411 }
1412 
1413 /**
1414  * scmi_get_protocol_priv  - Set protocol specific data at init time
1415  *
1416  * @ph: A reference to the protocol handle.
1417  *
1418  * Return: Protocol private data if any was set.
1419  */
1420 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1421 {
1422 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1423 
1424 	return pi->priv;
1425 }
1426 
1427 static const struct scmi_xfer_ops xfer_ops = {
1428 	.version_get = version_get,
1429 	.xfer_get_init = xfer_get_init,
1430 	.reset_rx_to_maxsz = reset_rx_to_maxsz,
1431 	.do_xfer = do_xfer,
1432 	.do_xfer_with_response = do_xfer_with_response,
1433 	.xfer_put = xfer_put,
1434 };
1435 
1436 struct scmi_msg_resp_domain_name_get {
1437 	__le32 flags;
1438 	u8 name[SCMI_MAX_STR_SIZE];
1439 };
1440 
1441 /**
1442  * scmi_common_extended_name_get  - Common helper to get extended resources name
1443  * @ph: A protocol handle reference.
1444  * @cmd_id: The specific command ID to use.
1445  * @res_id: The specific resource ID to use.
1446  * @name: A pointer to the preallocated area where the retrieved name will be
1447  *	  stored as a NULL terminated string.
1448  * @len: The len in bytes of the @name char array.
1449  *
1450  * Return: 0 on Succcess
1451  */
1452 static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1453 					 u8 cmd_id, u32 res_id, char *name,
1454 					 size_t len)
1455 {
1456 	int ret;
1457 	struct scmi_xfer *t;
1458 	struct scmi_msg_resp_domain_name_get *resp;
1459 
1460 	ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
1461 				      sizeof(*resp), &t);
1462 	if (ret)
1463 		goto out;
1464 
1465 	put_unaligned_le32(res_id, t->tx.buf);
1466 	resp = t->rx.buf;
1467 
1468 	ret = ph->xops->do_xfer(ph, t);
1469 	if (!ret)
1470 		strscpy(name, resp->name, len);
1471 
1472 	ph->xops->xfer_put(ph, t);
1473 out:
1474 	if (ret)
1475 		dev_warn(ph->dev,
1476 			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1477 			 res_id, ret, name);
1478 	return ret;
1479 }
1480 
1481 /**
1482  * struct scmi_iterator  - Iterator descriptor
1483  * @msg: A reference to the message TX buffer; filled by @prepare_message with
1484  *	 a proper custom command payload for each multi-part command request.
1485  * @resp: A reference to the response RX buffer; used by @update_state and
1486  *	  @process_response to parse the multi-part replies.
1487  * @t: A reference to the underlying xfer initialized and used transparently by
1488  *     the iterator internal routines.
1489  * @ph: A reference to the associated protocol handle to be used.
1490  * @ops: A reference to the custom provided iterator operations.
1491  * @state: The current iterator state; used and updated in turn by the iterators
1492  *	   internal routines and by the caller-provided @scmi_iterator_ops.
1493  * @priv: A reference to optional private data as provided by the caller and
1494  *	  passed back to the @@scmi_iterator_ops.
1495  */
1496 struct scmi_iterator {
1497 	void *msg;
1498 	void *resp;
1499 	struct scmi_xfer *t;
1500 	const struct scmi_protocol_handle *ph;
1501 	struct scmi_iterator_ops *ops;
1502 	struct scmi_iterator_state state;
1503 	void *priv;
1504 };
1505 
1506 static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1507 				struct scmi_iterator_ops *ops,
1508 				unsigned int max_resources, u8 msg_id,
1509 				size_t tx_size, void *priv)
1510 {
1511 	int ret;
1512 	struct scmi_iterator *i;
1513 
1514 	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1515 	if (!i)
1516 		return ERR_PTR(-ENOMEM);
1517 
1518 	i->ph = ph;
1519 	i->ops = ops;
1520 	i->priv = priv;
1521 
1522 	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1523 	if (ret) {
1524 		devm_kfree(ph->dev, i);
1525 		return ERR_PTR(ret);
1526 	}
1527 
1528 	i->state.max_resources = max_resources;
1529 	i->msg = i->t->tx.buf;
1530 	i->resp = i->t->rx.buf;
1531 
1532 	return i;
1533 }
1534 
1535 static int scmi_iterator_run(void *iter)
1536 {
1537 	int ret = -EINVAL;
1538 	struct scmi_iterator_ops *iops;
1539 	const struct scmi_protocol_handle *ph;
1540 	struct scmi_iterator_state *st;
1541 	struct scmi_iterator *i = iter;
1542 
1543 	if (!i || !i->ops || !i->ph)
1544 		return ret;
1545 
1546 	iops = i->ops;
1547 	ph = i->ph;
1548 	st = &i->state;
1549 
1550 	do {
1551 		iops->prepare_message(i->msg, st->desc_index, i->priv);
1552 		ret = ph->xops->do_xfer(ph, i->t);
1553 		if (ret)
1554 			break;
1555 
1556 		st->rx_len = i->t->rx.len;
1557 		ret = iops->update_state(st, i->resp, i->priv);
1558 		if (ret)
1559 			break;
1560 
1561 		if (st->num_returned > st->max_resources - st->desc_index) {
1562 			dev_err(ph->dev,
1563 				"No. of resources can't exceed %d\n",
1564 				st->max_resources);
1565 			ret = -EINVAL;
1566 			break;
1567 		}
1568 
1569 		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1570 		     st->loop_idx++) {
1571 			ret = iops->process_response(ph, i->resp, st, i->priv);
1572 			if (ret)
1573 				goto out;
1574 		}
1575 
1576 		st->desc_index += st->num_returned;
1577 		ph->xops->reset_rx_to_maxsz(ph, i->t);
1578 		/*
1579 		 * check for both returned and remaining to avoid infinite
1580 		 * loop due to buggy firmware
1581 		 */
1582 	} while (st->num_returned && st->num_remaining);
1583 
1584 out:
1585 	/* Finalize and destroy iterator */
1586 	ph->xops->xfer_put(ph, i->t);
1587 	devm_kfree(ph->dev, i);
1588 
1589 	return ret;
1590 }
1591 
1592 struct scmi_msg_get_fc_info {
1593 	__le32 domain;
1594 	__le32 message_id;
1595 };
1596 
1597 struct scmi_msg_resp_desc_fc {
1598 	__le32 attr;
1599 #define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
1600 #define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
1601 	__le32 rate_limit;
1602 	__le32 chan_addr_low;
1603 	__le32 chan_addr_high;
1604 	__le32 chan_size;
1605 	__le32 db_addr_low;
1606 	__le32 db_addr_high;
1607 	__le32 db_set_lmask;
1608 	__le32 db_set_hmask;
1609 	__le32 db_preserve_lmask;
1610 	__le32 db_preserve_hmask;
1611 };
1612 
1613 static void
1614 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1615 			     u8 describe_id, u32 message_id, u32 valid_size,
1616 			     u32 domain, void __iomem **p_addr,
1617 			     struct scmi_fc_db_info **p_db)
1618 {
1619 	int ret;
1620 	u32 flags;
1621 	u64 phys_addr;
1622 	u8 size;
1623 	void __iomem *addr;
1624 	struct scmi_xfer *t;
1625 	struct scmi_fc_db_info *db = NULL;
1626 	struct scmi_msg_get_fc_info *info;
1627 	struct scmi_msg_resp_desc_fc *resp;
1628 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1629 
1630 	if (!p_addr) {
1631 		ret = -EINVAL;
1632 		goto err_out;
1633 	}
1634 
1635 	ret = ph->xops->xfer_get_init(ph, describe_id,
1636 				      sizeof(*info), sizeof(*resp), &t);
1637 	if (ret)
1638 		goto err_out;
1639 
1640 	info = t->tx.buf;
1641 	info->domain = cpu_to_le32(domain);
1642 	info->message_id = cpu_to_le32(message_id);
1643 
1644 	/*
1645 	 * Bail out on error leaving fc_info addresses zeroed; this includes
1646 	 * the case in which the requested domain/message_id does NOT support
1647 	 * fastchannels at all.
1648 	 */
1649 	ret = ph->xops->do_xfer(ph, t);
1650 	if (ret)
1651 		goto err_xfer;
1652 
1653 	resp = t->rx.buf;
1654 	flags = le32_to_cpu(resp->attr);
1655 	size = le32_to_cpu(resp->chan_size);
1656 	if (size != valid_size) {
1657 		ret = -EINVAL;
1658 		goto err_xfer;
1659 	}
1660 
1661 	phys_addr = le32_to_cpu(resp->chan_addr_low);
1662 	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1663 	addr = devm_ioremap(ph->dev, phys_addr, size);
1664 	if (!addr) {
1665 		ret = -EADDRNOTAVAIL;
1666 		goto err_xfer;
1667 	}
1668 
1669 	*p_addr = addr;
1670 
1671 	if (p_db && SUPPORTS_DOORBELL(flags)) {
1672 		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1673 		if (!db) {
1674 			ret = -ENOMEM;
1675 			goto err_db;
1676 		}
1677 
1678 		size = 1 << DOORBELL_REG_WIDTH(flags);
1679 		phys_addr = le32_to_cpu(resp->db_addr_low);
1680 		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1681 		addr = devm_ioremap(ph->dev, phys_addr, size);
1682 		if (!addr) {
1683 			ret = -EADDRNOTAVAIL;
1684 			goto err_db_mem;
1685 		}
1686 
1687 		db->addr = addr;
1688 		db->width = size;
1689 		db->set = le32_to_cpu(resp->db_set_lmask);
1690 		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1691 		db->mask = le32_to_cpu(resp->db_preserve_lmask);
1692 		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1693 
1694 		*p_db = db;
1695 	}
1696 
1697 	ph->xops->xfer_put(ph, t);
1698 
1699 	dev_dbg(ph->dev,
1700 		"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1701 		pi->proto->id, message_id, domain);
1702 
1703 	return;
1704 
1705 err_db_mem:
1706 	devm_kfree(ph->dev, db);
1707 
1708 err_db:
1709 	*p_addr = NULL;
1710 
1711 err_xfer:
1712 	ph->xops->xfer_put(ph, t);
1713 
1714 err_out:
1715 	dev_warn(ph->dev,
1716 		 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1717 		 pi->proto->id, message_id, domain, ret);
1718 }
1719 
1720 #define SCMI_PROTO_FC_RING_DB(w)			\
1721 do {							\
1722 	u##w val = 0;					\
1723 							\
1724 	if (db->mask)					\
1725 		val = ioread##w(db->addr) & db->mask;	\
1726 	iowrite##w((u##w)db->set | val, db->addr);	\
1727 } while (0)
1728 
1729 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1730 {
1731 	if (!db || !db->addr)
1732 		return;
1733 
1734 	if (db->width == 1)
1735 		SCMI_PROTO_FC_RING_DB(8);
1736 	else if (db->width == 2)
1737 		SCMI_PROTO_FC_RING_DB(16);
1738 	else if (db->width == 4)
1739 		SCMI_PROTO_FC_RING_DB(32);
1740 	else /* db->width == 8 */
1741 #ifdef CONFIG_64BIT
1742 		SCMI_PROTO_FC_RING_DB(64);
1743 #else
1744 	{
1745 		u64 val = 0;
1746 
1747 		if (db->mask)
1748 			val = ioread64_hi_lo(db->addr) & db->mask;
1749 		iowrite64_hi_lo(db->set | val, db->addr);
1750 	}
1751 #endif
1752 }
1753 
1754 static const struct scmi_proto_helpers_ops helpers_ops = {
1755 	.extended_name_get = scmi_common_extended_name_get,
1756 	.iter_response_init = scmi_iterator_init,
1757 	.iter_response_run = scmi_iterator_run,
1758 	.fastchannel_init = scmi_common_fastchannel_init,
1759 	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
1760 };
1761 
1762 /**
1763  * scmi_revision_area_get  - Retrieve version memory area.
1764  *
1765  * @ph: A reference to the protocol handle.
1766  *
1767  * A helper to grab the version memory area reference during SCMI Base protocol
1768  * initialization.
1769  *
1770  * Return: A reference to the version memory area associated to the SCMI
1771  *	   instance underlying this protocol handle.
1772  */
1773 struct scmi_revision_info *
1774 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1775 {
1776 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1777 
1778 	return pi->handle->version;
1779 }
1780 
1781 /**
1782  * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
1783  * instance descriptor.
1784  * @info: The reference to the related SCMI instance.
1785  * @proto: The protocol descriptor.
1786  *
1787  * Allocate a new protocol instance descriptor, using the provided @proto
1788  * description, against the specified SCMI instance @info, and initialize it;
1789  * all resources management is handled via a dedicated per-protocol devres
1790  * group.
1791  *
1792  * Context: Assumes to be called with @protocols_mtx already acquired.
1793  * Return: A reference to a freshly allocated and initialized protocol instance
1794  *	   or ERR_PTR on failure. On failure the @proto reference is at first
1795  *	   put using @scmi_protocol_put() before releasing all the devres group.
1796  */
1797 static struct scmi_protocol_instance *
1798 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1799 				  const struct scmi_protocol *proto)
1800 {
1801 	int ret = -ENOMEM;
1802 	void *gid;
1803 	struct scmi_protocol_instance *pi;
1804 	const struct scmi_handle *handle = &info->handle;
1805 
1806 	/* Protocol specific devres group */
1807 	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1808 	if (!gid) {
1809 		scmi_protocol_put(proto->id);
1810 		goto out;
1811 	}
1812 
1813 	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1814 	if (!pi)
1815 		goto clean;
1816 
1817 	pi->gid = gid;
1818 	pi->proto = proto;
1819 	pi->handle = handle;
1820 	pi->ph.dev = handle->dev;
1821 	pi->ph.xops = &xfer_ops;
1822 	pi->ph.hops = &helpers_ops;
1823 	pi->ph.set_priv = scmi_set_protocol_priv;
1824 	pi->ph.get_priv = scmi_get_protocol_priv;
1825 	refcount_set(&pi->users, 1);
1826 	/* proto->init is assured NON NULL by scmi_protocol_register */
1827 	ret = pi->proto->instance_init(&pi->ph);
1828 	if (ret)
1829 		goto clean;
1830 
1831 	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1832 			GFP_KERNEL);
1833 	if (ret != proto->id)
1834 		goto clean;
1835 
1836 	/*
1837 	 * Warn but ignore events registration errors since we do not want
1838 	 * to skip whole protocols if their notifications are messed up.
1839 	 */
1840 	if (pi->proto->events) {
1841 		ret = scmi_register_protocol_events(handle, pi->proto->id,
1842 						    &pi->ph,
1843 						    pi->proto->events);
1844 		if (ret)
1845 			dev_warn(handle->dev,
1846 				 "Protocol:%X - Events Registration Failed - err:%d\n",
1847 				 pi->proto->id, ret);
1848 	}
1849 
1850 	devres_close_group(handle->dev, pi->gid);
1851 	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1852 
1853 	return pi;
1854 
1855 clean:
1856 	/* Take care to put the protocol module's owner before releasing all */
1857 	scmi_protocol_put(proto->id);
1858 	devres_release_group(handle->dev, gid);
1859 out:
1860 	return ERR_PTR(ret);
1861 }
1862 
1863 /**
1864  * scmi_get_protocol_instance  - Protocol initialization helper.
1865  * @handle: A reference to the SCMI platform instance.
1866  * @protocol_id: The protocol being requested.
1867  *
1868  * In case the required protocol has never been requested before for this
1869  * instance, allocate and initialize all the needed structures while handling
1870  * resource allocation with a dedicated per-protocol devres subgroup.
1871  *
1872  * Return: A reference to an initialized protocol instance or error on failure:
1873  *	   in particular returns -EPROBE_DEFER when the desired protocol could
1874  *	   NOT be found.
1875  */
1876 static struct scmi_protocol_instance * __must_check
1877 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1878 {
1879 	struct scmi_protocol_instance *pi;
1880 	struct scmi_info *info = handle_to_scmi_info(handle);
1881 
1882 	mutex_lock(&info->protocols_mtx);
1883 	pi = idr_find(&info->protocols, protocol_id);
1884 
1885 	if (pi) {
1886 		refcount_inc(&pi->users);
1887 	} else {
1888 		const struct scmi_protocol *proto;
1889 
1890 		/* Fails if protocol not registered on bus */
1891 		proto = scmi_protocol_get(protocol_id);
1892 		if (proto)
1893 			pi = scmi_alloc_init_protocol_instance(info, proto);
1894 		else
1895 			pi = ERR_PTR(-EPROBE_DEFER);
1896 	}
1897 	mutex_unlock(&info->protocols_mtx);
1898 
1899 	return pi;
1900 }
1901 
1902 /**
1903  * scmi_protocol_acquire  - Protocol acquire
1904  * @handle: A reference to the SCMI platform instance.
1905  * @protocol_id: The protocol being requested.
1906  *
1907  * Register a new user for the requested protocol on the specified SCMI
1908  * platform instance, possibly triggering its initialization on first user.
1909  *
1910  * Return: 0 if protocol was acquired successfully.
1911  */
1912 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1913 {
1914 	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1915 }
1916 
1917 /**
1918  * scmi_protocol_release  - Protocol de-initialization helper.
1919  * @handle: A reference to the SCMI platform instance.
1920  * @protocol_id: The protocol being requested.
1921  *
1922  * Remove one user for the specified protocol and triggers de-initialization
1923  * and resources de-allocation once the last user has gone.
1924  */
1925 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1926 {
1927 	struct scmi_info *info = handle_to_scmi_info(handle);
1928 	struct scmi_protocol_instance *pi;
1929 
1930 	mutex_lock(&info->protocols_mtx);
1931 	pi = idr_find(&info->protocols, protocol_id);
1932 	if (WARN_ON(!pi))
1933 		goto out;
1934 
1935 	if (refcount_dec_and_test(&pi->users)) {
1936 		void *gid = pi->gid;
1937 
1938 		if (pi->proto->events)
1939 			scmi_deregister_protocol_events(handle, protocol_id);
1940 
1941 		if (pi->proto->instance_deinit)
1942 			pi->proto->instance_deinit(&pi->ph);
1943 
1944 		idr_remove(&info->protocols, protocol_id);
1945 
1946 		scmi_protocol_put(protocol_id);
1947 
1948 		devres_release_group(handle->dev, gid);
1949 		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1950 			protocol_id);
1951 	}
1952 
1953 out:
1954 	mutex_unlock(&info->protocols_mtx);
1955 }
1956 
1957 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1958 				     u8 *prot_imp)
1959 {
1960 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1961 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1962 
1963 	info->protocols_imp = prot_imp;
1964 }
1965 
1966 static bool
1967 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1968 {
1969 	int i;
1970 	struct scmi_info *info = handle_to_scmi_info(handle);
1971 	struct scmi_revision_info *rev = handle->version;
1972 
1973 	if (!info->protocols_imp)
1974 		return false;
1975 
1976 	for (i = 0; i < rev->num_protocols; i++)
1977 		if (info->protocols_imp[i] == prot_id)
1978 			return true;
1979 	return false;
1980 }
1981 
1982 struct scmi_protocol_devres {
1983 	const struct scmi_handle *handle;
1984 	u8 protocol_id;
1985 };
1986 
1987 static void scmi_devm_release_protocol(struct device *dev, void *res)
1988 {
1989 	struct scmi_protocol_devres *dres = res;
1990 
1991 	scmi_protocol_release(dres->handle, dres->protocol_id);
1992 }
1993 
1994 static struct scmi_protocol_instance __must_check *
1995 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
1996 {
1997 	struct scmi_protocol_instance *pi;
1998 	struct scmi_protocol_devres *dres;
1999 
2000 	dres = devres_alloc(scmi_devm_release_protocol,
2001 			    sizeof(*dres), GFP_KERNEL);
2002 	if (!dres)
2003 		return ERR_PTR(-ENOMEM);
2004 
2005 	pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2006 	if (IS_ERR(pi)) {
2007 		devres_free(dres);
2008 		return pi;
2009 	}
2010 
2011 	dres->handle = sdev->handle;
2012 	dres->protocol_id = protocol_id;
2013 	devres_add(&sdev->dev, dres);
2014 
2015 	return pi;
2016 }
2017 
2018 /**
2019  * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
2020  * @sdev: A reference to an scmi_device whose embedded struct device is to
2021  *	  be used for devres accounting.
2022  * @protocol_id: The protocol being requested.
2023  * @ph: A pointer reference used to pass back the associated protocol handle.
2024  *
2025  * Get hold of a protocol accounting for its usage, eventually triggering its
2026  * initialization, and returning the protocol specific operations and related
2027  * protocol handle which will be used as first argument in most of the
2028  * protocols operations methods.
2029  * Being a devres based managed method, protocol hold will be automatically
2030  * released, and possibly de-initialized on last user, once the SCMI driver
2031  * owning the scmi_device is unbound from it.
2032  *
2033  * Return: A reference to the requested protocol operations or error.
2034  *	   Must be checked for errors by caller.
2035  */
2036 static const void __must_check *
2037 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2038 		       struct scmi_protocol_handle **ph)
2039 {
2040 	struct scmi_protocol_instance *pi;
2041 
2042 	if (!ph)
2043 		return ERR_PTR(-EINVAL);
2044 
2045 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2046 	if (IS_ERR(pi))
2047 		return pi;
2048 
2049 	*ph = &pi->ph;
2050 
2051 	return pi->proto->ops;
2052 }
2053 
2054 /**
2055  * scmi_devm_protocol_acquire  - Devres managed helper to get hold of a protocol
2056  * @sdev: A reference to an scmi_device whose embedded struct device is to
2057  *	  be used for devres accounting.
2058  * @protocol_id: The protocol being requested.
2059  *
2060  * Get hold of a protocol accounting for its usage, possibly triggering its
2061  * initialization but without getting access to its protocol specific operations
2062  * and handle.
2063  *
2064  * Being a devres based managed method, protocol hold will be automatically
2065  * released, and possibly de-initialized on last user, once the SCMI driver
2066  * owning the scmi_device is unbound from it.
2067  *
2068  * Return: 0 on SUCCESS
2069  */
2070 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2071 						   u8 protocol_id)
2072 {
2073 	struct scmi_protocol_instance *pi;
2074 
2075 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2076 	if (IS_ERR(pi))
2077 		return PTR_ERR(pi);
2078 
2079 	return 0;
2080 }
2081 
2082 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2083 {
2084 	struct scmi_protocol_devres *dres = res;
2085 
2086 	if (WARN_ON(!dres || !data))
2087 		return 0;
2088 
2089 	return dres->protocol_id == *((u8 *)data);
2090 }
2091 
2092 /**
2093  * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
2094  * @sdev: A reference to an scmi_device whose embedded struct device is to
2095  *	  be used for devres accounting.
2096  * @protocol_id: The protocol being requested.
2097  *
2098  * Explicitly release a protocol hold previously obtained calling the above
2099  * @scmi_devm_protocol_get.
2100  */
2101 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2102 {
2103 	int ret;
2104 
2105 	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2106 			     scmi_devm_protocol_match, &protocol_id);
2107 	WARN_ON(ret);
2108 }
2109 
2110 /**
2111  * scmi_is_transport_atomic  - Method to check if underlying transport for an
2112  * SCMI instance is configured as atomic.
2113  *
2114  * @handle: A reference to the SCMI platform instance.
2115  * @atomic_threshold: An optional return value for the system wide currently
2116  *		      configured threshold for atomic operations.
2117  *
2118  * Return: True if transport is configured as atomic
2119  */
2120 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2121 				     unsigned int *atomic_threshold)
2122 {
2123 	bool ret;
2124 	struct scmi_info *info = handle_to_scmi_info(handle);
2125 
2126 	ret = info->desc->atomic_enabled &&
2127 		is_transport_polling_capable(info->desc);
2128 	if (ret && atomic_threshold)
2129 		*atomic_threshold = info->atomic_threshold;
2130 
2131 	return ret;
2132 }
2133 
2134 /**
2135  * scmi_handle_get() - Get the SCMI handle for a device
2136  *
2137  * @dev: pointer to device for which we want SCMI handle
2138  *
2139  * NOTE: The function does not track individual clients of the framework
2140  * and is expected to be maintained by caller of SCMI protocol library.
2141  * scmi_handle_put must be balanced with successful scmi_handle_get
2142  *
2143  * Return: pointer to handle if successful, NULL on error
2144  */
2145 static struct scmi_handle *scmi_handle_get(struct device *dev)
2146 {
2147 	struct list_head *p;
2148 	struct scmi_info *info;
2149 	struct scmi_handle *handle = NULL;
2150 
2151 	mutex_lock(&scmi_list_mutex);
2152 	list_for_each(p, &scmi_list) {
2153 		info = list_entry(p, struct scmi_info, node);
2154 		if (dev->parent == info->dev) {
2155 			info->users++;
2156 			handle = &info->handle;
2157 			break;
2158 		}
2159 	}
2160 	mutex_unlock(&scmi_list_mutex);
2161 
2162 	return handle;
2163 }
2164 
2165 /**
2166  * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2167  *
2168  * @handle: handle acquired by scmi_handle_get
2169  *
2170  * NOTE: The function does not track individual clients of the framework
2171  * and is expected to be maintained by caller of SCMI protocol library.
2172  * scmi_handle_put must be balanced with successful scmi_handle_get
2173  *
2174  * Return: 0 is successfully released
2175  *	if null was passed, it returns -EINVAL;
2176  */
2177 static int scmi_handle_put(const struct scmi_handle *handle)
2178 {
2179 	struct scmi_info *info;
2180 
2181 	if (!handle)
2182 		return -EINVAL;
2183 
2184 	info = handle_to_scmi_info(handle);
2185 	mutex_lock(&scmi_list_mutex);
2186 	if (!WARN_ON(!info->users))
2187 		info->users--;
2188 	mutex_unlock(&scmi_list_mutex);
2189 
2190 	return 0;
2191 }
2192 
2193 static void scmi_device_link_add(struct device *consumer,
2194 				 struct device *supplier)
2195 {
2196 	struct device_link *link;
2197 
2198 	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2199 
2200 	WARN_ON(!link);
2201 }
2202 
2203 static void scmi_set_handle(struct scmi_device *scmi_dev)
2204 {
2205 	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2206 	if (scmi_dev->handle)
2207 		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2208 }
2209 
2210 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2211 				 struct scmi_xfers_info *info)
2212 {
2213 	int i;
2214 	struct scmi_xfer *xfer;
2215 	struct device *dev = sinfo->dev;
2216 	const struct scmi_desc *desc = sinfo->desc;
2217 
2218 	/* Pre-allocated messages, no more than what hdr.seq can support */
2219 	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2220 		dev_err(dev,
2221 			"Invalid maximum messages %d, not in range [1 - %lu]\n",
2222 			info->max_msg, MSG_TOKEN_MAX);
2223 		return -EINVAL;
2224 	}
2225 
2226 	hash_init(info->pending_xfers);
2227 
2228 	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2229 	info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2230 						    GFP_KERNEL);
2231 	if (!info->xfer_alloc_table)
2232 		return -ENOMEM;
2233 
2234 	/*
2235 	 * Preallocate a number of xfers equal to max inflight messages,
2236 	 * pre-initialize the buffer pointer to pre-allocated buffers and
2237 	 * attach all of them to the free list
2238 	 */
2239 	INIT_HLIST_HEAD(&info->free_xfers);
2240 	for (i = 0; i < info->max_msg; i++) {
2241 		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2242 		if (!xfer)
2243 			return -ENOMEM;
2244 
2245 		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2246 					    GFP_KERNEL);
2247 		if (!xfer->rx.buf)
2248 			return -ENOMEM;
2249 
2250 		xfer->tx.buf = xfer->rx.buf;
2251 		init_completion(&xfer->done);
2252 		spin_lock_init(&xfer->lock);
2253 
2254 		/* Add initialized xfer to the free list */
2255 		hlist_add_head(&xfer->node, &info->free_xfers);
2256 	}
2257 
2258 	spin_lock_init(&info->xfer_lock);
2259 
2260 	return 0;
2261 }
2262 
2263 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2264 {
2265 	const struct scmi_desc *desc = sinfo->desc;
2266 
2267 	if (!desc->ops->get_max_msg) {
2268 		sinfo->tx_minfo.max_msg = desc->max_msg;
2269 		sinfo->rx_minfo.max_msg = desc->max_msg;
2270 	} else {
2271 		struct scmi_chan_info *base_cinfo;
2272 
2273 		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2274 		if (!base_cinfo)
2275 			return -EINVAL;
2276 		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2277 
2278 		/* RX channel is optional so can be skipped */
2279 		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2280 		if (base_cinfo)
2281 			sinfo->rx_minfo.max_msg =
2282 				desc->ops->get_max_msg(base_cinfo);
2283 	}
2284 
2285 	return 0;
2286 }
2287 
2288 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2289 {
2290 	int ret;
2291 
2292 	ret = scmi_channels_max_msg_configure(sinfo);
2293 	if (ret)
2294 		return ret;
2295 
2296 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2297 	if (!ret && !idr_is_empty(&sinfo->rx_idr))
2298 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2299 
2300 	return ret;
2301 }
2302 
2303 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2304 			   int prot_id, bool tx)
2305 {
2306 	int ret, idx;
2307 	char name[32];
2308 	struct scmi_chan_info *cinfo;
2309 	struct idr *idr;
2310 	struct scmi_device *tdev = NULL;
2311 
2312 	/* Transmit channel is first entry i.e. index 0 */
2313 	idx = tx ? 0 : 1;
2314 	idr = tx ? &info->tx_idr : &info->rx_idr;
2315 
2316 	if (!info->desc->ops->chan_available(of_node, idx)) {
2317 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2318 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2319 			return -EINVAL;
2320 		goto idr_alloc;
2321 	}
2322 
2323 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2324 	if (!cinfo)
2325 		return -ENOMEM;
2326 
2327 	cinfo->is_p2a = !tx;
2328 	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2329 
2330 	/* Create a unique name for this transport device */
2331 	snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2332 		 idx ? "rx" : "tx", prot_id);
2333 	/* Create a uniquely named, dedicated transport device for this chan */
2334 	tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2335 	if (!tdev) {
2336 		dev_err(info->dev,
2337 			"failed to create transport device (%s)\n", name);
2338 		devm_kfree(info->dev, cinfo);
2339 		return -EINVAL;
2340 	}
2341 	of_node_get(of_node);
2342 
2343 	cinfo->id = prot_id;
2344 	cinfo->dev = &tdev->dev;
2345 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2346 	if (ret) {
2347 		of_node_put(of_node);
2348 		scmi_device_destroy(info->dev, prot_id, name);
2349 		devm_kfree(info->dev, cinfo);
2350 		return ret;
2351 	}
2352 
2353 	if (tx && is_polling_required(cinfo, info->desc)) {
2354 		if (is_transport_polling_capable(info->desc))
2355 			dev_info(&tdev->dev,
2356 				 "Enabled polling mode TX channel - prot_id:%d\n",
2357 				 prot_id);
2358 		else
2359 			dev_warn(&tdev->dev,
2360 				 "Polling mode NOT supported by transport.\n");
2361 	}
2362 
2363 idr_alloc:
2364 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2365 	if (ret != prot_id) {
2366 		dev_err(info->dev,
2367 			"unable to allocate SCMI idr slot err %d\n", ret);
2368 		/* Destroy channel and device only if created by this call. */
2369 		if (tdev) {
2370 			of_node_put(of_node);
2371 			scmi_device_destroy(info->dev, prot_id, name);
2372 			devm_kfree(info->dev, cinfo);
2373 		}
2374 		return ret;
2375 	}
2376 
2377 	cinfo->handle = &info->handle;
2378 	return 0;
2379 }
2380 
2381 static inline int
2382 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2383 		int prot_id)
2384 {
2385 	int ret = scmi_chan_setup(info, of_node, prot_id, true);
2386 
2387 	if (!ret) {
2388 		/* Rx is optional, report only memory errors */
2389 		ret = scmi_chan_setup(info, of_node, prot_id, false);
2390 		if (ret && ret != -ENOMEM)
2391 			ret = 0;
2392 	}
2393 
2394 	return ret;
2395 }
2396 
2397 /**
2398  * scmi_channels_setup  - Helper to initialize all required channels
2399  *
2400  * @info: The SCMI instance descriptor.
2401  *
2402  * Initialize all the channels found described in the DT against the underlying
2403  * configured transport using custom defined dedicated devices instead of
2404  * borrowing devices from the SCMI drivers; this way channels are initialized
2405  * upfront during core SCMI stack probing and are no more coupled with SCMI
2406  * devices used by SCMI drivers.
2407  *
2408  * Note that, even though a pair of TX/RX channels is associated to each
2409  * protocol defined in the DT, a distinct freshly initialized channel is
2410  * created only if the DT node for the protocol at hand describes a dedicated
2411  * channel: in all the other cases the common BASE protocol channel is reused.
2412  *
2413  * Return: 0 on Success
2414  */
2415 static int scmi_channels_setup(struct scmi_info *info)
2416 {
2417 	int ret;
2418 	struct device_node *child, *top_np = info->dev->of_node;
2419 
2420 	/* Initialize a common generic channel at first */
2421 	ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2422 	if (ret)
2423 		return ret;
2424 
2425 	for_each_available_child_of_node(top_np, child) {
2426 		u32 prot_id;
2427 
2428 		if (of_property_read_u32(child, "reg", &prot_id))
2429 			continue;
2430 
2431 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2432 			dev_err(info->dev,
2433 				"Out of range protocol %d\n", prot_id);
2434 
2435 		ret = scmi_txrx_setup(info, child, prot_id);
2436 		if (ret) {
2437 			of_node_put(child);
2438 			return ret;
2439 		}
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 static int scmi_chan_destroy(int id, void *p, void *idr)
2446 {
2447 	struct scmi_chan_info *cinfo = p;
2448 
2449 	if (cinfo->dev) {
2450 		struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2451 		struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2452 
2453 		of_node_put(cinfo->dev->of_node);
2454 		scmi_device_destroy(info->dev, id, sdev->name);
2455 		cinfo->dev = NULL;
2456 	}
2457 
2458 	idr_remove(idr, id);
2459 
2460 	return 0;
2461 }
2462 
2463 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2464 {
2465 	/* At first free all channels at the transport layer ... */
2466 	idr_for_each(idr, info->desc->ops->chan_free, idr);
2467 
2468 	/* ...then destroy all underlying devices */
2469 	idr_for_each(idr, scmi_chan_destroy, idr);
2470 
2471 	idr_destroy(idr);
2472 }
2473 
2474 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2475 {
2476 	scmi_cleanup_channels(info, &info->tx_idr);
2477 
2478 	scmi_cleanup_channels(info, &info->rx_idr);
2479 }
2480 
2481 static int scmi_bus_notifier(struct notifier_block *nb,
2482 			     unsigned long action, void *data)
2483 {
2484 	struct scmi_info *info = bus_nb_to_scmi_info(nb);
2485 	struct scmi_device *sdev = to_scmi_dev(data);
2486 
2487 	/* Skip transport devices and devices of different SCMI instances */
2488 	if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2489 	    sdev->dev.parent != info->dev)
2490 		return NOTIFY_DONE;
2491 
2492 	switch (action) {
2493 	case BUS_NOTIFY_BIND_DRIVER:
2494 		/* setup handle now as the transport is ready */
2495 		scmi_set_handle(sdev);
2496 		break;
2497 	case BUS_NOTIFY_UNBOUND_DRIVER:
2498 		scmi_handle_put(sdev->handle);
2499 		sdev->handle = NULL;
2500 		break;
2501 	default:
2502 		return NOTIFY_DONE;
2503 	}
2504 
2505 	dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2506 		sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2507 		"about to be BOUND." : "UNBOUND.");
2508 
2509 	return NOTIFY_OK;
2510 }
2511 
2512 static int scmi_device_request_notifier(struct notifier_block *nb,
2513 					unsigned long action, void *data)
2514 {
2515 	struct device_node *np;
2516 	struct scmi_device_id *id_table = data;
2517 	struct scmi_info *info = req_nb_to_scmi_info(nb);
2518 
2519 	np = idr_find(&info->active_protocols, id_table->protocol_id);
2520 	if (!np)
2521 		return NOTIFY_DONE;
2522 
2523 	dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2524 		action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2525 		id_table->name, id_table->protocol_id);
2526 
2527 	switch (action) {
2528 	case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2529 		scmi_create_protocol_devices(np, info, id_table->protocol_id,
2530 					     id_table->name);
2531 		break;
2532 	case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2533 		scmi_destroy_protocol_devices(info, id_table->protocol_id,
2534 					      id_table->name);
2535 		break;
2536 	default:
2537 		return NOTIFY_DONE;
2538 	}
2539 
2540 	return NOTIFY_OK;
2541 }
2542 
2543 static void scmi_debugfs_common_cleanup(void *d)
2544 {
2545 	struct scmi_debug_info *dbg = d;
2546 
2547 	if (!dbg)
2548 		return;
2549 
2550 	debugfs_remove_recursive(dbg->top_dentry);
2551 	kfree(dbg->name);
2552 	kfree(dbg->type);
2553 }
2554 
2555 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2556 {
2557 	char top_dir[16];
2558 	struct dentry *trans, *top_dentry;
2559 	struct scmi_debug_info *dbg;
2560 	const char *c_ptr = NULL;
2561 
2562 	dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2563 	if (!dbg)
2564 		return NULL;
2565 
2566 	dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2567 	if (!dbg->name) {
2568 		devm_kfree(info->dev, dbg);
2569 		return NULL;
2570 	}
2571 
2572 	of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2573 	dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2574 	if (!dbg->type) {
2575 		kfree(dbg->name);
2576 		devm_kfree(info->dev, dbg);
2577 		return NULL;
2578 	}
2579 
2580 	snprintf(top_dir, 16, "%d", info->id);
2581 	top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2582 	trans = debugfs_create_dir("transport", top_dentry);
2583 
2584 	dbg->is_atomic = info->desc->atomic_enabled &&
2585 				is_transport_polling_capable(info->desc);
2586 
2587 	debugfs_create_str("instance_name", 0400, top_dentry,
2588 			   (char **)&dbg->name);
2589 
2590 	debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2591 			   &info->atomic_threshold);
2592 
2593 	debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2594 
2595 	debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2596 
2597 	debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
2598 			   (u32 *)&info->desc->max_rx_timeout_ms);
2599 
2600 	debugfs_create_u32("max_msg_size", 0400, trans,
2601 			   (u32 *)&info->desc->max_msg_size);
2602 
2603 	debugfs_create_u32("tx_max_msg", 0400, trans,
2604 			   (u32 *)&info->tx_minfo.max_msg);
2605 
2606 	debugfs_create_u32("rx_max_msg", 0400, trans,
2607 			   (u32 *)&info->rx_minfo.max_msg);
2608 
2609 	dbg->top_dentry = top_dentry;
2610 
2611 	if (devm_add_action_or_reset(info->dev,
2612 				     scmi_debugfs_common_cleanup, dbg))
2613 		return NULL;
2614 
2615 	return dbg;
2616 }
2617 
2618 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
2619 {
2620 	int id, num_chans = 0, ret = 0;
2621 	struct scmi_chan_info *cinfo;
2622 	u8 channels[SCMI_MAX_CHANNELS] = {};
2623 	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
2624 
2625 	if (!info->dbg)
2626 		return -EINVAL;
2627 
2628 	/* Enumerate all channels to collect their ids */
2629 	idr_for_each_entry(&info->tx_idr, cinfo, id) {
2630 		/*
2631 		 * Cannot happen, but be defensive.
2632 		 * Zero as num_chans is ok, warn and carry on.
2633 		 */
2634 		if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
2635 			dev_warn(info->dev,
2636 				 "SCMI RAW - Error enumerating channels\n");
2637 			break;
2638 		}
2639 
2640 		if (!test_bit(cinfo->id, protos)) {
2641 			channels[num_chans++] = cinfo->id;
2642 			set_bit(cinfo->id, protos);
2643 		}
2644 	}
2645 
2646 	info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
2647 				       info->id, channels, num_chans,
2648 				       info->desc, info->tx_minfo.max_msg);
2649 	if (IS_ERR(info->raw)) {
2650 		dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
2651 		ret = PTR_ERR(info->raw);
2652 		info->raw = NULL;
2653 	}
2654 
2655 	return ret;
2656 }
2657 
2658 static int scmi_probe(struct platform_device *pdev)
2659 {
2660 	int ret;
2661 	struct scmi_handle *handle;
2662 	const struct scmi_desc *desc;
2663 	struct scmi_info *info;
2664 	bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
2665 	struct device *dev = &pdev->dev;
2666 	struct device_node *child, *np = dev->of_node;
2667 
2668 	desc = of_device_get_match_data(dev);
2669 	if (!desc)
2670 		return -EINVAL;
2671 
2672 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2673 	if (!info)
2674 		return -ENOMEM;
2675 
2676 	info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
2677 	if (info->id < 0)
2678 		return info->id;
2679 
2680 	info->dev = dev;
2681 	info->desc = desc;
2682 	info->bus_nb.notifier_call = scmi_bus_notifier;
2683 	info->dev_req_nb.notifier_call = scmi_device_request_notifier;
2684 	INIT_LIST_HEAD(&info->node);
2685 	idr_init(&info->protocols);
2686 	mutex_init(&info->protocols_mtx);
2687 	idr_init(&info->active_protocols);
2688 	mutex_init(&info->devreq_mtx);
2689 
2690 	platform_set_drvdata(pdev, info);
2691 	idr_init(&info->tx_idr);
2692 	idr_init(&info->rx_idr);
2693 
2694 	handle = &info->handle;
2695 	handle->dev = info->dev;
2696 	handle->version = &info->version;
2697 	handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2698 	handle->devm_protocol_get = scmi_devm_protocol_get;
2699 	handle->devm_protocol_put = scmi_devm_protocol_put;
2700 
2701 	/* System wide atomic threshold for atomic ops .. if any */
2702 	if (!of_property_read_u32(np, "atomic-threshold-us",
2703 				  &info->atomic_threshold))
2704 		dev_info(dev,
2705 			 "SCMI System wide atomic threshold set to %d us\n",
2706 			 info->atomic_threshold);
2707 	handle->is_transport_atomic = scmi_is_transport_atomic;
2708 
2709 	if (desc->ops->link_supplier) {
2710 		ret = desc->ops->link_supplier(dev);
2711 		if (ret)
2712 			goto clear_ida;
2713 	}
2714 
2715 	/* Setup all channels described in the DT at first */
2716 	ret = scmi_channels_setup(info);
2717 	if (ret)
2718 		goto clear_ida;
2719 
2720 	ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
2721 	if (ret)
2722 		goto clear_txrx_setup;
2723 
2724 	ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
2725 					       &info->dev_req_nb);
2726 	if (ret)
2727 		goto clear_bus_notifier;
2728 
2729 	ret = scmi_xfer_info_init(info);
2730 	if (ret)
2731 		goto clear_dev_req_notifier;
2732 
2733 	if (scmi_top_dentry) {
2734 		info->dbg = scmi_debugfs_common_setup(info);
2735 		if (!info->dbg)
2736 			dev_warn(dev, "Failed to setup SCMI debugfs.\n");
2737 
2738 		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
2739 			ret = scmi_debugfs_raw_mode_setup(info);
2740 			if (!coex) {
2741 				if (ret)
2742 					goto clear_dev_req_notifier;
2743 
2744 				/* Bail out anyway when coex disabled. */
2745 				return 0;
2746 			}
2747 
2748 			/* Coex enabled, carry on in any case. */
2749 			dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
2750 		}
2751 	}
2752 
2753 	if (scmi_notification_init(handle))
2754 		dev_err(dev, "SCMI Notifications NOT available.\n");
2755 
2756 	if (info->desc->atomic_enabled &&
2757 	    !is_transport_polling_capable(info->desc))
2758 		dev_err(dev,
2759 			"Transport is not polling capable. Atomic mode not supported.\n");
2760 
2761 	/*
2762 	 * Trigger SCMI Base protocol initialization.
2763 	 * It's mandatory and won't be ever released/deinit until the
2764 	 * SCMI stack is shutdown/unloaded as a whole.
2765 	 */
2766 	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2767 	if (ret) {
2768 		dev_err(dev, "unable to communicate with SCMI\n");
2769 		if (coex)
2770 			return 0;
2771 		goto notification_exit;
2772 	}
2773 
2774 	mutex_lock(&scmi_list_mutex);
2775 	list_add_tail(&info->node, &scmi_list);
2776 	mutex_unlock(&scmi_list_mutex);
2777 
2778 	for_each_available_child_of_node(np, child) {
2779 		u32 prot_id;
2780 
2781 		if (of_property_read_u32(child, "reg", &prot_id))
2782 			continue;
2783 
2784 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2785 			dev_err(dev, "Out of range protocol %d\n", prot_id);
2786 
2787 		if (!scmi_is_protocol_implemented(handle, prot_id)) {
2788 			dev_err(dev, "SCMI protocol %d not implemented\n",
2789 				prot_id);
2790 			continue;
2791 		}
2792 
2793 		/*
2794 		 * Save this valid DT protocol descriptor amongst
2795 		 * @active_protocols for this SCMI instance/
2796 		 */
2797 		ret = idr_alloc(&info->active_protocols, child,
2798 				prot_id, prot_id + 1, GFP_KERNEL);
2799 		if (ret != prot_id) {
2800 			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2801 				prot_id);
2802 			continue;
2803 		}
2804 
2805 		of_node_get(child);
2806 		scmi_create_protocol_devices(child, info, prot_id, NULL);
2807 	}
2808 
2809 	return 0;
2810 
2811 notification_exit:
2812 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2813 		scmi_raw_mode_cleanup(info->raw);
2814 	scmi_notification_exit(&info->handle);
2815 clear_dev_req_notifier:
2816 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2817 					   &info->dev_req_nb);
2818 clear_bus_notifier:
2819 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2820 clear_txrx_setup:
2821 	scmi_cleanup_txrx_channels(info);
2822 clear_ida:
2823 	ida_free(&scmi_id, info->id);
2824 	return ret;
2825 }
2826 
2827 static int scmi_remove(struct platform_device *pdev)
2828 {
2829 	int id;
2830 	struct scmi_info *info = platform_get_drvdata(pdev);
2831 	struct device_node *child;
2832 
2833 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
2834 		scmi_raw_mode_cleanup(info->raw);
2835 
2836 	mutex_lock(&scmi_list_mutex);
2837 	if (info->users)
2838 		dev_warn(&pdev->dev,
2839 			 "Still active SCMI users will be forcibly unbound.\n");
2840 	list_del(&info->node);
2841 	mutex_unlock(&scmi_list_mutex);
2842 
2843 	scmi_notification_exit(&info->handle);
2844 
2845 	mutex_lock(&info->protocols_mtx);
2846 	idr_destroy(&info->protocols);
2847 	mutex_unlock(&info->protocols_mtx);
2848 
2849 	idr_for_each_entry(&info->active_protocols, child, id)
2850 		of_node_put(child);
2851 	idr_destroy(&info->active_protocols);
2852 
2853 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
2854 					   &info->dev_req_nb);
2855 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
2856 
2857 	/* Safe to free channels since no more users */
2858 	scmi_cleanup_txrx_channels(info);
2859 
2860 	ida_free(&scmi_id, info->id);
2861 
2862 	return 0;
2863 }
2864 
2865 static ssize_t protocol_version_show(struct device *dev,
2866 				     struct device_attribute *attr, char *buf)
2867 {
2868 	struct scmi_info *info = dev_get_drvdata(dev);
2869 
2870 	return sprintf(buf, "%u.%u\n", info->version.major_ver,
2871 		       info->version.minor_ver);
2872 }
2873 static DEVICE_ATTR_RO(protocol_version);
2874 
2875 static ssize_t firmware_version_show(struct device *dev,
2876 				     struct device_attribute *attr, char *buf)
2877 {
2878 	struct scmi_info *info = dev_get_drvdata(dev);
2879 
2880 	return sprintf(buf, "0x%x\n", info->version.impl_ver);
2881 }
2882 static DEVICE_ATTR_RO(firmware_version);
2883 
2884 static ssize_t vendor_id_show(struct device *dev,
2885 			      struct device_attribute *attr, char *buf)
2886 {
2887 	struct scmi_info *info = dev_get_drvdata(dev);
2888 
2889 	return sprintf(buf, "%s\n", info->version.vendor_id);
2890 }
2891 static DEVICE_ATTR_RO(vendor_id);
2892 
2893 static ssize_t sub_vendor_id_show(struct device *dev,
2894 				  struct device_attribute *attr, char *buf)
2895 {
2896 	struct scmi_info *info = dev_get_drvdata(dev);
2897 
2898 	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2899 }
2900 static DEVICE_ATTR_RO(sub_vendor_id);
2901 
2902 static struct attribute *versions_attrs[] = {
2903 	&dev_attr_firmware_version.attr,
2904 	&dev_attr_protocol_version.attr,
2905 	&dev_attr_vendor_id.attr,
2906 	&dev_attr_sub_vendor_id.attr,
2907 	NULL,
2908 };
2909 ATTRIBUTE_GROUPS(versions);
2910 
2911 /* Each compatible listed below must have descriptor associated with it */
2912 static const struct of_device_id scmi_of_match[] = {
2913 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2914 	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2915 #endif
2916 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2917 	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2918 #endif
2919 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2920 	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2921 	{ .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
2922 #endif
2923 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2924 	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2925 #endif
2926 	{ /* Sentinel */ },
2927 };
2928 
2929 MODULE_DEVICE_TABLE(of, scmi_of_match);
2930 
2931 static struct platform_driver scmi_driver = {
2932 	.driver = {
2933 		   .name = "arm-scmi",
2934 		   .suppress_bind_attrs = true,
2935 		   .of_match_table = scmi_of_match,
2936 		   .dev_groups = versions_groups,
2937 		   },
2938 	.probe = scmi_probe,
2939 	.remove = scmi_remove,
2940 };
2941 
2942 /**
2943  * __scmi_transports_setup  - Common helper to call transport-specific
2944  * .init/.exit code if provided.
2945  *
2946  * @init: A flag to distinguish between init and exit.
2947  *
2948  * Note that, if provided, we invoke .init/.exit functions for all the
2949  * transports currently compiled in.
2950  *
2951  * Return: 0 on Success.
2952  */
2953 static inline int __scmi_transports_setup(bool init)
2954 {
2955 	int ret = 0;
2956 	const struct of_device_id *trans;
2957 
2958 	for (trans = scmi_of_match; trans->data; trans++) {
2959 		const struct scmi_desc *tdesc = trans->data;
2960 
2961 		if ((init && !tdesc->transport_init) ||
2962 		    (!init && !tdesc->transport_exit))
2963 			continue;
2964 
2965 		if (init)
2966 			ret = tdesc->transport_init();
2967 		else
2968 			tdesc->transport_exit();
2969 
2970 		if (ret) {
2971 			pr_err("SCMI transport %s FAILED initialization!\n",
2972 			       trans->compatible);
2973 			break;
2974 		}
2975 	}
2976 
2977 	return ret;
2978 }
2979 
2980 static int __init scmi_transports_init(void)
2981 {
2982 	return __scmi_transports_setup(true);
2983 }
2984 
2985 static void __exit scmi_transports_exit(void)
2986 {
2987 	__scmi_transports_setup(false);
2988 }
2989 
2990 static struct dentry *scmi_debugfs_init(void)
2991 {
2992 	struct dentry *d;
2993 
2994 	d = debugfs_create_dir("scmi", NULL);
2995 	if (IS_ERR(d)) {
2996 		pr_err("Could NOT create SCMI top dentry.\n");
2997 		return NULL;
2998 	}
2999 
3000 	return d;
3001 }
3002 
3003 static int __init scmi_driver_init(void)
3004 {
3005 	int ret;
3006 
3007 	/* Bail out if no SCMI transport was configured */
3008 	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3009 		return -EINVAL;
3010 
3011 	/* Initialize any compiled-in transport which provided an init/exit */
3012 	ret = scmi_transports_init();
3013 	if (ret)
3014 		return ret;
3015 
3016 	if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3017 		scmi_top_dentry = scmi_debugfs_init();
3018 
3019 	scmi_base_register();
3020 
3021 	scmi_clock_register();
3022 	scmi_perf_register();
3023 	scmi_power_register();
3024 	scmi_reset_register();
3025 	scmi_sensors_register();
3026 	scmi_voltage_register();
3027 	scmi_system_register();
3028 	scmi_powercap_register();
3029 
3030 	return platform_driver_register(&scmi_driver);
3031 }
3032 module_init(scmi_driver_init);
3033 
3034 static void __exit scmi_driver_exit(void)
3035 {
3036 	scmi_base_unregister();
3037 
3038 	scmi_clock_unregister();
3039 	scmi_perf_unregister();
3040 	scmi_power_unregister();
3041 	scmi_reset_unregister();
3042 	scmi_sensors_unregister();
3043 	scmi_voltage_unregister();
3044 	scmi_system_unregister();
3045 	scmi_powercap_unregister();
3046 
3047 	scmi_transports_exit();
3048 
3049 	platform_driver_unregister(&scmi_driver);
3050 
3051 	debugfs_remove_recursive(scmi_top_dentry);
3052 }
3053 module_exit(scmi_driver_exit);
3054 
3055 MODULE_ALIAS("platform:arm-scmi");
3056 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3057 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3058 MODULE_LICENSE("GPL v2");
3059