Lines matching "scmi" and "smc" in the SCMI core driver (drivers/firmware/arm_scmi/driver.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * System Control and Management Interface (SCMI) Message Protocol driver
5 * SCMI Message Protocol is used between the System Control Processor (SCP)
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2021 ARM Ltd.
25 #include <linux/io-64-nonatomic-hi-lo.h>
43 #include <trace/events/scmi.h>
50 /* List of all SCMI devices active in system */
60 * struct scmi_xfers_info - Structure to manage transfer information
68 * a number of xfers equal to the maximum allowed in-flight
71 * currently in-flight messages.
82 * struct scmi_protocol_instance - Describe an initialized protocol instance.
83 * @handle: Reference to the SCMI handle associated to this protocol instance.
85 * @gid: A reference for per-protocol devres management.
91 * Each protocol is initialized independently once for each SCMI platform
92 * which is defined by DT and implemented by the SCMI server fw.
106 * struct scmi_debug_info - Debug common info
108 * @name: Name of this SCMI instance
109 * @type: Type of this SCMI instance
120 * struct scmi_info - Structure representing a SCMI instance
125 * @version: SCMI revision information containing protocol version,
126 * implementation version and (sub-)vendor identification.
127 * @handle: Instance of SCMI handle to send to clients
133 * this SCMI instance: populated on protocol's first attempted
141 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
143 * Only SCMI synchronous commands reported by the platform
144 * to have an execution latency less than or equal to the threshold
146 * decision is finally left up to the SCMI drivers.
150 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
151 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
153 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
155 * @raw: An opaque reference handle used by SCMI Raw mode.
193 if (!proto || !try_module_get(proto->owner)) { in scmi_protocol_get()
194 pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); in scmi_protocol_get()
198 pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); in scmi_protocol_get()
209 module_put(proto->owner); in scmi_protocol_put()
218 return -EINVAL; in scmi_protocol_register()
221 if (!proto->instance_init) { in scmi_protocol_register()
222 pr_err("missing init for protocol 0x%x\n", proto->id); in scmi_protocol_register()
223 return -EINVAL; in scmi_protocol_register()
228 proto->id, proto->id + 1, GFP_ATOMIC); in scmi_protocol_register()
230 if (ret != proto->id) { in scmi_protocol_register()
231 pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", in scmi_protocol_register()
232 proto->id, ret); in scmi_protocol_register()
236 pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_register()
245 idr_remove(&scmi_protocols, proto->id); in scmi_protocol_unregister()
248 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_unregister()
253 * scmi_create_protocol_devices - Create devices for all pending requests for
254 * this SCMI instance.
257 * @info: The SCMI instance descriptor
269 mutex_lock(&info->devreq_mtx); in scmi_create_protocol_devices()
270 sdev = scmi_device_create(np, info->dev, prot_id, name); in scmi_create_protocol_devices()
272 dev_err(info->dev, in scmi_create_protocol_devices()
275 mutex_unlock(&info->devreq_mtx); in scmi_create_protocol_devices()
281 mutex_lock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
282 scmi_device_destroy(info->dev, prot_id, name); in scmi_destroy_protocol_devices()
283 mutex_unlock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
291 info->notify_priv = priv; in scmi_notification_instance_data_set()
302 return info->notify_priv; in scmi_notification_instance_data_get()
306 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
312 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
313 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
314 * of incorrect association of a late and expired xfer with a live in-flight
315 * transaction, both happening to re-use the same token identifier.
317 * Since the platform is NOT required to answer our requests in order, we should
320 * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
323 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
327 * X = used in-flight
330 * ------
332 * |- xfer_id picked
333 * -----------+----------------------------------------------------------
335 * ----------------------------------------------------------------------
337 * |- next_token
339 * Out-of-order pending at start
340 * -----------------------------
342 * |- xfer_id picked, last_token fixed
343 * -----+----------------------------------------------------------------
345 * ----------------------------------------------------------------------
347 * |- next_token
350 * Out-of-order pending at end
351 * ---------------------------
353 * |- xfer_id picked, last_token fixed
354 * -----+----------------------------------------------------------------
356 * ----------------------------------------------------------------------
358 * |- next_token
370 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] in scmi_xfer_token_set()
371 * using the pre-allocated transfer_id as a base. in scmi_xfer_token_set()
377 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
380 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
384 * After heavily out-of-order responses, there are no free in scmi_xfer_token_set()
388 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
392 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages in scmi_xfer_token_set()
393 * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. in scmi_xfer_token_set()
396 return -ENOMEM; in scmi_xfer_token_set()
399 /* Update +/- last_token accordingly if we skipped some hole */ in scmi_xfer_token_set()
401 atomic_add((int)(xfer_id - next_token), &transfer_last_id); in scmi_xfer_token_set()
403 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
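/*
 * Standalone sketch (not from driver.c) of the token-pick strategy documented
 * above: start the search from the pre-allocated transfer_id, wrap around
 * once, and keep the monotonic counter in step when holes are skipped. A
 * plain bool array and a 10-bit token space (1024, matching MSG_TOKEN_MAX by
 * assumption) stand in for the kernel bitmap and find_next_zero_bit().
 */
#include <stdio.h>
#include <stdbool.h>

#define MSG_TOKEN_MAX 1024

static bool xfer_alloc_table[MSG_TOKEN_MAX];	/* set = token in-flight */
static unsigned int transfer_last_id;

static int token_pick(void)
{
	unsigned int next = ++transfer_last_id & (MSG_TOKEN_MAX - 1);
	unsigned int id;

	for (id = next; id < MSG_TOKEN_MAX; id++)	/* [next, MAX) */
		if (!xfer_alloc_table[id])
			goto found;
	for (id = 0; id < next; id++)			/* wrap: [0, next) */
		if (!xfer_alloc_table[id])
			goto found;
	return -1;	/* MSG_TOKEN_MAX messages already in-flight */

found:
	transfer_last_id += id - next;	/* account for skipped holes */
	xfer_alloc_table[id] = true;
	return id;
}

int main(void)
{
	int a = token_pick(), b = token_pick();

	xfer_alloc_table[a] = false;		/* 'a' completes out of order */
	printf("a=%d b=%d next=%d\n", a, b, token_pick());
	return 0;
}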
409 * scmi_xfer_token_clear - Release the token
417 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
421 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
426 * Note that this helper assumes that the xfer to be registered as in-flight
436 /* Set in-flight */ in scmi_xfer_inflight_register_unlocked()
437 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_inflight_register_unlocked()
438 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); in scmi_xfer_inflight_register_unlocked()
439 xfer->pending = true; in scmi_xfer_inflight_register_unlocked()
443 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
451 * same sequence number is currently still registered as in-flight.
453 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
462 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
463 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table)) in scmi_xfer_inflight_register()
466 ret = -EBUSY; in scmi_xfer_inflight_register()
467 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
473 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
476 * @handle: Pointer to SCMI entity handle
486 return scmi_xfer_inflight_register(xfer, &info->tx_minfo); in scmi_xfer_raw_inflight_register()
490 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
491 * as pending in-flight
504 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
509 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
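/*
 * Companion sketch (not from driver.c) for the in-flight registration and
 * lookup pair above: a busy flag per token plus a token-indexed table. The
 * driver keys a hashtable (hash_add/hash_del) with hdr.seq instead; a
 * direct-mapped array keeps this example standalone, and the xfer_lock
 * spinlock is deliberately elided.
 */
#include <errno.h>
#include <stddef.h>

#define NTOKENS 1024

struct sxfer { unsigned int seq; int pending; };

static struct sxfer *pending_xfers[NTOKENS];
static unsigned char busy_token[NTOKENS];

static int inflight_register(struct sxfer *x)
{
	if (busy_token[x->seq])
		return -EBUSY;		/* token still owned by another xfer */
	busy_token[x->seq] = 1;
	pending_xfers[x->seq] = x;
	x->pending = 1;
	return 0;
}

static struct sxfer *xfer_lookup(unsigned int token)
{
	/* Trust the table only when the busy flag marks the token live. */
	return busy_token[token] ? pending_xfers[token] : NULL;
}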
515 * scmi_xfer_get() - Allocate one message
517 * @handle: Pointer to SCMI entity handle
541 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_get()
542 if (hlist_empty(&minfo->free_xfers)) { in scmi_xfer_get()
543 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
544 return ERR_PTR(-ENOMEM); in scmi_xfer_get()
548 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
549 hlist_del_init(&xfer->node); in scmi_xfer_get()
555 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
557 refcount_set(&xfer->users, 1); in scmi_xfer_get()
558 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
559 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
565 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
567 * @handle: Pointer to SCMI entity handle
571 * Return: A valid xfer on Success, or an error-pointer otherwise
578 xfer = scmi_xfer_get(handle, &info->tx_minfo); in scmi_xfer_raw_get()
580 xfer->flags |= SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_get()
586 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
589 * @handle: Pointer to SCMI entity handle
592 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
594 * protocol in range is allowed, re-using the Base channel, so as to enable
605 cinfo = idr_find(&info->tx_idr, protocol_id); in scmi_xfer_raw_channel_get()
608 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
610 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); in scmi_xfer_raw_channel_get()
612 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
613 dev_warn_once(handle->dev, in scmi_xfer_raw_channel_get()
622 * __scmi_xfer_put() - Release a message
637 spin_lock_irqsave(&minfo->xfer_lock, flags); in __scmi_xfer_put()
638 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
639 if (xfer->pending) { in __scmi_xfer_put()
641 hash_del(&xfer->node); in __scmi_xfer_put()
642 xfer->pending = false; in __scmi_xfer_put()
644 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
646 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in __scmi_xfer_put()
650 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
652 * @handle: Pointer to SCMI entity handle
662 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_put()
663 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; in scmi_xfer_raw_put()
664 return __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_raw_put()
668 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
684 if (test_bit(xfer_id, minfo->xfer_alloc_table)) in scmi_xfer_lookup_unlocked()
685 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
687 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
691 * scmi_msg_response_validate - Validate message type against state of related
700 * related synchronous response (Out-of-Order Delayed Response) the missing
703 * SCMI transport can deliver such out-of-order responses.
705 * Context: Assumes to be called with xfer->lock already acquired.
716 * delayed response we're not prepared to handle: bail-out safely in scmi_msg_response_validate()
719 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
720 dev_err(cinfo->dev, in scmi_msg_response_validate()
722 xfer->hdr.seq); in scmi_msg_response_validate()
723 return -EINVAL; in scmi_msg_response_validate()
726 switch (xfer->state) { in scmi_msg_response_validate()
733 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
734 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
735 complete(&xfer->done); in scmi_msg_response_validate()
736 dev_warn(cinfo->dev, in scmi_msg_response_validate()
738 xfer->hdr.seq); in scmi_msg_response_validate()
743 return -EINVAL; in scmi_msg_response_validate()
747 return -EINVAL; in scmi_msg_response_validate()
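/*
 * Reduced model (not from driver.c) of the validation rules above. States
 * and type values are local stand-ins, not the kernel enums: a delayed
 * response is refused when the command was not issued asynchronously, and
 * one that overtakes the synchronous response (out-of-order delivery) fakes
 * that response first, as the code above does.
 */
#include <stdbool.h>

enum mtype { T_RESP, T_DELAYED_RESP };
enum mstate { S_SENT_OK, S_RESP_OK, S_DRESP_OK };

struct mxfer { enum mstate state; bool async; };

static int response_validate(struct mxfer *x, enum mtype t)
{
	if (t == T_DELAYED_RESP && !x->async)
		return -1;			/* unexpected delayed response */

	switch (x->state) {
	case S_SENT_OK:
		if (t == T_DELAYED_RESP)	/* OoO: synthesize the missing */
			x->state = S_RESP_OK;	/* synchronous response */
		return 0;
	case S_RESP_OK:
		return t == T_DELAYED_RESP ? 0 : -1;
	default:
		return -1;			/* late duplicate: drop it */
	}
}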
754 * scmi_xfer_state_update - Update xfer state
767 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
770 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
771 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
773 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
780 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
786 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
803 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_command_acquire()
804 struct scmi_xfers_info *minfo = &info->tx_minfo; in scmi_xfer_command_acquire()
809 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
812 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
815 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
818 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
819 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
821 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
834 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
837 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
838 "Invalid message type:%d for %d - HDR:0x%X state:%d\n", in scmi_xfer_command_acquire()
839 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
842 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
851 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
852 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
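/*
 * The busy-flag handshake from scmi_xfer_command_acquire(), modelled here
 * with C11 atomics in place of the kernel's atomic_cmpxchg(): exactly one
 * of the possible concurrent deliverers (interrupt handler vs. poller)
 * wins the FREE -> BUSY transition and processes the response.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { XFER_FREE, XFER_BUSY };

static bool xfer_acquire(atomic_int *busy)
{
	int expected = XFER_FREE;

	/* Succeeds for exactly one caller; others see BUSY and back off. */
	return atomic_compare_exchange_strong(busy, &expected, XFER_BUSY);
}

static void xfer_release(atomic_int *busy)
{
	atomic_store(busy, XFER_FREE);
}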
858 if (!cinfo->is_p2a) { in scmi_clear_channel()
859 dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n"); in scmi_clear_channel()
863 if (info->desc->ops->clear_channel) in scmi_clear_channel()
864 info->desc->ops->clear_channel(cinfo); in scmi_clear_channel()
871 struct device *dev = cinfo->dev; in scmi_handle_notification()
872 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_notification()
873 struct scmi_xfers_info *minfo = &info->rx_minfo; in scmi_handle_notification()
877 xfer = scmi_xfer_get(cinfo->handle, minfo); in scmi_handle_notification()
885 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
887 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_notification()
888 smp_store_mb(xfer->priv, priv); in scmi_handle_notification()
889 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, in scmi_handle_notification()
892 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_notification()
893 xfer->hdr.id, "NOTI", xfer->hdr.seq, in scmi_handle_notification()
894 xfer->hdr.status, xfer->rx.buf, xfer->rx.len); in scmi_handle_notification()
896 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
897 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
899 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
900 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
904 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_handle_notification()
905 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE, in scmi_handle_notification()
906 cinfo->id); in scmi_handle_notification()
918 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_response()
923 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv); in scmi_handle_response()
931 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
932 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
935 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_response()
936 smp_store_mb(xfer->priv, priv); in scmi_handle_response()
937 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
939 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_response()
940 xfer->hdr.id, in scmi_handle_response()
941 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? in scmi_handle_response()
944 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_response()
945 xfer->rx.buf, xfer->rx.len); in scmi_handle_response()
947 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
948 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
949 xfer->hdr.type); in scmi_handle_response()
951 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
953 complete(xfer->async_done); in scmi_handle_response()
955 complete(&xfer->done); in scmi_handle_response()
964 if (!xfer->hdr.poll_completion) in scmi_handle_response()
965 scmi_raw_message_report(info->raw, xfer, in scmi_handle_response()
967 cinfo->id); in scmi_handle_response()
974 * scmi_rx_callback() - callback for receiving messages
976 * @cinfo: SCMI channel info
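/*
 * Dispatch sketch for scmi_rx_callback(): the type field of the message
 * header routes delivery to the notification or the response path shown
 * above. The bit layout (type in bits [9:8]) and the type values mirror
 * the SCMI specification; the handler calls are left as comments since
 * their bodies appear earlier in this listing.
 */
#define MSG_XTRACT_TYPE(hdr)	(((hdr) >> 8) & 0x3)

enum { MSG_CMD = 0, MSG_DELAYED = 2, MSG_NOTIF = 3 };

static void rx_callback(unsigned int msg_hdr)
{
	switch (MSG_XTRACT_TYPE(msg_hdr)) {
	case MSG_NOTIF:
		/* scmi_handle_notification(cinfo, msg_hdr, priv); */
		break;
	case MSG_CMD:
	case MSG_DELAYED:
		/* scmi_handle_response(cinfo, msg_hdr, priv); */
		break;
	}
}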
1005 * xfer_put() - Release a transmit message
1007 * @ph: Pointer to SCMI protocol handle
1014 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_put()
1016 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
1022 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_done_no_timeout()
1025 * Poll also on xfer->done so that polling can be forcibly terminated in scmi_xfer_done_no_timeout()
1026 * in case of out-of-order receptions of delayed responses in scmi_xfer_done_no_timeout()
1028 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
1029 try_wait_for_completion(&xfer->done) || in scmi_xfer_done_no_timeout()
1039 if (xfer->hdr.poll_completion) { in scmi_wait_for_reply()
1044 if (!desc->sync_cmds_completed_on_ret) { in scmi_wait_for_reply()
1055 "timed out in resp(caller: %pS) - polling\n", in scmi_wait_for_reply()
1057 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1064 handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1067 * Do not fetch_response if an out-of-order delayed in scmi_wait_for_reply()
1070 spin_lock_irqsave(&xfer->lock, flags); in scmi_wait_for_reply()
1071 if (xfer->state == SCMI_XFER_SENT_OK) { in scmi_wait_for_reply()
1072 desc->ops->fetch_response(cinfo, xfer); in scmi_wait_for_reply()
1073 xfer->state = SCMI_XFER_RESP_OK; in scmi_wait_for_reply()
1075 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_wait_for_reply()
1078 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_wait_for_reply()
1079 xfer->hdr.protocol_id, xfer->hdr.id, in scmi_wait_for_reply()
1082 xfer->hdr.seq, xfer->hdr.status, in scmi_wait_for_reply()
1083 xfer->rx.buf, xfer->rx.len); in scmi_wait_for_reply()
1087 handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1089 scmi_raw_message_report(info->raw, xfer, in scmi_wait_for_reply()
1091 cinfo->id); in scmi_wait_for_reply()
1096 if (!wait_for_completion_timeout(&xfer->done, in scmi_wait_for_reply()
1100 ret = -ETIMEDOUT; in scmi_wait_for_reply()
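/*
 * The two wait strategies above, side by side in a standalone sketch: a
 * deadline-bounded busy poll for poll_completion transfers, a sleeping
 * timed wait otherwise. POSIX clocks stand in for ktime, and the
 * poll_done()/wait_done() stubs model the transport poll_done op and the
 * kernel completion.
 */
#include <stdbool.h>
#include <time.h>

static volatile bool reply_arrived;

static bool poll_done(void) { return reply_arrived; }
static bool wait_done(long timeout_ms) { (void)timeout_ms; return reply_arrived; }

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_for_reply(bool polling, long timeout_ms)
{
	if (polling) {
		long deadline = now_ms() + timeout_ms;

		while (!poll_done())		/* spin until reply or deadline */
			if (now_ms() > deadline)
				return -1;	/* maps to -ETIMEDOUT */
		return 0;
	}
	return wait_done(timeout_ms) ? 0 : -1;	/* sleeping wait */
}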
1108 * scmi_wait_for_message_response - A helper to group all the possible ways of
1111 * @cinfo: SCMI channel info
1114 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1115 * configuration flags like xfer->hdr.poll_completion.
1122 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_message_response()
1123 struct device *dev = info->dev; in scmi_wait_for_message_response()
1125 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, in scmi_wait_for_message_response()
1126 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_wait_for_message_response()
1127 info->desc->max_rx_timeout_ms, in scmi_wait_for_message_response()
1128 xfer->hdr.poll_completion); in scmi_wait_for_message_response()
1130 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer, in scmi_wait_for_message_response()
1131 info->desc->max_rx_timeout_ms); in scmi_wait_for_message_response()
1135 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1138 * @cinfo: SCMI channel info
1149 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_raw_wait_for_message_response()
1150 struct device *dev = info->dev; in scmi_xfer_raw_wait_for_message_response()
1152 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms); in scmi_xfer_raw_wait_for_message_response()
1154 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n", in scmi_xfer_raw_wait_for_message_response()
1155 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_wait_for_message_response()
1161 * do_xfer() - Do one transfer
1163 * @ph: Pointer to SCMI protocol handle
1166 * Return: -ETIMEDOUT in case of no response, if transmit error,
1175 struct scmi_info *info = handle_to_scmi_info(pi->handle); in do_xfer()
1176 struct device *dev = info->dev; in do_xfer()
1180 if (xfer->hdr.poll_completion && in do_xfer()
1181 !is_transport_polling_capable(info->desc)) { in do_xfer()
1184 return -EINVAL; in do_xfer()
1187 cinfo = idr_find(&info->tx_idr, pi->proto->id); in do_xfer()
1189 return -EINVAL; in do_xfer()
1192 if (is_polling_enabled(cinfo, info->desc)) in do_xfer()
1193 xfer->hdr.poll_completion = true; in do_xfer()
1200 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
1201 reinit_completion(&xfer->done); in do_xfer()
1203 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1204 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
1205 xfer->hdr.poll_completion); in do_xfer()
1208 xfer->hdr.status = SCMI_SUCCESS; in do_xfer()
1209 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
1212 * on xfer->state due to the monotonically increasing tokens allocation, in do_xfer()
1213 * we must anyway ensure xfer->state initialization is not re-ordered in do_xfer()
1215 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. in do_xfer()
1219 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
1225 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in do_xfer()
1226 xfer->hdr.id, "CMND", xfer->hdr.seq, in do_xfer()
1227 xfer->hdr.status, xfer->tx.buf, xfer->tx.len); in do_xfer()
1230 if (!ret && xfer->hdr.status) in do_xfer()
1231 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
1233 if (info->desc->ops->mark_txdone) in do_xfer()
1234 info->desc->ops->mark_txdone(cinfo, ret, xfer); in do_xfer()
1236 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1237 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in do_xfer()
1246 struct scmi_info *info = handle_to_scmi_info(pi->handle); in reset_rx_to_maxsz()
1248 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
1252 * do_xfer_with_response() - Do one transfer and wait until the delayed
1255 * @ph: Pointer to SCMI protocol handle
1259 * it could cause long busy-waiting here, so ignore polling for the delayed
1272 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1281 xfer->async_done = &async_response; in do_xfer_with_response()
1289 WARN_ON_ONCE(xfer->hdr.poll_completion); in do_xfer_with_response()
1293 if (!wait_for_completion_timeout(xfer->async_done, timeout)) { in do_xfer_with_response()
1294 dev_err(ph->dev, in do_xfer_with_response()
1297 ret = -ETIMEDOUT; in do_xfer_with_response()
1298 } else if (xfer->hdr.status) { in do_xfer_with_response()
1299 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
1303 xfer->async_done = NULL; in do_xfer_with_response()
1308 * xfer_get_init() - Allocate and initialise one message for transmit
1310 * @ph: Pointer to SCMI protocol handle
1329 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_get_init()
1330 struct scmi_xfers_info *minfo = &info->tx_minfo; in xfer_get_init()
1331 struct device *dev = info->dev; in xfer_get_init()
1334 if (rx_size > info->desc->max_msg_size || in xfer_get_init()
1335 tx_size > info->desc->max_msg_size) in xfer_get_init()
1336 return -ERANGE; in xfer_get_init()
1338 xfer = scmi_xfer_get(pi->handle, minfo); in xfer_get_init()
1345 /* Pick a sequence number and register this xfer as in-flight */ in xfer_get_init()
1348 dev_err(pi->handle->dev, in xfer_get_init()
1354 xfer->tx.len = tx_size; in xfer_get_init()
1355 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
1356 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
1357 xfer->hdr.id = msg_id; in xfer_get_init()
1358 xfer->hdr.poll_completion = false; in xfer_get_init()
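/*
 * Shape of a typical round trip over these ops, as protocol code elsewhere
 * in this file uses them (see scmi_common_extended_name_get() below). This
 * is a hedged sketch, not driver.c code: the message ID and payload layout
 * are hypothetical.
 */
static int example_query(const struct scmi_protocol_handle *ph, u32 domain,
			 u32 *out)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, 0x6 /* hypothetical msg_id */,
				      sizeof(domain), sizeof(*out), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);	/* fill the request */
	ret = ph->xops->do_xfer(ph, t);		/* send and wait for reply */
	if (!ret)
		*out = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);		/* return xfer to the pool */
	return ret;
}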
1366 * version_get() - command to get the revision of the SCMI entity
1368 * @ph: Pointer to SCMI protocol handle
1371 * Updates the SCMI information in the internal data structure.
1387 rev_info = t->rx.buf; in version_get()
1396 * scmi_set_protocol_priv - Set protocol specific data at init time
1408 pi->priv = priv; in scmi_set_protocol_priv()
1414 * scmi_get_protocol_priv - Get protocol specific data
1424 return pi->priv; in scmi_get_protocol_priv()
1442 * scmi_common_extended_name_get - Common helper to get extended resources name
1460 ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), in scmi_common_extended_name_get()
1465 put_unaligned_le32(res_id, t->tx.buf); in scmi_common_extended_name_get()
1466 resp = t->rx.buf; in scmi_common_extended_name_get()
1468 ret = ph->xops->do_xfer(ph, t); in scmi_common_extended_name_get()
1470 strscpy(name, resp->name, len); in scmi_common_extended_name_get()
1472 ph->xops->xfer_put(ph, t); in scmi_common_extended_name_get()
1475 dev_warn(ph->dev, in scmi_common_extended_name_get()
1476 "Failed to get extended name - id:%u (ret:%d). Using %s\n", in scmi_common_extended_name_get()
1482 * struct scmi_iterator - Iterator descriptor
1484 * a proper custom command payload for each multi-part command request.
1486 * @process_response to parse the multi-part replies.
1492 * internal routines and by the caller-provided @scmi_iterator_ops.
1514 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); in scmi_iterator_init()
1516 return ERR_PTR(-ENOMEM); in scmi_iterator_init()
1518 i->ph = ph; in scmi_iterator_init()
1519 i->ops = ops; in scmi_iterator_init()
1520 i->priv = priv; in scmi_iterator_init()
1522 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); in scmi_iterator_init()
1524 devm_kfree(ph->dev, i); in scmi_iterator_init()
1528 i->state.max_resources = max_resources; in scmi_iterator_init()
1529 i->msg = i->t->tx.buf; in scmi_iterator_init()
1530 i->resp = i->t->rx.buf; in scmi_iterator_init()
1537 int ret = -EINVAL; in scmi_iterator_run()
1543 if (!i || !i->ops || !i->ph) in scmi_iterator_run()
1546 iops = i->ops; in scmi_iterator_run()
1547 ph = i->ph; in scmi_iterator_run()
1548 st = &i->state; in scmi_iterator_run()
1551 iops->prepare_message(i->msg, st->desc_index, i->priv); in scmi_iterator_run()
1552 ret = ph->xops->do_xfer(ph, i->t); in scmi_iterator_run()
1556 st->rx_len = i->t->rx.len; in scmi_iterator_run()
1557 ret = iops->update_state(st, i->resp, i->priv); in scmi_iterator_run()
1561 if (st->num_returned > st->max_resources - st->desc_index) { in scmi_iterator_run()
1562 dev_err(ph->dev, in scmi_iterator_run()
1564 st->max_resources); in scmi_iterator_run()
1565 ret = -EINVAL; in scmi_iterator_run()
1569 for (st->loop_idx = 0; st->loop_idx < st->num_returned; in scmi_iterator_run()
1570 st->loop_idx++) { in scmi_iterator_run()
1571 ret = iops->process_response(ph, i->resp, st, i->priv); in scmi_iterator_run()
1576 st->desc_index += st->num_returned; in scmi_iterator_run()
1577 ph->xops->reset_rx_to_maxsz(ph, i->t); in scmi_iterator_run()
1582 } while (st->num_returned && st->num_remaining); in scmi_iterator_run()
1586 ph->xops->xfer_put(ph, i->t); in scmi_iterator_run()
1587 devm_kfree(ph->dev, i); in scmi_iterator_run()
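/*
 * How a protocol typically drives the iterator above: supply the three ops
 * and let scmi_iterator_run() loop over the multi-part reply. A hedged
 * sketch: the ops signatures match struct scmi_iterator_ops as used here,
 * but the message/response layouts (hypo_*) and the field packing are
 * hypothetical.
 */
struct hypo_msg  { __le32 desc_index; };
struct hypo_resp { __le32 num; __le32 id[]; };

static void hypo_prepare(void *message, unsigned int desc_index,
			 const void *priv)
{
	struct hypo_msg *msg = message;

	msg->desc_index = cpu_to_le32(desc_index);	/* next chunk start */
}

static int hypo_update(struct scmi_iterator_state *st, const void *response,
		       void *priv)
{
	const struct hypo_resp *r = response;

	/* Hypothetical packing: returned in low bits, remaining in high. */
	st->num_returned = le32_to_cpu(r->num) & 0xffff;
	st->num_remaining = le32_to_cpu(r->num) >> 16;
	return 0;
}

static int hypo_process(const struct scmi_protocol_handle *ph,
			const void *response, struct scmi_iterator_state *st,
			void *priv)
{
	const struct hypo_resp *r = response;

	((u32 *)priv)[st->desc_index + st->loop_idx] =
				le32_to_cpu(r->id[st->loop_idx]);
	return 0;
}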
1631 ret = -EINVAL; in scmi_common_fastchannel_init()
1635 ret = ph->xops->xfer_get_init(ph, describe_id, in scmi_common_fastchannel_init()
1640 info = t->tx.buf; in scmi_common_fastchannel_init()
1641 info->domain = cpu_to_le32(domain); in scmi_common_fastchannel_init()
1642 info->message_id = cpu_to_le32(message_id); in scmi_common_fastchannel_init()
1649 ret = ph->xops->do_xfer(ph, t); in scmi_common_fastchannel_init()
1653 resp = t->rx.buf; in scmi_common_fastchannel_init()
1654 flags = le32_to_cpu(resp->attr); in scmi_common_fastchannel_init()
1655 size = le32_to_cpu(resp->chan_size); in scmi_common_fastchannel_init()
1657 ret = -EINVAL; in scmi_common_fastchannel_init()
1661 phys_addr = le32_to_cpu(resp->chan_addr_low); in scmi_common_fastchannel_init()
1662 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; in scmi_common_fastchannel_init()
1663 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1665 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1672 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); in scmi_common_fastchannel_init()
1674 ret = -ENOMEM; in scmi_common_fastchannel_init()
1679 phys_addr = le32_to_cpu(resp->db_addr_low); in scmi_common_fastchannel_init()
1680 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; in scmi_common_fastchannel_init()
1681 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1683 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1687 db->addr = addr; in scmi_common_fastchannel_init()
1688 db->width = size; in scmi_common_fastchannel_init()
1689 db->set = le32_to_cpu(resp->db_set_lmask); in scmi_common_fastchannel_init()
1690 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; in scmi_common_fastchannel_init()
1691 db->mask = le32_to_cpu(resp->db_preserve_lmask); in scmi_common_fastchannel_init()
1692 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; in scmi_common_fastchannel_init()
1697 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1699 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
1701 pi->proto->id, message_id, domain); in scmi_common_fastchannel_init()
1706 devm_kfree(ph->dev, db); in scmi_common_fastchannel_init()
1712 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1715 dev_warn(ph->dev, in scmi_common_fastchannel_init()
1716 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", in scmi_common_fastchannel_init()
1717 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
1724 if (db->mask) \
1725 val = ioread##w(db->addr) & db->mask; \
1726 iowrite##w((u##w)db->set | val, db->addr); \
1731 if (!db || !db->addr) in scmi_common_fastchannel_db_ring()
1734 if (db->width == 1) in scmi_common_fastchannel_db_ring()
1736 else if (db->width == 2) in scmi_common_fastchannel_db_ring()
1738 else if (db->width == 4) in scmi_common_fastchannel_db_ring()
1740 else /* db->width == 8 */ in scmi_common_fastchannel_db_ring()
1747 if (db->mask) in scmi_common_fastchannel_db_ring()
1748 val = ioread64_hi_lo(db->addr) & db->mask; in scmi_common_fastchannel_db_ring()
1749 iowrite64_hi_lo(db->set | val, db->addr); in scmi_common_fastchannel_db_ring()
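/*
 * Self-contained model of the doorbell ring sequence above: preserve the
 * bits covered by the mask, OR in the set bits, write back. Plain pointers
 * replace the kernel ioread/iowrite MMIO accessors; only the 32-bit width
 * is shown.
 */
#include <stdint.h>

struct fc_db { volatile uint32_t *addr; uint32_t set; uint32_t mask; };

static void db_ring32(struct fc_db *db)
{
	uint32_t val = 0;

	if (db->mask)		/* read-modify-write only when some bits */
		val = *db->addr & db->mask;	/* must be preserved */
	*db->addr = db->set | val;
}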
1763 * scmi_revision_area_get - Retrieve version memory area.
1767 * A helper to grab the version memory area reference during SCMI Base protocol
1770 * Return: A reference to the version memory area associated to the SCMI
1778 return pi->handle->version; in scmi_revision_area_get()
1782 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
1784 * @info: The reference to the related SCMI instance.
1788 * description, against the specified SCMI instance @info, and initialize it;
1789 * all resource management is handled via a dedicated per-protocol devres
1801 int ret = -ENOMEM; in scmi_alloc_init_protocol_instance()
1804 const struct scmi_handle *handle = &info->handle; in scmi_alloc_init_protocol_instance()
1807 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1809 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1813 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1817 pi->gid = gid; in scmi_alloc_init_protocol_instance()
1818 pi->proto = proto; in scmi_alloc_init_protocol_instance()
1819 pi->handle = handle; in scmi_alloc_init_protocol_instance()
1820 pi->ph.dev = handle->dev; in scmi_alloc_init_protocol_instance()
1821 pi->ph.xops = &xfer_ops; in scmi_alloc_init_protocol_instance()
1822 pi->ph.hops = &helpers_ops; in scmi_alloc_init_protocol_instance()
1823 pi->ph.set_priv = scmi_set_protocol_priv; in scmi_alloc_init_protocol_instance()
1824 pi->ph.get_priv = scmi_get_protocol_priv; in scmi_alloc_init_protocol_instance()
1825 refcount_set(&pi->users, 1); in scmi_alloc_init_protocol_instance()
1826 /* proto->instance_init is assured NON NULL by scmi_protocol_register */ in scmi_alloc_init_protocol_instance()
1827 ret = pi->proto->instance_init(&pi->ph); in scmi_alloc_init_protocol_instance()
1831 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, in scmi_alloc_init_protocol_instance()
1833 if (ret != proto->id) in scmi_alloc_init_protocol_instance()
1840 if (pi->proto->events) { in scmi_alloc_init_protocol_instance()
1841 ret = scmi_register_protocol_events(handle, pi->proto->id, in scmi_alloc_init_protocol_instance()
1842 &pi->ph, in scmi_alloc_init_protocol_instance()
1843 pi->proto->events); in scmi_alloc_init_protocol_instance()
1845 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
1846 "Protocol:%X - Events Registration Failed - err:%d\n", in scmi_alloc_init_protocol_instance()
1847 pi->proto->id, ret); in scmi_alloc_init_protocol_instance()
1850 devres_close_group(handle->dev, pi->gid); in scmi_alloc_init_protocol_instance()
1851 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); in scmi_alloc_init_protocol_instance()
1857 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1858 devres_release_group(handle->dev, gid); in scmi_alloc_init_protocol_instance()
1864 * scmi_get_protocol_instance - Protocol initialization helper.
1865 * @handle: A reference to the SCMI platform instance.
1870 * resource allocation with a dedicated per-protocol devres subgroup.
1873 * in particular returns -EPROBE_DEFER when the desired protocol could
1882 mutex_lock(&info->protocols_mtx); in scmi_get_protocol_instance()
1883 pi = idr_find(&info->protocols, protocol_id); in scmi_get_protocol_instance()
1886 refcount_inc(&pi->users); in scmi_get_protocol_instance()
1895 pi = ERR_PTR(-EPROBE_DEFER); in scmi_get_protocol_instance()
1897 mutex_unlock(&info->protocols_mtx); in scmi_get_protocol_instance()
1903 * scmi_protocol_acquire - Protocol acquire
1904 * @handle: A reference to the SCMI platform instance.
1907 * Register a new user for the requested protocol on the specified SCMI
1918 * scmi_protocol_release - Protocol de-initialization helper.
1919 * @handle: A reference to the SCMI platform instance.
1922 * Remove one user for the specified protocol and triggers de-initialization
1923 * and resources de-allocation once the last user has gone.
1930 mutex_lock(&info->protocols_mtx); in scmi_protocol_release()
1931 pi = idr_find(&info->protocols, protocol_id); in scmi_protocol_release()
1935 if (refcount_dec_and_test(&pi->users)) { in scmi_protocol_release()
1936 void *gid = pi->gid; in scmi_protocol_release()
1938 if (pi->proto->events) in scmi_protocol_release()
1941 if (pi->proto->instance_deinit) in scmi_protocol_release()
1942 pi->proto->instance_deinit(&pi->ph); in scmi_protocol_release()
1944 idr_remove(&info->protocols, protocol_id); in scmi_protocol_release()
1948 devres_release_group(handle->dev, gid); in scmi_protocol_release()
1949 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", in scmi_protocol_release()
1954 mutex_unlock(&info->protocols_mtx); in scmi_protocol_release()
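/*
 * Condensed life cycle of a protocol instance as implemented above: the
 * first get initializes and takes the first reference, later gets only
 * bump the count, and the last release de-initializes. A sketch only;
 * locking, the idr, and devres grouping are elided.
 */
struct pinst { int users; int live; };

static int proto_get(struct pinst *pi)
{
	if (!pi->live) {	/* first user: instance_init() runs here */
		pi->live = 1;
		pi->users = 1;
	} else {
		pi->users++;
	}
	return 0;
}

static void proto_release(struct pinst *pi)
{
	if (--pi->users == 0)	/* last user: instance_deinit() and free */
		pi->live = 0;
}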
1961 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_setup_protocol_implemented()
1963 info->protocols_imp = prot_imp; in scmi_setup_protocol_implemented()
1971 struct scmi_revision_info *rev = handle->version; in scmi_is_protocol_implemented()
1973 if (!info->protocols_imp) in scmi_is_protocol_implemented()
1976 for (i = 0; i < rev->num_protocols; i++) in scmi_is_protocol_implemented()
1977 if (info->protocols_imp[i] == prot_id) in scmi_is_protocol_implemented()
1991 scmi_protocol_release(dres->handle, dres->protocol_id); in scmi_devm_release_protocol()
2003 return ERR_PTR(-ENOMEM); in scmi_devres_protocol_instance_get()
2005 pi = scmi_get_protocol_instance(sdev->handle, protocol_id); in scmi_devres_protocol_instance_get()
2011 dres->handle = sdev->handle; in scmi_devres_protocol_instance_get()
2012 dres->protocol_id = protocol_id; in scmi_devres_protocol_instance_get()
2013 devres_add(&sdev->dev, dres); in scmi_devres_protocol_instance_get()
2019 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2030 * released, and possibly de-initialized on last user, once the SCMI driver
2043 return ERR_PTR(-EINVAL); in scmi_devm_protocol_get()
2049 *ph = &pi->ph; in scmi_devm_protocol_get()
2051 return pi->proto->ops; in scmi_devm_protocol_get()
2055 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2065 * released, and possibly de-initialized on last user, once the SCMI driver
2089 return dres->protocol_id == *((u8 *)data); in scmi_devm_protocol_match()
2093 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2105 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, in scmi_devm_protocol_put()
2111 * scmi_is_transport_atomic - Method to check if underlying transport for an
2112 * SCMI instance is configured as atomic.
2114 * @handle: A reference to the SCMI platform instance.
2126 ret = info->desc->atomic_enabled && in scmi_is_transport_atomic()
2127 is_transport_polling_capable(info->desc); in scmi_is_transport_atomic()
2129 *atomic_threshold = info->atomic_threshold; in scmi_is_transport_atomic()
2135 * scmi_handle_get() - Get the SCMI handle for a device
2137 * @dev: pointer to device for which we want SCMI handle
2140 * and is expected to be maintained by the caller of the SCMI protocol library.
2154 if (dev->parent == info->dev) { in scmi_handle_get()
2155 info->users++; in scmi_handle_get()
2156 handle = &info->handle; in scmi_handle_get()
2166 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2171 * and is expected to be maintained by the caller of the SCMI protocol library.
2175 * if NULL was passed, it returns -EINVAL;
2182 return -EINVAL; in scmi_handle_put()
2186 if (!WARN_ON(!info->users)) in scmi_handle_put()
2187 info->users--; in scmi_handle_put()
2205 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); in scmi_set_handle()
2206 if (scmi_dev->handle) in scmi_set_handle()
2207 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); in scmi_set_handle()
2215 struct device *dev = sinfo->dev; in __scmi_xfer_info_init()
2216 const struct scmi_desc *desc = sinfo->desc; in __scmi_xfer_info_init()
2218 /* Pre-allocated messages, no more than what hdr.seq can support */ in __scmi_xfer_info_init()
2219 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { in __scmi_xfer_info_init()
2221 "Invalid maximum messages %d, not in range [1 - %lu]\n", in __scmi_xfer_info_init()
2222 info->max_msg, MSG_TOKEN_MAX); in __scmi_xfer_info_init()
2223 return -EINVAL; in __scmi_xfer_info_init()
2226 hash_init(info->pending_xfers); in __scmi_xfer_info_init()
2229 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX, in __scmi_xfer_info_init()
2231 if (!info->xfer_alloc_table) in __scmi_xfer_info_init()
2232 return -ENOMEM; in __scmi_xfer_info_init()
2236 * pre-initialize the buffer pointer to pre-allocated buffers and in __scmi_xfer_info_init()
2239 INIT_HLIST_HEAD(&info->free_xfers); in __scmi_xfer_info_init()
2240 for (i = 0; i < info->max_msg; i++) { in __scmi_xfer_info_init()
2243 return -ENOMEM; in __scmi_xfer_info_init()
2245 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
2247 if (!xfer->rx.buf) in __scmi_xfer_info_init()
2248 return -ENOMEM; in __scmi_xfer_info_init()
2250 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
2251 init_completion(&xfer->done); in __scmi_xfer_info_init()
2252 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
2255 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
2258 spin_lock_init(&info->xfer_lock); in __scmi_xfer_info_init()
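/*
 * Standalone model (not from driver.c) of the pool set-up above: max_msg
 * descriptors, each with a single buffer shared between tx and rx, all
 * parked on a free list until scmi_xfer_get() pops them. The sizes here
 * are illustrative, not the driver's.
 */
#include <stdlib.h>

#define POOL_MAX_MSG	 32
#define POOL_MSG_SIZE	128

struct pxfer {
	void *buf;		/* shared rx/tx payload buffer */
	struct pxfer *next;
};

static struct pxfer *free_xfers;

static int xfer_pool_init(void)
{
	for (int i = 0; i < POOL_MAX_MSG; i++) {
		struct pxfer *x = calloc(1, sizeof(*x));

		if (!x)
			return -1;	/* -ENOMEM in the driver */
		x->buf = calloc(1, POOL_MSG_SIZE);
		if (!x->buf) {
			free(x);
			return -1;
		}
		x->next = free_xfers;	/* push on the free list */
		free_xfers = x;
	}
	return 0;
}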
2265 const struct scmi_desc *desc = sinfo->desc; in scmi_channels_max_msg_configure()
2267 if (!desc->ops->get_max_msg) { in scmi_channels_max_msg_configure()
2268 sinfo->tx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2269 sinfo->rx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2273 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2275 return -EINVAL; in scmi_channels_max_msg_configure()
2276 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2279 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2281 sinfo->rx_minfo.max_msg = in scmi_channels_max_msg_configure()
2282 desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2296 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); in scmi_xfer_info_init()
2297 if (!ret && !idr_is_empty(&sinfo->rx_idr)) in scmi_xfer_info_init()
2298 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); in scmi_xfer_info_init()
2314 idr = tx ? &info->tx_idr : &info->rx_idr; in scmi_chan_setup()
2316 if (!info->desc->ops->chan_available(of_node, idx)) { in scmi_chan_setup()
2319 return -EINVAL; in scmi_chan_setup()
2323 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_chan_setup()
2325 return -ENOMEM; in scmi_chan_setup()
2327 cinfo->is_p2a = !tx; in scmi_chan_setup()
2328 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; in scmi_chan_setup()
2334 tdev = scmi_device_create(of_node, info->dev, prot_id, name); in scmi_chan_setup()
2336 dev_err(info->dev, in scmi_chan_setup()
2338 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2339 return -EINVAL; in scmi_chan_setup()
2343 cinfo->id = prot_id; in scmi_chan_setup()
2344 cinfo->dev = &tdev->dev; in scmi_chan_setup()
2345 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); in scmi_chan_setup()
2348 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2349 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2353 if (tx && is_polling_required(cinfo, info->desc)) { in scmi_chan_setup()
2354 if (is_transport_polling_capable(info->desc)) in scmi_chan_setup()
2355 dev_info(&tdev->dev, in scmi_chan_setup()
2356 "Enabled polling mode TX channel - prot_id:%d\n", in scmi_chan_setup()
2359 dev_warn(&tdev->dev, in scmi_chan_setup()
2366 dev_err(info->dev, in scmi_chan_setup()
2367 "unable to allocate SCMI idr slot err %d\n", ret); in scmi_chan_setup()
2371 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2372 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2377 cinfo->handle = &info->handle; in scmi_chan_setup()
2390 if (ret && ret != -ENOMEM) in scmi_txrx_setup()
2398 * scmi_channels_setup - Helper to initialize all required channels
2400 * @info: The SCMI instance descriptor.
2404 * borrowing devices from the SCMI drivers; this way channels are initialized
2405 * upfront during core SCMI stack probing and are no longer coupled with SCMI in scmi_channels_setup()
2406 * devices used by SCMI drivers.
2418 struct device_node *child, *top_np = info->dev->of_node; in scmi_channels_setup()
2432 dev_err(info->dev, in scmi_channels_setup()
2449 if (cinfo->dev) { in scmi_chan_destroy()
2450 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_chan_destroy()
2451 struct scmi_device *sdev = to_scmi_dev(cinfo->dev); in scmi_chan_destroy()
2453 of_node_put(cinfo->dev->of_node); in scmi_chan_destroy()
2454 scmi_device_destroy(info->dev, id, sdev->name); in scmi_chan_destroy()
2455 cinfo->dev = NULL; in scmi_chan_destroy()
2466 idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_channels()
2476 scmi_cleanup_channels(info, &info->tx_idr); in scmi_cleanup_txrx_channels()
2478 scmi_cleanup_channels(info, &info->rx_idr); in scmi_cleanup_txrx_channels()
2487 /* Skip transport devices and devices of different SCMI instances */ in scmi_bus_notifier()
2488 if (!strncmp(sdev->name, "__scmi_transport_device", 23) || in scmi_bus_notifier()
2489 sdev->dev.parent != info->dev) in scmi_bus_notifier()
2498 scmi_handle_put(sdev->handle); in scmi_bus_notifier()
2499 sdev->handle = NULL; in scmi_bus_notifier()
2505 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev), in scmi_bus_notifier()
2506 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ? in scmi_bus_notifier()
2519 np = idr_find(&info->active_protocols, id_table->protocol_id); in scmi_device_request_notifier()
2523 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n", in scmi_device_request_notifier()
2524 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-", in scmi_device_request_notifier()
2525 id_table->name, id_table->protocol_id); in scmi_device_request_notifier()
2529 scmi_create_protocol_devices(np, info, id_table->protocol_id, in scmi_device_request_notifier()
2530 id_table->name); in scmi_device_request_notifier()
2533 scmi_destroy_protocol_devices(info, id_table->protocol_id, in scmi_device_request_notifier()
2534 id_table->name); in scmi_device_request_notifier()
2550 debugfs_remove_recursive(dbg->top_dentry); in scmi_debugfs_common_cleanup()
2551 kfree(dbg->name); in scmi_debugfs_common_cleanup()
2552 kfree(dbg->type); in scmi_debugfs_common_cleanup()
2562 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL); in scmi_debugfs_common_setup()
2566 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL); in scmi_debugfs_common_setup()
2567 if (!dbg->name) { in scmi_debugfs_common_setup()
2568 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2572 of_property_read_string(info->dev->of_node, "compatible", &c_ptr); in scmi_debugfs_common_setup()
2573 dbg->type = kstrdup(c_ptr, GFP_KERNEL); in scmi_debugfs_common_setup()
2574 if (!dbg->type) { in scmi_debugfs_common_setup()
2575 kfree(dbg->name); in scmi_debugfs_common_setup()
2576 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2580 snprintf(top_dir, 16, "%d", info->id); in scmi_debugfs_common_setup()
2584 dbg->is_atomic = info->desc->atomic_enabled && in scmi_debugfs_common_setup()
2585 is_transport_polling_capable(info->desc); in scmi_debugfs_common_setup()
2588 (char **)&dbg->name); in scmi_debugfs_common_setup()
2591 &info->atomic_threshold); in scmi_debugfs_common_setup()
2593 debugfs_create_str("type", 0400, trans, (char **)&dbg->type); in scmi_debugfs_common_setup()
2595 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic); in scmi_debugfs_common_setup()
2598 (u32 *)&info->desc->max_rx_timeout_ms); in scmi_debugfs_common_setup()
2601 (u32 *)&info->desc->max_msg_size); in scmi_debugfs_common_setup()
2604 (u32 *)&info->tx_minfo.max_msg); in scmi_debugfs_common_setup()
2607 (u32 *)&info->rx_minfo.max_msg); in scmi_debugfs_common_setup()
2609 dbg->top_dentry = top_dentry; in scmi_debugfs_common_setup()
2611 if (devm_add_action_or_reset(info->dev, in scmi_debugfs_common_setup()
2625 if (!info->dbg) in scmi_debugfs_raw_mode_setup()
2626 return -EINVAL; in scmi_debugfs_raw_mode_setup()
2629 idr_for_each_entry(&info->tx_idr, cinfo, id) { in scmi_debugfs_raw_mode_setup()
2635 dev_warn(info->dev, in scmi_debugfs_raw_mode_setup()
2636 "SCMI RAW - Error enumerating channels\n"); in scmi_debugfs_raw_mode_setup()
2640 if (!test_bit(cinfo->id, protos)) { in scmi_debugfs_raw_mode_setup()
2641 channels[num_chans++] = cinfo->id; in scmi_debugfs_raw_mode_setup()
2642 set_bit(cinfo->id, protos); in scmi_debugfs_raw_mode_setup()
2646 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry, in scmi_debugfs_raw_mode_setup()
2647 info->id, channels, num_chans, in scmi_debugfs_raw_mode_setup()
2648 info->desc, info->tx_minfo.max_msg); in scmi_debugfs_raw_mode_setup()
2649 if (IS_ERR(info->raw)) { in scmi_debugfs_raw_mode_setup()
2650 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n"); in scmi_debugfs_raw_mode_setup()
2651 ret = PTR_ERR(info->raw); in scmi_debugfs_raw_mode_setup()
2652 info->raw = NULL; in scmi_debugfs_raw_mode_setup()
2665 struct device *dev = &pdev->dev; in scmi_probe()
2666 struct device_node *child, *np = dev->of_node; in scmi_probe()
2670 return -EINVAL; in scmi_probe()
2674 return -ENOMEM; in scmi_probe()
2676 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL); in scmi_probe()
2677 if (info->id < 0) in scmi_probe()
2678 return info->id; in scmi_probe()
2680 info->dev = dev; in scmi_probe()
2681 info->desc = desc; in scmi_probe()
2682 info->bus_nb.notifier_call = scmi_bus_notifier; in scmi_probe()
2683 info->dev_req_nb.notifier_call = scmi_device_request_notifier; in scmi_probe()
2684 INIT_LIST_HEAD(&info->node); in scmi_probe()
2685 idr_init(&info->protocols); in scmi_probe()
2686 mutex_init(&info->protocols_mtx); in scmi_probe()
2687 idr_init(&info->active_protocols); in scmi_probe()
2688 mutex_init(&info->devreq_mtx); in scmi_probe()
2691 idr_init(&info->tx_idr); in scmi_probe()
2692 idr_init(&info->rx_idr); in scmi_probe()
2694 handle = &info->handle; in scmi_probe()
2695 handle->dev = info->dev; in scmi_probe()
2696 handle->version = &info->version; in scmi_probe()
2697 handle->devm_protocol_acquire = scmi_devm_protocol_acquire; in scmi_probe()
2698 handle->devm_protocol_get = scmi_devm_protocol_get; in scmi_probe()
2699 handle->devm_protocol_put = scmi_devm_protocol_put; in scmi_probe()
2702 if (!of_property_read_u32(np, "atomic-threshold-us", in scmi_probe()
2703 &info->atomic_threshold)) in scmi_probe()
2705 "SCMI System wide atomic threshold set to %d us\n", in scmi_probe()
2706 info->atomic_threshold); in scmi_probe()
2707 handle->is_transport_atomic = scmi_is_transport_atomic; in scmi_probe()
2709 if (desc->ops->link_supplier) { in scmi_probe()
2710 ret = desc->ops->link_supplier(dev); in scmi_probe()
2720 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
2725 &info->dev_req_nb); in scmi_probe()
2734 info->dbg = scmi_debugfs_common_setup(info); in scmi_probe()
2735 if (!info->dbg) in scmi_probe()
2736 dev_warn(dev, "Failed to setup SCMI debugfs.\n"); in scmi_probe()
2749 dev_info(dev, "SCMI RAW Mode COEX enabled !\n"); in scmi_probe()
2754 dev_err(dev, "SCMI Notifications NOT available.\n"); in scmi_probe()
2756 if (info->desc->atomic_enabled && in scmi_probe()
2757 !is_transport_polling_capable(info->desc)) in scmi_probe()
2762 * Trigger SCMI Base protocol initialization. in scmi_probe()
2764 * SCMI stack is shutdown/unloaded as a whole. in scmi_probe()
2768 dev_err(dev, "unable to communicate with SCMI\n"); in scmi_probe()
2775 list_add_tail(&info->node, &scmi_list); in scmi_probe()
2788 dev_err(dev, "SCMI protocol %d not implemented\n", in scmi_probe()
2795 * @active_protocols for this SCMI instance. in scmi_probe()
2797 ret = idr_alloc(&info->active_protocols, child, in scmi_probe()
2800 dev_err(dev, "SCMI protocol %d already activated. Skip\n", in scmi_probe()
2813 scmi_raw_mode_cleanup(info->raw); in scmi_probe()
2814 scmi_notification_exit(&info->handle); in scmi_probe()
2817 &info->dev_req_nb); in scmi_probe()
2819 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
2823 ida_free(&scmi_id, info->id); in scmi_probe()
2834 scmi_raw_mode_cleanup(info->raw); in scmi_remove()
2837 if (info->users) in scmi_remove()
2838 dev_warn(&pdev->dev, in scmi_remove()
2839 "Still active SCMI users will be forcibly unbound.\n"); in scmi_remove()
2840 list_del(&info->node); in scmi_remove()
2843 scmi_notification_exit(&info->handle); in scmi_remove()
2845 mutex_lock(&info->protocols_mtx); in scmi_remove()
2846 idr_destroy(&info->protocols); in scmi_remove()
2847 mutex_unlock(&info->protocols_mtx); in scmi_remove()
2849 idr_for_each_entry(&info->active_protocols, child, id) in scmi_remove()
2851 idr_destroy(&info->active_protocols); in scmi_remove()
2854 &info->dev_req_nb); in scmi_remove()
2855 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_remove()
2860 ida_free(&scmi_id, info->id); in scmi_remove()
2870 return sprintf(buf, "%u.%u\n", info->version.major_ver, in protocol_version_show()
2871 info->version.minor_ver); in protocol_version_show()
2880 return sprintf(buf, "0x%x\n", info->version.impl_ver); in firmware_version_show()
2889 return sprintf(buf, "%s\n", info->version.vendor_id); in vendor_id_show()
2898 return sprintf(buf, "%s\n", info->version.sub_vendor_id); in sub_vendor_id_show()
2914 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2917 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2920 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2921 { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
2924 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2933 .name = "arm-scmi",
2943 * __scmi_transports_setup - Common helper to call transport-specific
2958 for (trans = scmi_of_match; trans->data; trans++) { in __scmi_transports_setup()
2959 const struct scmi_desc *tdesc = trans->data; in __scmi_transports_setup()
2961 if ((init && !tdesc->transport_init) || in __scmi_transports_setup()
2962 (!init && !tdesc->transport_exit)) in __scmi_transports_setup()
2966 ret = tdesc->transport_init(); in __scmi_transports_setup()
2968 tdesc->transport_exit(); in __scmi_transports_setup()
2971 pr_err("SCMI transport %s FAILED initialization!\n", in __scmi_transports_setup()
2972 trans->compatible); in __scmi_transports_setup()
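/*
 * The walk performed by __scmi_transports_setup(), reduced to its core:
 * visit every descriptor and call whichever optional hook it provides,
 * init or exit depending on the phase. Stopping on the first init failure
 * is an assumption of this sketch; the table sentinel and names are
 * stand-ins for the scmi_of_match entries.
 */
#include <stdio.h>
#include <stdbool.h>

struct transport_desc {
	const char *compatible;
	int  (*transport_init)(void);
	void (*transport_exit)(void);
};

static int transports_setup(const struct transport_desc *t, bool init)
{
	for (; t->compatible; t++) {
		if (init && t->transport_init) {
			if (t->transport_init()) {
				fprintf(stderr, "%s failed init\n",
					t->compatible);
				return -1;	/* stop on first failure */
			}
		} else if (!init && t->transport_exit) {
			t->transport_exit();
		}
	}
	return 0;
}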
2994 d = debugfs_create_dir("scmi", NULL); in scmi_debugfs_init()
2996 pr_err("Could NOT create SCMI top dentry.\n"); in scmi_debugfs_init()
3007 /* Bail out if no SCMI transport was configured */ in scmi_driver_init()
3009 return -EINVAL; in scmi_driver_init()
3011 /* Initialize any compiled-in transport which provided an init/exit */ in scmi_driver_init()
3055 MODULE_ALIAS("platform:arm-scmi");
3057 MODULE_DESCRIPTION("ARM SCMI protocol driver");