Lines Matching +full:mhu +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
6 * and the Application Processors (AP). The Message Handling Unit (MHU)
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2021 ARM Ltd.
25 #include <linux/io-64-nonatomic-hi-lo.h>
60 * struct scmi_xfers_info - Structure to manage transfer information
68 * a number of xfers equal to the maximum allowed in-flight
71 * currently in-flight messages.
82 * struct scmi_protocol_instance - Describe an initialized protocol instance.
85 * @gid: A reference for per-protocol devres management.
106 * struct scmi_debug_info - Debug common info
122 * struct scmi_info - Structure representing a SCMI instance
128 * implementation version and (sub-)vendor identification.
133 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
143 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
146 * to have an execution latency less than or equal to the threshold
195 if (!proto || !try_module_get(proto->owner)) { in scmi_protocol_get()
211 module_put(proto->owner); in scmi_protocol_put()
220 return -EINVAL; in scmi_protocol_register()
223 if (!proto->instance_init) { in scmi_protocol_register()
224 pr_err("missing init for protocol 0x%x\n", proto->id); in scmi_protocol_register()
225 return -EINVAL; in scmi_protocol_register()
230 proto->id, proto->id + 1, GFP_ATOMIC); in scmi_protocol_register()
232 if (ret != proto->id) { in scmi_protocol_register()
233 pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", in scmi_protocol_register()
234 proto->id, ret); in scmi_protocol_register()
238 pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_register()
247 idr_remove(&scmi_protocols, proto->id); in scmi_protocol_unregister()
250 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_unregister()
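
As a usage note: protocol implementations register themselves with this core at module load time. A minimal sketch, assuming the struct scmi_protocol fields visible in the checks above; the protocol ID and init hook below are hypothetical:

static int my_proto_init(const struct scmi_protocol_handle *ph)
{
	/* Negotiate version, set up per-instance state via ph... */
	return 0;
}

/* Hypothetical vendor protocol descriptor. */
static const struct scmi_protocol my_proto = {
	.id = 0x81,			/* example vendor-extension protocol ID */
	.owner = THIS_MODULE,
	.instance_init = my_proto_init,	/* must be non-NULL, as checked above */
};

static int __init my_proto_module_init(void)
{
	/* Paired with scmi_protocol_unregister() at module exit. */
	return scmi_protocol_register(&my_proto);
}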
255 * scmi_create_protocol_devices - Create devices for all pending requests for
271 mutex_lock(&info->devreq_mtx); in scmi_create_protocol_devices()
272 sdev = scmi_device_create(np, info->dev, prot_id, name); in scmi_create_protocol_devices()
274 dev_err(info->dev, in scmi_create_protocol_devices()
277 mutex_unlock(&info->devreq_mtx); in scmi_create_protocol_devices()
283 mutex_lock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
284 scmi_device_destroy(info->dev, prot_id, name); in scmi_destroy_protocol_devices()
285 mutex_unlock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
293 info->notify_priv = priv; in scmi_notification_instance_data_set()
304 return info->notify_priv; in scmi_notification_instance_data_get()
308 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
310 * @minfo: Pointer to Tx/Rx Message management info based on channel type
314 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
315 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
316 * of incorrect association of a late and expired xfer with a live in-flight
317 * transaction, both happening to re-use the same token identifier.
319 * Since the platform is NOT required to answer our requests in order, we should
322 * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
325 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
329 * X = used in-flight
332 * ------
334 * |- xfer_id picked
335 * -----------+----------------------------------------------------------
337 * ----------------------------------------------------------------------
339 * |- next_token
341 * Out-of-order pending at start
342 * -----------------------------
344 * |- xfer_id picked, last_token fixed
345 * -----+----------------------------------------------------------------
347 * ----------------------------------------------------------------------
349 * |- next_token
352 * Out-of-order pending at end
353 * ---------------------------
355 * |- xfer_id picked, last_token fixed
356 * -----+----------------------------------------------------------------
358 * ----------------------------------------------------------------------
360 * |- next_token
372 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] in scmi_xfer_token_set()
373 * using the pre-allocated transfer_id as a base. in scmi_xfer_token_set()
379 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
382 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
386 * After heavily out-of-order responses, there are no free in scmi_xfer_token_set()
390 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
394 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages in scmi_xfer_token_set()
395 * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. in scmi_xfer_token_set()
398 return -ENOMEM; in scmi_xfer_token_set()
401 /* Update +/- last_token accordingly if we skipped some hole */ in scmi_xfer_token_set()
403 atomic_add((int)(xfer_id - next_token), &transfer_last_id); in scmi_xfer_token_set()
405 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
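
A simplified model of the allocation above: the candidate token is derived from the monotonic transfer_id, the bitmap is scanned forward, and the search wraps around once before reporting exhaustion. A standalone sketch with illustrative names, assuming the 10-bit token space of the SCMI header:

#include <stdbool.h>

#define TOKEN_MAX 1024	/* 10-bit token space */

/*
 * Scan forward from the hint, wrapping once; returns a free token
 * or -1 when all TOKEN_MAX tokens are currently in-flight.
 */
static int pick_token(const bool in_flight[TOKEN_MAX], unsigned int hint)
{
	for (unsigned int i = 0; i < TOKEN_MAX; i++) {
		unsigned int id = (hint + i) % TOKEN_MAX;

		if (!in_flight[id])
			return (int)id;
	}
	return -1;
}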
411 * scmi_xfer_token_clear - Release the token
413 * @minfo: Pointer to Tx/Rx Message management info based on channel type
419 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
423 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
426 * @minfo: Pointer to Tx/Rx Message management info based on channel type
428 * Note that this helper assumes that the xfer to be registered as in-flight
438 /* Set in-flight */ in scmi_xfer_inflight_register_unlocked()
439 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_inflight_register_unlocked()
440 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); in scmi_xfer_inflight_register_unlocked()
441 xfer->pending = true; in scmi_xfer_inflight_register_unlocked()
445 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
448 * @minfo: Pointer to Tx/Rx Message management info based on channel type
453 * same sequence number is currently still registered as in-flight.
455 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
464 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
465 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table)) in scmi_xfer_inflight_register()
468 ret = -EBUSY; in scmi_xfer_inflight_register()
469 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
475 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
488 return scmi_xfer_inflight_register(xfer, &info->tx_minfo); in scmi_xfer_raw_inflight_register()
492 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
493 * as pending in-flight
496 * @minfo: Pointer to Tx/Rx Message management info based on channel type
506 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
511 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
517 * scmi_xfer_get() - Allocate one message
520 * @minfo: Pointer to Tx/Rx Message management info based on channel type
543 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_get()
544 if (hlist_empty(&minfo->free_xfers)) { in scmi_xfer_get()
545 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
546 return ERR_PTR(-ENOMEM); in scmi_xfer_get()
550 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
551 hlist_del_init(&xfer->node); in scmi_xfer_get()
557 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
559 refcount_set(&xfer->users, 1); in scmi_xfer_get()
560 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
561 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
567 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
573 * Return: A valid xfer on Success, or an error-pointer otherwise
580 xfer = scmi_xfer_get(handle, &info->tx_minfo); in scmi_xfer_raw_get()
582 xfer->flags |= SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_get()
588 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
596 * protocol in range is allowed, re-using the Base channel, so as to enable
607 cinfo = idr_find(&info->tx_idr, protocol_id); in scmi_xfer_raw_channel_get()
610 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
612 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); in scmi_xfer_raw_channel_get()
614 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
615 dev_warn_once(handle->dev, in scmi_xfer_raw_channel_get()
624 * __scmi_xfer_put() - Release a message
626 * @minfo: Pointer to Tx/Rx Message management info based on channel type
639 spin_lock_irqsave(&minfo->xfer_lock, flags); in __scmi_xfer_put()
640 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
641 if (xfer->pending) { in __scmi_xfer_put()
643 hash_del(&xfer->node); in __scmi_xfer_put()
644 xfer->pending = false; in __scmi_xfer_put()
646 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
648 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in __scmi_xfer_put()
652 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
664 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_put()
665 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; in scmi_xfer_raw_put()
666 return __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_raw_put()
670 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
672 * @minfo: Pointer to Tx/Rx Message management info based on channel type
686 if (test_bit(xfer_id, minfo->xfer_alloc_table)) in scmi_xfer_lookup_unlocked()
687 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
689 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
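
The xfer_id being looked up here is the 10-bit token carried in the 32-bit SCMI message header. A sketch of the pack/extract helpers implied by MSG_XTRACT_TOKEN() and pack_scmi_header() used elsewhere in this file, assuming the field layout defined by the SCMI specification (message ID [7:0], type [9:8], protocol ID [17:10], token [27:18]):

#include <stdint.h>

#define MSG_TOKEN(hdr)	(((hdr) >> 18) & 0x3ff)	/* bits [27:18] */

static uint32_t pack_hdr(uint8_t msg_id, uint8_t type,
			 uint8_t proto_id, uint16_t token)
{
	return (uint32_t)msg_id |			/* bits [7:0]   */
	       ((uint32_t)(type & 0x3) << 8) |		/* bits [9:8]   */
	       ((uint32_t)proto_id << 10) |		/* bits [17:10] */
	       ((uint32_t)(token & 0x3ff) << 18);	/* bits [27:18] */
}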
693 * scmi_bad_message_trace - A helper to trace weird messages
701 * timed-out message that arrives and, as such, can be traced only referring to
708 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_bad_message_trace()
725 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_bad_message_trace()
732 * scmi_msg_response_validate - Validate message type against state of related
741 * related synchronous response (Out-of-Order Delayed Response) the missing
744 * SCMI transport can deliver such out-of-order responses.
746 * Context: Assumes to be called with xfer->lock already acquired.
757 * delayed response we're not prepared to handle: bail-out safely in scmi_msg_response_validate()
760 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
761 dev_err(cinfo->dev, in scmi_msg_response_validate()
763 xfer->hdr.seq); in scmi_msg_response_validate()
764 return -EINVAL; in scmi_msg_response_validate()
767 switch (xfer->state) { in scmi_msg_response_validate()
774 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
775 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
776 complete(&xfer->done); in scmi_msg_response_validate()
777 dev_warn(cinfo->dev, in scmi_msg_response_validate()
779 xfer->hdr.seq); in scmi_msg_response_validate()
784 return -EINVAL; in scmi_msg_response_validate()
788 return -EINVAL; in scmi_msg_response_validate()
795 * scmi_xfer_state_update - Update xfer state
808 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
811 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
812 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
814 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
821 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
827 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
844 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_command_acquire()
845 struct scmi_xfers_info *minfo = &info->tx_minfo; in scmi_xfer_command_acquire()
850 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
853 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
856 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
859 scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); in scmi_xfer_command_acquire()
863 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
864 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
866 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
879 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
882 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
883 "Invalid message type:%d for %d - HDR:0x%X state:%d\n", in scmi_xfer_command_acquire()
884 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
887 scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); in scmi_xfer_command_acquire()
892 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
901 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
902 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
908 if (!cinfo->is_p2a) { in scmi_clear_channel()
909 dev_warn(cinfo->dev, "Invalid clear on A2P channel!\n"); in scmi_clear_channel()
913 if (info->desc->ops->clear_channel) in scmi_clear_channel()
914 info->desc->ops->clear_channel(cinfo); in scmi_clear_channel()
921 struct device *dev = cinfo->dev; in scmi_handle_notification()
922 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_notification()
923 struct scmi_xfers_info *minfo = &info->rx_minfo; in scmi_handle_notification()
927 xfer = scmi_xfer_get(cinfo->handle, minfo); in scmi_handle_notification()
933 scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); in scmi_handle_notification()
939 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
941 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_notification()
942 smp_store_mb(xfer->priv, priv); in scmi_handle_notification()
943 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, in scmi_handle_notification()
946 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_notification()
947 xfer->hdr.id, "NOTI", xfer->hdr.seq, in scmi_handle_notification()
948 xfer->hdr.status, xfer->rx.buf, xfer->rx.len); in scmi_handle_notification()
949 scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); in scmi_handle_notification()
951 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
952 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
954 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
955 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
959 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_handle_notification()
960 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE, in scmi_handle_notification()
961 cinfo->id); in scmi_handle_notification()
973 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_response()
978 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv); in scmi_handle_response()
985 /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */ in scmi_handle_response()
986 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
987 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
990 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_response()
991 smp_store_mb(xfer->priv, priv); in scmi_handle_response()
992 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
994 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_response()
995 xfer->hdr.id, in scmi_handle_response()
996 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? in scmi_handle_response()
999 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_response()
1000 xfer->rx.buf, xfer->rx.len); in scmi_handle_response()
1002 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
1003 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
1004 xfer->hdr.type); in scmi_handle_response()
1006 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
1008 complete(xfer->async_done); in scmi_handle_response()
1009 scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); in scmi_handle_response()
1011 complete(&xfer->done); in scmi_handle_response()
1012 scmi_inc_count(info->dbg->counters, RESPONSE_OK); in scmi_handle_response()
1018 * RX path since it will be already queued at the end of the TX in scmi_handle_response()
1021 if (!xfer->hdr.poll_completion) in scmi_handle_response()
1022 scmi_raw_message_report(info->raw, xfer, in scmi_handle_response()
1024 cinfo->id); in scmi_handle_response()
1031 * scmi_rx_callback() - callback for receiving messages
1063 * xfer_put() - Release a transmit message
1072 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_put()
1074 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
1081 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_done_no_timeout()
1084 * Poll also on xfer->done so that polling can be forcibly terminated in scmi_xfer_done_no_timeout()
1085 * in case of out-of-order receptions of delayed responses in scmi_xfer_done_no_timeout()
1087 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
1088 (*ooo = try_wait_for_completion(&xfer->done)) || in scmi_xfer_done_no_timeout()
1097 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1099 if (xfer->hdr.poll_completion) { in scmi_wait_for_reply()
1104 if (!desc->sync_cmds_completed_on_ret) { in scmi_wait_for_reply()
1115 if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { in scmi_wait_for_reply()
1117 "timed out in resp(caller: %pS) - polling\n", in scmi_wait_for_reply()
1119 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1120 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); in scmi_wait_for_reply()
1128 * Do not fetch_response if an out-of-order delayed in scmi_wait_for_reply()
1131 spin_lock_irqsave(&xfer->lock, flags); in scmi_wait_for_reply()
1132 if (xfer->state == SCMI_XFER_SENT_OK) { in scmi_wait_for_reply()
1133 desc->ops->fetch_response(cinfo, xfer); in scmi_wait_for_reply()
1134 xfer->state = SCMI_XFER_RESP_OK; in scmi_wait_for_reply()
1136 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_wait_for_reply()
1139 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_wait_for_reply()
1140 xfer->hdr.protocol_id, xfer->hdr.id, in scmi_wait_for_reply()
1143 xfer->hdr.seq, xfer->hdr.status, in scmi_wait_for_reply()
1144 xfer->rx.buf, xfer->rx.len); in scmi_wait_for_reply()
1145 scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); in scmi_wait_for_reply()
1149 handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1151 scmi_raw_message_report(info->raw, xfer, in scmi_wait_for_reply()
1153 cinfo->id); in scmi_wait_for_reply()
1158 if (!wait_for_completion_timeout(&xfer->done, in scmi_wait_for_reply()
1162 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1163 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); in scmi_wait_for_reply()
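
Condensing the two strategies above: polled transfers busy-wait on the transport's poll_done() (and on xfer->done, to catch out-of-order delayed responses) against a ktime deadline, while interrupt-driven ones simply sleep on the completion. A sketch of the control flow only; poll_done()/fetch_response() stand in for the transport ops, and raw-mode handling, locking, and tracing are stripped:

if (xfer->hdr.poll_completion) {
	ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

	/* Busy-wait until completion, early termination, or deadline. */
	spin_until_cond(poll_done(cinfo, xfer) ||
			try_wait_for_completion(&xfer->done) ||
			ktime_after(ktime_get(), stop));

	if (ktime_after(ktime_get(), stop))
		ret = -ETIMEDOUT;
	else
		fetch_response(cinfo, xfer);	/* under xfer->lock in the real code */
} else {
	/* Sleep until the RX path calls complete(&xfer->done). */
	if (!wait_for_completion_timeout(&xfer->done,
					 msecs_to_jiffies(timeout_ms)))
		ret = -ETIMEDOUT;
}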
1171 * scmi_wait_for_message_response - A helper to group all the possible ways of
1177 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1178 * configuration flags like xfer->hdr.poll_completion.
1185 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_message_response()
1186 struct device *dev = info->dev; in scmi_wait_for_message_response()
1188 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, in scmi_wait_for_message_response()
1189 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_wait_for_message_response()
1190 info->desc->max_rx_timeout_ms, in scmi_wait_for_message_response()
1191 xfer->hdr.poll_completion); in scmi_wait_for_message_response()
1193 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer, in scmi_wait_for_message_response()
1194 info->desc->max_rx_timeout_ms); in scmi_wait_for_message_response()
1198 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1212 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_raw_wait_for_message_response()
1213 struct device *dev = info->dev; in scmi_xfer_raw_wait_for_message_response()
1215 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms); in scmi_xfer_raw_wait_for_message_response()
1217 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n", in scmi_xfer_raw_wait_for_message_response()
1218 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_wait_for_message_response()
1224 * do_xfer() - Do one transfer
1229 * Return: -ETIMEDOUT in case of no response, if transmit error,
1238 struct scmi_info *info = handle_to_scmi_info(pi->handle); in do_xfer()
1239 struct device *dev = info->dev; in do_xfer()
1243 if (xfer->hdr.poll_completion && in do_xfer()
1244 !is_transport_polling_capable(info->desc)) { in do_xfer()
1247 scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); in do_xfer()
1248 return -EINVAL; in do_xfer()
1251 cinfo = idr_find(&info->tx_idr, pi->proto->id); in do_xfer()
1253 scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); in do_xfer()
1254 return -EINVAL; in do_xfer()
1257 if (is_polling_enabled(cinfo, info->desc)) in do_xfer()
1258 xfer->hdr.poll_completion = true; in do_xfer()
1265 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
1266 reinit_completion(&xfer->done); in do_xfer()
1268 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1269 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
1270 xfer->hdr.poll_completion); in do_xfer()
1273 xfer->hdr.status = SCMI_SUCCESS; in do_xfer()
1274 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
1277 * on xfer->state due to the monotonically increasing tokens allocation, in do_xfer()
1278 * we must anyway ensure xfer->state initialization is not re-ordered in do_xfer()
1279 * after the .send_message() to be sure that on the RX path an early in do_xfer()
1280 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. in do_xfer()
1284 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
1287 scmi_inc_count(info->dbg->counters, SENT_FAIL); in do_xfer()
1291 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in do_xfer()
1292 xfer->hdr.id, "CMND", xfer->hdr.seq, in do_xfer()
1293 xfer->hdr.status, xfer->tx.buf, xfer->tx.len); in do_xfer()
1294 scmi_inc_count(info->dbg->counters, SENT_OK); in do_xfer()
1297 if (!ret && xfer->hdr.status) { in do_xfer()
1298 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
1299 scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); in do_xfer()
1302 if (info->desc->ops->mark_txdone) in do_xfer()
1303 info->desc->ops->mark_txdone(cinfo, ret, xfer); in do_xfer()
1305 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1306 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in do_xfer()
1315 struct scmi_info *info = handle_to_scmi_info(pi->handle); in reset_rx_to_maxsz()
1317 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
1321 * do_xfer_with_response() - Do one transfer and wait until the delayed
1328 * it could cause long busy-waiting here, so ignore polling for the delayed
1341 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1350 xfer->async_done = &async_response; in do_xfer_with_response()
1358 WARN_ON_ONCE(xfer->hdr.poll_completion); in do_xfer_with_response()
1362 if (!wait_for_completion_timeout(xfer->async_done, timeout)) { in do_xfer_with_response()
1363 dev_err(ph->dev, in do_xfer_with_response()
1366 ret = -ETIMEDOUT; in do_xfer_with_response()
1367 } else if (xfer->hdr.status) { in do_xfer_with_response()
1368 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
1372 xfer->async_done = NULL; in do_xfer_with_response()
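
A protocol would typically reach this path for commands flagged as asynchronous in the SCMI specification. A hedged sketch of the calling pattern; the command ID and payload below are hypothetical:

static int my_async_cmd(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct scmi_xfer *t;
	int ret;

	ret = ph->xops->xfer_get_init(ph, MY_ASYNC_CMD, sizeof(u32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);
	/* Waits for the immediate response, then for the delayed one. */
	ret = ph->xops->do_xfer_with_response(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}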
1377 * xfer_get_init() - Allocate and initialise one message for transmit
1398 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_get_init()
1399 struct scmi_xfers_info *minfo = &info->tx_minfo; in xfer_get_init()
1400 struct device *dev = info->dev; in xfer_get_init()
1403 if (rx_size > info->desc->max_msg_size || in xfer_get_init()
1404 tx_size > info->desc->max_msg_size) in xfer_get_init()
1405 return -ERANGE; in xfer_get_init()
1407 xfer = scmi_xfer_get(pi->handle, minfo); in xfer_get_init()
1414 /* Pick a sequence number and register this xfer as in-flight */ in xfer_get_init()
1417 dev_err(pi->handle->dev, in xfer_get_init()
1423 xfer->tx.len = tx_size; in xfer_get_init()
1424 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
1425 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
1426 xfer->hdr.id = msg_id; in xfer_get_init()
1427 xfer->hdr.poll_completion = false; in xfer_get_init()
1435 * version_get() - command to get the revision of the SCMI entity
1456 rev_info = t->rx.buf; in version_get()
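
The rest of version_get() is elided by this listing; for completeness, a sketch of the canonical synchronous function body around the line shown, assuming a zero-byte request with a 32-bit little-endian response and (ph, version) parameters:

__le32 *rev_info;
struct scmi_xfer *t;
int ret;

ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*rev_info), &t);
if (ret)
	return ret;

ret = do_xfer(ph, t);
if (!ret) {
	rev_info = t->rx.buf;		/* the line visible above */
	*version = le32_to_cpu(*rev_info);
}

xfer_put(ph, t);
return ret;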
1465 * scmi_set_protocol_priv - Set protocol specific data at init time
1477 pi->priv = priv; in scmi_set_protocol_priv()
1483 * scmi_get_protocol_priv - Get protocol specific data set at init time
1493 return pi->priv; in scmi_get_protocol_priv()
1511 * scmi_common_extended_name_get - Common helper to get extended resources name
1529 ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), in scmi_common_extended_name_get()
1534 put_unaligned_le32(res_id, t->tx.buf); in scmi_common_extended_name_get()
1535 resp = t->rx.buf; in scmi_common_extended_name_get()
1537 ret = ph->xops->do_xfer(ph, t); in scmi_common_extended_name_get()
1539 strscpy(name, resp->name, len); in scmi_common_extended_name_get()
1541 ph->xops->xfer_put(ph, t); in scmi_common_extended_name_get()
1544 dev_warn(ph->dev, in scmi_common_extended_name_get()
1545 "Failed to get extended name - id:%u (ret:%d). Using %s\n", in scmi_common_extended_name_get()
1551 * struct scmi_iterator - Iterator descriptor
1553 * a proper custom command payload for each multi-part command request.
1554 * @resp: A reference to the response RX buffer; used by @update_state and
1555 * @process_response to parse the multi-part replies.
1561 * internal routines and by the caller-provided @scmi_iterator_ops.
1583 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); in scmi_iterator_init()
1585 return ERR_PTR(-ENOMEM); in scmi_iterator_init()
1587 i->ph = ph; in scmi_iterator_init()
1588 i->ops = ops; in scmi_iterator_init()
1589 i->priv = priv; in scmi_iterator_init()
1591 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); in scmi_iterator_init()
1593 devm_kfree(ph->dev, i); in scmi_iterator_init()
1597 i->state.max_resources = max_resources; in scmi_iterator_init()
1598 i->msg = i->t->tx.buf; in scmi_iterator_init()
1599 i->resp = i->t->rx.buf; in scmi_iterator_init()
1606 int ret = -EINVAL; in scmi_iterator_run()
1612 if (!i || !i->ops || !i->ph) in scmi_iterator_run()
1615 iops = i->ops; in scmi_iterator_run()
1616 ph = i->ph; in scmi_iterator_run()
1617 st = &i->state; in scmi_iterator_run()
1620 iops->prepare_message(i->msg, st->desc_index, i->priv); in scmi_iterator_run()
1621 ret = ph->xops->do_xfer(ph, i->t); in scmi_iterator_run()
1625 st->rx_len = i->t->rx.len; in scmi_iterator_run()
1626 ret = iops->update_state(st, i->resp, i->priv); in scmi_iterator_run()
1630 if (st->num_returned > st->max_resources - st->desc_index) { in scmi_iterator_run()
1631 dev_err(ph->dev, in scmi_iterator_run()
1633 st->max_resources); in scmi_iterator_run()
1634 ret = -EINVAL; in scmi_iterator_run()
1638 for (st->loop_idx = 0; st->loop_idx < st->num_returned; in scmi_iterator_run()
1639 st->loop_idx++) { in scmi_iterator_run()
1640 ret = iops->process_response(ph, i->resp, st, i->priv); in scmi_iterator_run()
1645 st->desc_index += st->num_returned; in scmi_iterator_run()
1646 ph->xops->reset_rx_to_maxsz(ph, i->t); in scmi_iterator_run()
1651 } while (st->num_returned && st->num_remaining); in scmi_iterator_run()
1655 ph->xops->xfer_put(ph, i->t); in scmi_iterator_run()
1656 devm_kfree(ph->dev, i); in scmi_iterator_run()
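
Protocol code drives this machinery by supplying the three scmi_iterator_ops callbacks named above, then obtaining an iterator via scmi_iterator_init() and running it with scmi_iterator_run(). A hedged sketch of a multi-part client; the message/response structs and the num_entries encoding are purely illustrative:

struct my_msg { __le32 desc_index; };
struct my_resp { __le32 num_entries; __le32 entry[]; };

static void my_prepare_message(void *message, unsigned int desc_index,
			       const void *priv)
{
	struct my_msg *msg = message;

	msg->desc_index = cpu_to_le32(desc_index);	/* resume point */
}

static int my_update_state(struct scmi_iterator_state *st,
			   const void *response, void *priv)
{
	const struct my_resp *r = response;

	st->num_returned = le32_to_cpu(r->num_entries) & 0xfff;
	st->num_remaining = le32_to_cpu(r->num_entries) >> 16;
	return 0;
}

static int my_process_response(const struct scmi_protocol_handle *ph,
			       const void *response,
			       struct scmi_iterator_state *st, void *priv)
{
	const struct my_resp *r = response;

	/* Consume r->entry[st->loop_idx]... */
	return 0;
}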
1700 ret = -EINVAL; in scmi_common_fastchannel_init()
1704 ret = ph->xops->xfer_get_init(ph, describe_id, in scmi_common_fastchannel_init()
1709 info = t->tx.buf; in scmi_common_fastchannel_init()
1710 info->domain = cpu_to_le32(domain); in scmi_common_fastchannel_init()
1711 info->message_id = cpu_to_le32(message_id); in scmi_common_fastchannel_init()
1718 ret = ph->xops->do_xfer(ph, t); in scmi_common_fastchannel_init()
1722 resp = t->rx.buf; in scmi_common_fastchannel_init()
1723 flags = le32_to_cpu(resp->attr); in scmi_common_fastchannel_init()
1724 size = le32_to_cpu(resp->chan_size); in scmi_common_fastchannel_init()
1726 ret = -EINVAL; in scmi_common_fastchannel_init()
1730 phys_addr = le32_to_cpu(resp->chan_addr_low); in scmi_common_fastchannel_init()
1731 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; in scmi_common_fastchannel_init()
1732 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1734 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1741 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); in scmi_common_fastchannel_init()
1743 ret = -ENOMEM; in scmi_common_fastchannel_init()
1748 phys_addr = le32_to_cpu(resp->db_addr_low); in scmi_common_fastchannel_init()
1749 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; in scmi_common_fastchannel_init()
1750 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1752 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1756 db->addr = addr; in scmi_common_fastchannel_init()
1757 db->width = size; in scmi_common_fastchannel_init()
1758 db->set = le32_to_cpu(resp->db_set_lmask); in scmi_common_fastchannel_init()
1759 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; in scmi_common_fastchannel_init()
1760 db->mask = le32_to_cpu(resp->db_preserve_lmask); in scmi_common_fastchannel_init()
1761 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; in scmi_common_fastchannel_init()
1766 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1768 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
1770 pi->proto->id, message_id, domain); in scmi_common_fastchannel_init()
1775 devm_kfree(ph->dev, db); in scmi_common_fastchannel_init()
1781 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1784 dev_warn(ph->dev, in scmi_common_fastchannel_init()
1785 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", in scmi_common_fastchannel_init()
1786 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
1793 if (db->mask) \
1794 val = ioread##w(db->addr) & db->mask; \
1795 iowrite##w((u##w)db->set | val, db->addr); \
1800 if (!db || !db->addr) in scmi_common_fastchannel_db_ring()
1803 if (db->width == 1) in scmi_common_fastchannel_db_ring()
1805 else if (db->width == 2) in scmi_common_fastchannel_db_ring()
1807 else if (db->width == 4) in scmi_common_fastchannel_db_ring()
1809 else /* db->width == 8 */ in scmi_common_fastchannel_db_ring()
1816 if (db->mask) in scmi_common_fastchannel_db_ring()
1817 val = ioread64_hi_lo(db->addr) & db->mask; in scmi_common_fastchannel_db_ring()
1818 iowrite64_hi_lo(db->set | val, db->addr); in scmi_common_fastchannel_db_ring()
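
Once initialized, a protocol uses the fastchannel by writing the payload directly into the mapped region and ringing the doorbell, bypassing the xfer machinery entirely. A sketch, assuming an scmi_fc_info-style pairing of a payload address with its doorbell (field names illustrative; 'level' is the value being written):

/* Hypothetical fastchannel write path in a protocol implementation. */
if (fc && fc->set_addr) {
	iowrite32(level, fc->set_addr);			/* payload into shared memory */
	scmi_common_fastchannel_db_ring(fc->set_db);	/* notify the platform */
}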
1832 * scmi_revision_area_get - Retrieve version memory area.
1847 return pi->handle->version; in scmi_revision_area_get()
1851 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
1858 * all resources management is handled via a dedicated per-protocol devres
1870 int ret = -ENOMEM; in scmi_alloc_init_protocol_instance()
1873 const struct scmi_handle *handle = &info->handle; in scmi_alloc_init_protocol_instance()
1876 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1878 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1882 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); in scmi_alloc_init_protocol_instance()
1886 pi->gid = gid; in scmi_alloc_init_protocol_instance()
1887 pi->proto = proto; in scmi_alloc_init_protocol_instance()
1888 pi->handle = handle; in scmi_alloc_init_protocol_instance()
1889 pi->ph.dev = handle->dev; in scmi_alloc_init_protocol_instance()
1890 pi->ph.xops = &xfer_ops; in scmi_alloc_init_protocol_instance()
1891 pi->ph.hops = &helpers_ops; in scmi_alloc_init_protocol_instance()
1892 pi->ph.set_priv = scmi_set_protocol_priv; in scmi_alloc_init_protocol_instance()
1893 pi->ph.get_priv = scmi_get_protocol_priv; in scmi_alloc_init_protocol_instance()
1894 refcount_set(&pi->users, 1); in scmi_alloc_init_protocol_instance()
1895 /* proto->init is assured NON NULL by scmi_protocol_register */ in scmi_alloc_init_protocol_instance()
1896 ret = pi->proto->instance_init(&pi->ph); in scmi_alloc_init_protocol_instance()
1900 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, in scmi_alloc_init_protocol_instance()
1902 if (ret != proto->id) in scmi_alloc_init_protocol_instance()
1909 if (pi->proto->events) { in scmi_alloc_init_protocol_instance()
1910 ret = scmi_register_protocol_events(handle, pi->proto->id, in scmi_alloc_init_protocol_instance()
1911 &pi->ph, in scmi_alloc_init_protocol_instance()
1912 pi->proto->events); in scmi_alloc_init_protocol_instance()
1914 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
1915 "Protocol:%X - Events Registration Failed - err:%d\n", in scmi_alloc_init_protocol_instance()
1916 pi->proto->id, ret); in scmi_alloc_init_protocol_instance()
1919 devres_close_group(handle->dev, pi->gid); in scmi_alloc_init_protocol_instance()
1920 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); in scmi_alloc_init_protocol_instance()
1926 scmi_protocol_put(proto->id); in scmi_alloc_init_protocol_instance()
1927 devres_release_group(handle->dev, gid); in scmi_alloc_init_protocol_instance()
1933 * scmi_get_protocol_instance - Protocol initialization helper.
1939 * resource allocation with a dedicated per-protocol devres subgroup.
1942 * in particular returns -EPROBE_DEFER when the desired protocol could
1951 mutex_lock(&info->protocols_mtx); in scmi_get_protocol_instance()
1952 pi = idr_find(&info->protocols, protocol_id); in scmi_get_protocol_instance()
1955 refcount_inc(&pi->users); in scmi_get_protocol_instance()
1964 pi = ERR_PTR(-EPROBE_DEFER); in scmi_get_protocol_instance()
1966 mutex_unlock(&info->protocols_mtx); in scmi_get_protocol_instance()
1972 * scmi_protocol_acquire - Protocol acquire
1987 * scmi_protocol_release - Protocol de-initialization helper.
1991 * Remove one user for the specified protocol and triggers de-initialization
1992 * and resources de-allocation once the last user has gone.
1999 mutex_lock(&info->protocols_mtx); in scmi_protocol_release()
2000 pi = idr_find(&info->protocols, protocol_id); in scmi_protocol_release()
2004 if (refcount_dec_and_test(&pi->users)) { in scmi_protocol_release()
2005 void *gid = pi->gid; in scmi_protocol_release()
2007 if (pi->proto->events) in scmi_protocol_release()
2010 if (pi->proto->instance_deinit) in scmi_protocol_release()
2011 pi->proto->instance_deinit(&pi->ph); in scmi_protocol_release()
2013 idr_remove(&info->protocols, protocol_id); in scmi_protocol_release()
2017 devres_release_group(handle->dev, gid); in scmi_protocol_release()
2018 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", in scmi_protocol_release()
2023 mutex_unlock(&info->protocols_mtx); in scmi_protocol_release()
2030 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_setup_protocol_implemented()
2032 info->protocols_imp = prot_imp; in scmi_setup_protocol_implemented()
2040 struct scmi_revision_info *rev = handle->version; in scmi_is_protocol_implemented()
2042 if (!info->protocols_imp) in scmi_is_protocol_implemented()
2045 for (i = 0; i < rev->num_protocols; i++) in scmi_is_protocol_implemented()
2046 if (info->protocols_imp[i] == prot_id) in scmi_is_protocol_implemented()
2060 scmi_protocol_release(dres->handle, dres->protocol_id); in scmi_devm_release_protocol()
2072 return ERR_PTR(-ENOMEM); in scmi_devres_protocol_instance_get()
2074 pi = scmi_get_protocol_instance(sdev->handle, protocol_id); in scmi_devres_protocol_instance_get()
2080 dres->handle = sdev->handle; in scmi_devres_protocol_instance_get()
2081 dres->protocol_id = protocol_id; in scmi_devres_protocol_instance_get()
2082 devres_add(&sdev->dev, dres); in scmi_devres_protocol_instance_get()
2088 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2099 * released, and possibly de-initialized on last user, once the SCMI driver
2112 return ERR_PTR(-EINVAL); in scmi_devm_protocol_get()
2118 *ph = &pi->ph; in scmi_devm_protocol_get()
2120 return pi->proto->ops; in scmi_devm_protocol_get()
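
From a client driver's point of view this is the main entry point; thanks to the devres wiring above, no explicit put is needed on probe error paths. A typical probe-time sketch:

static int my_scmi_driver_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct scmi_clk_proto_ops *clk_ops;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);	/* possibly -EPROBE_DEFER, see above */

	/* Use clk_ops/ph; the protocol is released automatically on unbind. */
	return 0;
}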
2124 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2134 * released, and possibly de-initialized on last user, once the SCMI driver
2158 return dres->protocol_id == *((u8 *)data); in scmi_devm_protocol_match()
2162 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2174 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, in scmi_devm_protocol_put()
2180 * scmi_is_transport_atomic - Method to check if underlying transport for an
2195 ret = info->desc->atomic_enabled && in scmi_is_transport_atomic()
2196 is_transport_polling_capable(info->desc); in scmi_is_transport_atomic()
2198 *atomic_threshold = info->atomic_threshold; in scmi_is_transport_atomic()
2204 * scmi_handle_get() - Get the SCMI handle for a device
2223 if (dev->parent == info->dev) { in scmi_handle_get()
2224 info->users++; in scmi_handle_get()
2225 handle = &info->handle; in scmi_handle_get()
2235 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2244 * if NULL was passed, it returns -EINVAL; in scmi_handle_put()
2251 return -EINVAL; in scmi_handle_put()
2255 if (!WARN_ON(!info->users)) in scmi_handle_put()
2256 info->users--; in scmi_handle_put()
2274 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); in scmi_set_handle()
2275 if (scmi_dev->handle) in scmi_set_handle()
2276 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); in scmi_set_handle()
2284 struct device *dev = sinfo->dev; in __scmi_xfer_info_init()
2285 const struct scmi_desc *desc = sinfo->desc; in __scmi_xfer_info_init()
2287 /* Pre-allocated messages, no more than what hdr.seq can support */ in __scmi_xfer_info_init()
2288 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { in __scmi_xfer_info_init()
2290 "Invalid maximum messages %d, not in range [1 - %lu]\n", in __scmi_xfer_info_init()
2291 info->max_msg, MSG_TOKEN_MAX); in __scmi_xfer_info_init()
2292 return -EINVAL; in __scmi_xfer_info_init()
2295 hash_init(info->pending_xfers); in __scmi_xfer_info_init()
2298 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX, in __scmi_xfer_info_init()
2300 if (!info->xfer_alloc_table) in __scmi_xfer_info_init()
2301 return -ENOMEM; in __scmi_xfer_info_init()
2305 * pre-initialize the buffer pointer to pre-allocated buffers and in __scmi_xfer_info_init()
2308 INIT_HLIST_HEAD(&info->free_xfers); in __scmi_xfer_info_init()
2309 for (i = 0; i < info->max_msg; i++) { in __scmi_xfer_info_init()
2312 return -ENOMEM; in __scmi_xfer_info_init()
2314 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
2316 if (!xfer->rx.buf) in __scmi_xfer_info_init()
2317 return -ENOMEM; in __scmi_xfer_info_init()
2319 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
2320 init_completion(&xfer->done); in __scmi_xfer_info_init()
2321 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
2324 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
2327 spin_lock_init(&info->xfer_lock); in __scmi_xfer_info_init()
2334 const struct scmi_desc *desc = sinfo->desc; in scmi_channels_max_msg_configure()
2336 if (!desc->ops->get_max_msg) { in scmi_channels_max_msg_configure()
2337 sinfo->tx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2338 sinfo->rx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2342 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2344 return -EINVAL; in scmi_channels_max_msg_configure()
2345 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2347 /* RX channel is optional so it can be skipped */ in scmi_channels_max_msg_configure()
2348 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2350 sinfo->rx_minfo.max_msg = in scmi_channels_max_msg_configure()
2351 desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2365 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); in scmi_xfer_info_init()
2366 if (!ret && !idr_is_empty(&sinfo->rx_idr)) in scmi_xfer_info_init()
2367 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); in scmi_xfer_info_init()
2383 idr = tx ? &info->tx_idr : &info->rx_idr; in scmi_chan_setup()
2385 if (!info->desc->ops->chan_available(of_node, idx)) { in scmi_chan_setup()
2387 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ in scmi_chan_setup()
2388 return -EINVAL; in scmi_chan_setup()
2392 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_chan_setup()
2394 return -ENOMEM; in scmi_chan_setup()
2396 cinfo->is_p2a = !tx; in scmi_chan_setup()
2397 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; in scmi_chan_setup()
2401 idx ? "rx" : "tx", prot_id); in scmi_chan_setup()
2403 tdev = scmi_device_create(of_node, info->dev, prot_id, name); in scmi_chan_setup()
2405 dev_err(info->dev, in scmi_chan_setup()
2407 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2408 return -EINVAL; in scmi_chan_setup()
2412 cinfo->id = prot_id; in scmi_chan_setup()
2413 cinfo->dev = &tdev->dev; in scmi_chan_setup()
2414 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); in scmi_chan_setup()
2417 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2418 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2422 if (tx && is_polling_required(cinfo, info->desc)) { in scmi_chan_setup()
2423 if (is_transport_polling_capable(info->desc)) in scmi_chan_setup()
2424 dev_info(&tdev->dev, in scmi_chan_setup()
2425 "Enabled polling mode TX channel - prot_id:%d\n", in scmi_chan_setup()
2428 dev_warn(&tdev->dev, in scmi_chan_setup()
2435 dev_err(info->dev, in scmi_chan_setup()
2440 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2441 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2446 cinfo->handle = &info->handle; in scmi_chan_setup()
2457 /* Rx is optional, report only memory errors */ in scmi_txrx_setup()
2459 if (ret && ret != -ENOMEM) in scmi_txrx_setup()
2467 * scmi_channels_setup - Helper to initialize all required channels
2477 * Note that, even though a pair of TX/RX channels is associated to each
2487 struct device_node *child, *top_np = info->dev->of_node; in scmi_channels_setup()
2501 dev_err(info->dev, in scmi_channels_setup()
2518 if (cinfo->dev) { in scmi_chan_destroy()
2519 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_chan_destroy()
2520 struct scmi_device *sdev = to_scmi_dev(cinfo->dev); in scmi_chan_destroy()
2522 of_node_put(cinfo->dev->of_node); in scmi_chan_destroy()
2523 scmi_device_destroy(info->dev, id, sdev->name); in scmi_chan_destroy()
2524 cinfo->dev = NULL; in scmi_chan_destroy()
2535 idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_channels()
2545 scmi_cleanup_channels(info, &info->tx_idr); in scmi_cleanup_txrx_channels()
2547 scmi_cleanup_channels(info, &info->rx_idr); in scmi_cleanup_txrx_channels()
2557 if (!strncmp(sdev->name, "__scmi_transport_device", 23) || in scmi_bus_notifier()
2558 sdev->dev.parent != info->dev) in scmi_bus_notifier()
2567 scmi_handle_put(sdev->handle); in scmi_bus_notifier()
2568 sdev->handle = NULL; in scmi_bus_notifier()
2574 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev), in scmi_bus_notifier()
2575 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ? in scmi_bus_notifier()
2588 np = idr_find(&info->active_protocols, id_table->protocol_id); in scmi_device_request_notifier()
2592 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n", in scmi_device_request_notifier()
2593 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-", in scmi_device_request_notifier()
2594 id_table->name, id_table->protocol_id); in scmi_device_request_notifier()
2598 scmi_create_protocol_devices(np, info, id_table->protocol_id, in scmi_device_request_notifier()
2599 id_table->name); in scmi_device_request_notifier()
2602 scmi_destroy_protocol_devices(info, id_table->protocol_id, in scmi_device_request_notifier()
2603 id_table->name); in scmi_device_request_notifier()
2619 debugfs_remove_recursive(dbg->top_dentry); in scmi_debugfs_common_cleanup()
2620 kfree(dbg->name); in scmi_debugfs_common_cleanup()
2621 kfree(dbg->type); in scmi_debugfs_common_cleanup()
2631 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL); in scmi_debugfs_common_setup()
2635 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL); in scmi_debugfs_common_setup()
2636 if (!dbg->name) { in scmi_debugfs_common_setup()
2637 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2641 of_property_read_string(info->dev->of_node, "compatible", &c_ptr); in scmi_debugfs_common_setup()
2642 dbg->type = kstrdup(c_ptr, GFP_KERNEL); in scmi_debugfs_common_setup()
2643 if (!dbg->type) { in scmi_debugfs_common_setup()
2644 kfree(dbg->name); in scmi_debugfs_common_setup()
2645 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2649 snprintf(top_dir, 16, "%d", info->id); in scmi_debugfs_common_setup()
2653 dbg->is_atomic = info->desc->atomic_enabled && in scmi_debugfs_common_setup()
2654 is_transport_polling_capable(info->desc); in scmi_debugfs_common_setup()
2657 (char **)&dbg->name); in scmi_debugfs_common_setup()
2660 &info->atomic_threshold); in scmi_debugfs_common_setup()
2662 debugfs_create_str("type", 0400, trans, (char **)&dbg->type); in scmi_debugfs_common_setup()
2664 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic); in scmi_debugfs_common_setup()
2667 (u32 *)&info->desc->max_rx_timeout_ms); in scmi_debugfs_common_setup()
2670 (u32 *)&info->desc->max_msg_size); in scmi_debugfs_common_setup()
2673 (u32 *)&info->tx_minfo.max_msg); in scmi_debugfs_common_setup()
2676 (u32 *)&info->rx_minfo.max_msg); in scmi_debugfs_common_setup()
2678 dbg->top_dentry = top_dentry; in scmi_debugfs_common_setup()
2680 if (devm_add_action_or_reset(info->dev, in scmi_debugfs_common_setup()
2694 if (!info->dbg) in scmi_debugfs_raw_mode_setup()
2695 return -EINVAL; in scmi_debugfs_raw_mode_setup()
2698 idr_for_each_entry(&info->tx_idr, cinfo, id) { in scmi_debugfs_raw_mode_setup()
2704 dev_warn(info->dev, in scmi_debugfs_raw_mode_setup()
2705 "SCMI RAW - Error enumerating channels\n"); in scmi_debugfs_raw_mode_setup()
2709 if (!test_bit(cinfo->id, protos)) { in scmi_debugfs_raw_mode_setup()
2710 channels[num_chans++] = cinfo->id; in scmi_debugfs_raw_mode_setup()
2711 set_bit(cinfo->id, protos); in scmi_debugfs_raw_mode_setup()
2715 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry, in scmi_debugfs_raw_mode_setup()
2716 info->id, channels, num_chans, in scmi_debugfs_raw_mode_setup()
2717 info->desc, info->tx_minfo.max_msg); in scmi_debugfs_raw_mode_setup()
2718 if (IS_ERR(info->raw)) { in scmi_debugfs_raw_mode_setup()
2719 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n"); in scmi_debugfs_raw_mode_setup()
2720 ret = PTR_ERR(info->raw); in scmi_debugfs_raw_mode_setup()
2721 info->raw = NULL; in scmi_debugfs_raw_mode_setup()
2734 struct device *dev = &pdev->dev; in scmi_probe()
2735 struct device_node *child, *np = dev->of_node; in scmi_probe()
2739 return -EINVAL; in scmi_probe()
2743 return -ENOMEM; in scmi_probe()
2745 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL); in scmi_probe()
2746 if (info->id < 0) in scmi_probe()
2747 return info->id; in scmi_probe()
2749 info->dev = dev; in scmi_probe()
2750 info->desc = desc; in scmi_probe()
2751 info->bus_nb.notifier_call = scmi_bus_notifier; in scmi_probe()
2752 info->dev_req_nb.notifier_call = scmi_device_request_notifier; in scmi_probe()
2753 INIT_LIST_HEAD(&info->node); in scmi_probe()
2754 idr_init(&info->protocols); in scmi_probe()
2755 mutex_init(&info->protocols_mtx); in scmi_probe()
2756 idr_init(&info->active_protocols); in scmi_probe()
2757 mutex_init(&info->devreq_mtx); in scmi_probe()
2760 idr_init(&info->tx_idr); in scmi_probe()
2761 idr_init(&info->rx_idr); in scmi_probe()
2763 handle = &info->handle; in scmi_probe()
2764 handle->dev = info->dev; in scmi_probe()
2765 handle->version = &info->version; in scmi_probe()
2766 handle->devm_protocol_acquire = scmi_devm_protocol_acquire; in scmi_probe()
2767 handle->devm_protocol_get = scmi_devm_protocol_get; in scmi_probe()
2768 handle->devm_protocol_put = scmi_devm_protocol_put; in scmi_probe()
2771 if (!of_property_read_u32(np, "atomic-threshold-us", in scmi_probe()
2772 &info->atomic_threshold)) in scmi_probe()
2775 info->atomic_threshold); in scmi_probe()
2776 handle->is_transport_atomic = scmi_is_transport_atomic; in scmi_probe()
2778 if (desc->ops->link_supplier) { in scmi_probe()
2779 ret = desc->ops->link_supplier(dev); in scmi_probe()
2789 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
2794 &info->dev_req_nb); in scmi_probe()
2803 info->dbg = scmi_debugfs_common_setup(info); in scmi_probe()
2804 if (!info->dbg) in scmi_probe()
2825 if (info->desc->atomic_enabled && in scmi_probe()
2826 !is_transport_polling_capable(info->desc)) in scmi_probe()
2844 list_add_tail(&info->node, &scmi_list); in scmi_probe()
2866 ret = idr_alloc(&info->active_protocols, child, in scmi_probe()
2882 scmi_raw_mode_cleanup(info->raw); in scmi_probe()
2883 scmi_notification_exit(&info->handle); in scmi_probe()
2886 &info->dev_req_nb); in scmi_probe()
2888 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
2892 ida_free(&scmi_id, info->id); in scmi_probe()
2903 scmi_raw_mode_cleanup(info->raw); in scmi_remove()
2906 if (info->users) in scmi_remove()
2907 dev_warn(&pdev->dev, in scmi_remove()
2909 list_del(&info->node); in scmi_remove()
2912 scmi_notification_exit(&info->handle); in scmi_remove()
2914 mutex_lock(&info->protocols_mtx); in scmi_remove()
2915 idr_destroy(&info->protocols); in scmi_remove()
2916 mutex_unlock(&info->protocols_mtx); in scmi_remove()
2918 idr_for_each_entry(&info->active_protocols, child, id) in scmi_remove()
2920 idr_destroy(&info->active_protocols); in scmi_remove()
2923 &info->dev_req_nb); in scmi_remove()
2924 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_remove()
2929 ida_free(&scmi_id, info->id); in scmi_remove()
2939 return sprintf(buf, "%u.%u\n", info->version.major_ver, in protocol_version_show()
2940 info->version.minor_ver); in protocol_version_show()
2949 return sprintf(buf, "0x%x\n", info->version.impl_ver); in firmware_version_show()
2958 return sprintf(buf, "%s\n", info->version.vendor_id); in vendor_id_show()
2967 return sprintf(buf, "%s\n", info->version.sub_vendor_id); in sub_vendor_id_show()
2986 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2989 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2990 { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
2993 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
3002 .name = "arm-scmi",
3012 * __scmi_transports_setup - Common helper to call transport-specific
3027 for (trans = scmi_of_match; trans->data; trans++) { in __scmi_transports_setup()
3028 const struct scmi_desc *tdesc = trans->data; in __scmi_transports_setup()
3030 if ((init && !tdesc->transport_init) || in __scmi_transports_setup()
3031 (!init && !tdesc->transport_exit)) in __scmi_transports_setup()
3035 ret = tdesc->transport_init(); in __scmi_transports_setup()
3037 tdesc->transport_exit(); in __scmi_transports_setup()
3041 trans->compatible); in __scmi_transports_setup()
3078 return -EINVAL; in scmi_driver_init()
3080 /* Initialize any compiled-in transport which provided an init/exit */ in scmi_driver_init()
3124 MODULE_ALIAS("platform:arm-scmi");