/* SPDX-License-Identifier: GPL-2.0 */
/*
 * System Control and Management Interface (SCMI) Message Protocol
 * driver common header file containing some definitions, structures
 * and function prototypes used in all the different SCMI protocols.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/unaligned.h>

#include "notify.h"

#define PROTOCOL_REV_MINOR_MASK	GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK	GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
#define PROTOCOL_REV_MINOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
#define MAX_PROTOCOLS_IMP	16
#define MAX_OPPS		16

enum scmi_common_cmd {
	PROTOCOL_VERSION = 0x0,
	PROTOCOL_ATTRIBUTES = 0x1,
	PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
};

/**
 * struct scmi_msg_resp_prot_version - Response for a message
 *
 * @minor_version: Minor version of the ABI that firmware supports
 * @major_version: Major version of the ABI that firmware supports
 *
 * In general, ABI version changes follow the rule that minor version increments
 * are backward compatible. Major revision changes in ABI may not be
 * backward compatible.
 *
 * Response to a generic message with message type SCMI_MSG_VERSION
 */
struct scmi_msg_resp_prot_version {
	__le16 minor_version;
	__le16 major_version;
};

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

/*
 * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
 * order to minimize space and collisions, this should equal max_msg, i.e. the
 * maximum number of in-flight messages on a specific platform, but such value
 * is only available at runtime while kernel hashtables are statically sized:
 * pick instead as a fixed static size the maximum number of entries that can
 * fit the whole table into one 4k page.
 */
#define SCMI_PENDING_XFERS_HT_ORDER_SZ		9

/**
 * struct scmi_msg_hdr - Message(Tx/Rx) header
 *
 * @id: The identifier of the message being sent
 * @protocol_id: The identifier of the protocol used to send @id message
 * @type: The SCMI type for this message
 * @seq: The token to identify the message. When a message returns, the
 *	platform returns the whole message header unmodified including the
 *	token
 * @status: Status of the transfer once it's complete
 * @poll_completion: Indicate if the transfer needs to be polled for
 *	completion or interrupt mode is used
 */
struct scmi_msg_hdr {
	u8 id;
	u8 protocol_id;
	u8 type;
	u16 seq;
	u32 status;
	bool poll_completion;
};

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id, sequence id and type.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
	hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}
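/*
 * Worked example (illustrative only, not part of the upstream header): using
 * the field masks above, a command for the SCMI performance protocol (0x13)
 * with message id 0x7 and sequence token 0x1 packs as:
 *
 *	FIELD_PREP(MSG_ID_MASK, 0x7)           -> 0x00000007
 *	FIELD_PREP(MSG_TYPE_MASK, 0x0)         -> 0x00000000 (MSG_TYPE_COMMAND)
 *	FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x13) -> 0x00004c00
 *	FIELD_PREP(MSG_TOKEN_ID_MASK, 0x1)     -> 0x00040000
 *
 * yielding the 32-bit header 0x00044c07; on the receive path
 * unpack_scmi_header() recovers id, protocol_id and type from such a value.
 */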
/**
 * struct scmi_msg - Message(Tx/Rx) structure
 *
 * @buf: Buffer pointer
 * @len: Length of data in the Buffer
 */
struct scmi_msg {
	void *buf;
	size_t len;
};

/**
 * struct scmi_xfer - Structure representing a message flow
 *
 * @transfer_id: Unique ID for debug & profiling purpose
 * @hdr: Transmit message header
 * @tx: Transmit message
 * @rx: Receive message, the buffer should be pre-allocated to store
 *	message. If request-ACK protocol is used, we can reuse the same
 *	buffer for the rx path as we use for the tx path.
 * @done: command message transmit completion event
 * @async_done: pointer to delayed response message received event completion
 * @pending: True for xfers added to @pending_xfers hashtable
 * @node: An hlist_node reference used to store this xfer either on the
 *	free list @free_xfers or in the @pending_xfers hashtable
 * @users: A refcount to track the active users for this xfer.
 *	This is meant to protect against the possibility that, when a command
 *	transaction times out concurrently with the reception of a valid
 *	response message, the xfer could be finally put on the TX path, and
 *	so vanish, while on the RX path scmi_rx_callback() is still
 *	processing it: in such a case this refcounting will ensure that, even
 *	though the timed-out transaction will anyway cause the command
 *	request to be reported as failed by time-out, the underlying xfer
 *	cannot be discarded and possibly reused until the last user on
 *	the RX path has released it.
 * @busy: An atomic flag to ensure exclusive write access to this xfer
 * @state: The current state of this transfer, with state transitions deemed
 *	   valid being:
 *	    - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
 *	    - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
 *	      (Missing synchronous response is assumed OK and ignored)
 * @lock: A spinlock to protect state and busy fields.
 * @priv: A pointer for transport private usage.
 */
struct scmi_xfer {
	int transfer_id;
	struct scmi_msg_hdr hdr;
	struct scmi_msg tx;
	struct scmi_msg rx;
	struct completion done;
	struct completion *async_done;
	bool pending;
	struct hlist_node node;
	refcount_t users;
#define SCMI_XFER_FREE		0
#define SCMI_XFER_BUSY		1
	atomic_t busy;
#define SCMI_XFER_SENT_OK	0
#define SCMI_XFER_RESP_OK	1
#define SCMI_XFER_DRESP_OK	2
	int state;
	/* A lock to protect state and busy fields */
	spinlock_t lock;
	void *priv;
};

/*
 * A helper macro to lookup an xfer from the @pending_xfers hashtable
 * using the message sequence number token as a key.
 */
#define XFER_FIND(__ht, __k)					\
({								\
	typeof(__k) k_ = __k;					\
	struct scmi_xfer *xfer_ = NULL;				\
								\
	hash_for_each_possible((__ht), xfer_, node, k_)		\
		if (xfer_->hdr.seq == k_)			\
			break;					\
	xfer_;							\
})
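/*
 * Illustrative usage sketch (not part of the upstream header): assuming a
 * hashtable declared as
 * DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ), the
 * in-flight transfer matching a received header can be looked up by its
 * sequence token; the minfo and msg_hdr names below are hypothetical:
 *
 *	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
 *	struct scmi_xfer *xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
 *
 * A NULL result means no pending transfer currently carries that token.
 */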
struct scmi_xfer_ops;

/**
 * struct scmi_protocol_handle - Reference to an initialized protocol instance
 *
 * @dev: A reference to the associated SCMI instance device (handle->dev).
 * @xops: A reference to a struct holding refs to the core xfer operations that
 *	  can be used by the protocol implementation to generate SCMI messages.
 * @set_priv: A method to set protocol private data for this instance.
 * @get_priv: A method to get protocol private data previously set.
 *
 * This structure represents a protocol initialized against a specific SCMI
 * instance and it will be used as follows:
 * - as a parameter fed from the core to the protocol initialization code so
 *   that it can access the core xfer operations to build and generate SCMI
 *   messages exclusively for the specific underlying protocol instance.
 * - as an opaque handle fed by an SCMI driver user when it tries to access
 *   this protocol through its own protocol operations.
 *   In this case this handle will be returned as an opaque object together
 *   with the related protocol operations when the SCMI driver tries to access
 *   the protocol.
 */
struct scmi_protocol_handle {
	struct device *dev;
	const struct scmi_xfer_ops *xops;
	int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
	void *(*get_priv)(const struct scmi_protocol_handle *ph);
};

/**
 * struct scmi_xfer_ops - References to the core SCMI xfer operations.
 * @version_get: Get the version of this protocol.
 * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
 * @reset_rx_to_maxsz: Reset rx size to max transport size.
 * @do_xfer: Do the SCMI transfer.
 * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
 * @xfer_put: Free the xfer slot.
 *
 * Note that all these operations expect a protocol handle as first parameter;
 * they then internally use it to infer the underlying protocol number: this
 * way it is not possible for a protocol implementation to forge messages for
 * another protocol.
 */
struct scmi_xfer_ops {
	int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
	int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
			     size_t tx_size, size_t rx_size,
			     struct scmi_xfer **p);
	void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
				  struct scmi_xfer *xfer);
	int (*do_xfer)(const struct scmi_protocol_handle *ph,
		       struct scmi_xfer *xfer);
	int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
				     struct scmi_xfer *xfer);
	void (*xfer_put)(const struct scmi_protocol_handle *ph,
			 struct scmi_xfer *xfer);
};
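/*
 * Illustrative sketch (not part of the upstream header) of how a protocol
 * implementation typically drives the xfer operations above; the function
 * name and the single u32 response layout are hypothetical:
 *
 *	static int scmi_dummy_attributes_get(const struct scmi_protocol_handle *ph,
 *					     u32 *attributes)
 *	{
 *		int ret;
 *		struct scmi_xfer *t;
 *
 *		ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
 *					      sizeof(*attributes), &t);
 *		if (ret)
 *			return ret;
 *
 *		ret = ph->xops->do_xfer(ph, t);
 *		if (!ret)
 *			*attributes = get_unaligned_le32(t->rx.buf);
 *
 *		ph->xops->xfer_put(ph, t);
 *		return ret;
 *	}
 */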
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp);

typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);

/**
 * struct scmi_protocol - Protocol descriptor
 * @id: Protocol ID.
 * @owner: Module reference if any.
 * @instance_init: Mandatory protocol initialization function.
 * @instance_deinit: Optional protocol de-initialization function.
 * @ops: Optional reference to the operations provided by the protocol and
 *	 exposed in scmi_protocol.h.
 * @events: An optional reference to the events supported by this protocol.
 */
struct scmi_protocol {
	const u8				id;
	struct module				*owner;
	const scmi_prot_init_ph_fn_t		instance_init;
	const scmi_prot_init_ph_fn_t		instance_deinit;
	const void				*ops;
	const struct scmi_protocol_events	*events;
};

int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);

#define DECLARE_SCMI_REGISTER_UNREGISTER(func)		\
	int __init scmi_##func##_register(void);	\
	void __exit scmi_##func##_unregister(void)
DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);

#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto)	\
static const struct scmi_protocol *__this_proto = &(proto);	\
								\
int __init scmi_##name##_register(void)			\
{								\
	return scmi_protocol_register(__this_proto);		\
}								\
								\
void __exit scmi_##name##_unregister(void)			\
{								\
	scmi_protocol_unregister(__this_proto);			\
}
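/*
 * Illustrative sketch (not part of the upstream header): a protocol
 * implementation fills a struct scmi_protocol and uses the macro above to
 * emit its register/unregister helpers. The "dummy" protocol, its ID and
 * its init/ops symbols below are hypothetical names:
 *
 *	static const struct scmi_protocol scmi_dummy = {
 *		.id = 0x99,
 *		.owner = THIS_MODULE,
 *		.instance_init = &scmi_dummy_protocol_init,
 *		.ops = &scmi_dummy_proto_ops,
 *	};
 *
 *	DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(dummy, scmi_dummy)
 */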
const struct scmi_protocol *scmi_protocol_get(int protocol_id);
void scmi_protocol_put(int protocol_id);

int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);

/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing an SCMI channel's information
 *
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 * @transport_info: Transport layer related information
 */
struct scmi_chan_info {
	struct device *dev;
	struct scmi_handle *handle;
	void *transport_info;
};

/**
 * struct scmi_transport_ops - Structure representing the SCMI transport
 *	operations
 *
 * @link_supplier: Optional callback to add link to a supplier device
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @get_max_msg: Optional callback to provide max_msg dynamically
 *		 Returns the maximum number of messages for the channel type
 *		 (tx or rx) that can be pending simultaneously in the system
 * @send_message: Callback to send a message
 * @mark_txdone: Callback to mark tx as done
 * @fetch_response: Callback to fetch response
 * @fetch_notification: Callback to fetch notification
 * @clear_channel: Callback to clear a channel
 * @poll_done: Callback to poll transfer status
 */
struct scmi_transport_ops {
	int (*link_supplier)(struct device *dev);
	bool (*chan_available)(struct device *dev, int idx);
	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx);
	int (*chan_free)(int id, void *p, void *data);
	unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
	int (*send_message)(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer);
	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
	void (*fetch_response)(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_chan_info *cinfo,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_chan_info *cinfo);
	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};

int scmi_protocol_device_request(const struct scmi_device_id *id_table);
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
struct scmi_device *scmi_child_dev_find(struct device *parent,
					int prot_id, const char *name);

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @transport_init: An optional function that a transport can provide to
 *		    initialize some transport-specific setup during SCMI core
 *		    initialization, so ahead of SCMI core probing.
 * @transport_exit: An optional function that a transport can provide to
 *		    de-initialize some transport-specific setup during SCMI core
 *		    de-initialization, so after SCMI core removal.
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
 * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
 *	     be pending simultaneously in the system. May be overridden by the
 *	     get_max_msg op.
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int (*transport_init)(void);
	void (*transport_exit)(void);
	const struct scmi_transport_ops *ops;
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};
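/*
 * Illustrative sketch (not part of the upstream header): a transport backend
 * wires its callbacks and limits into a struct scmi_desc such as the one
 * below; the "dummy" ops and the numeric limits are hypothetical values,
 * loosely modelled on the real scmi_mailbox_desc/scmi_smc_desc instances:
 *
 *	static const struct scmi_transport_ops scmi_dummy_ops = {
 *		.chan_available = dummy_chan_available,
 *		.chan_setup = dummy_chan_setup,
 *		.chan_free = dummy_chan_free,
 *		.send_message = dummy_send_message,
 *		.fetch_response = dummy_fetch_response,
 *	};
 *
 *	const struct scmi_desc scmi_dummy_desc = {
 *		.ops = &scmi_dummy_ops,
 *		.max_rx_timeout_ms = 30,
 *		.max_msg = 20,
 *		.max_msg_size = 128,
 *	};
 */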
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
extern const struct scmi_desc scmi_smc_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif

void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);

/* shmem related declarations */
struct scmi_shared_mem;

void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
		      struct scmi_xfer *xfer);
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer);
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
			      size_t max_len, struct scmi_xfer *xfer);
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
		     struct scmi_xfer *xfer);

/* declarations for message passing transports */
struct scmi_msg_payld;

/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))

size_t msg_response_size(struct scmi_xfer *xfer);
size_t msg_command_size(struct scmi_xfer *xfer);
void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
u32 msg_read_header(struct scmi_msg_payld *msg);
void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
			struct scmi_xfer *xfer);
void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
			    size_t max_len, struct scmi_xfer *xfer);

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
#endif /* _SCMI_COMMON_H */