// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments' Message Manager Driver
 *
 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti-msgmgr.h>

#define Q_DATA_OFFSET(proxy, queue, reg)	\
		((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
#define Q_STATE_OFFSET(queue)			((queue) * 0x4)
#define Q_STATE_ENTRY_COUNT_MASK		(0xFFF000)

#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
	(SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)

#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))

#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)

#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
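/*
 * Worked example (illustrative only): for secure proxy thread 2, data
 * register 1 sits at SPROXY_THREAD_DATA_OFFSET(2, 1) = 0x1000 * 2 +
 * 1 * 0x4 + 0x4 = 0x2008, its status register at
 * SPROXY_THREAD_STATUS_OFFSET(2) = 0x2000, and its control register at
 * SPROXY_THREAD_CTRL_OFFSET(2) = 0x1000 + 0x2000 = 0x3000.
 */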
/**
 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
 * @queue_id:	Queue Number for this path
 * @proxy_id:	Proxy ID representing the processor in SoC
 * @is_tx:	Is this a transmit path?
 */
struct ti_msgmgr_valid_queue_desc {
	u8 queue_id;
	u8 proxy_id;
	bool is_tx;
};

/**
 * struct ti_msgmgr_desc - Description of message manager integration
 * @queue_count:	Number of Queues
 * @max_message_size:	Message size in bytes
 * @max_messages:	Number of messages
 * @data_first_reg:	First data register for proxy data region
 * @data_last_reg:	Last data register for proxy data region
 * @status_cnt_mask:	Mask for getting the status value
 * @status_err_mask:	Mask for getting the error value, if applicable
 * @tx_polled:		Do I need to use polled mechanism for tx
 * @tx_poll_timeout_ms:	Timeout in ms if polled
 * @valid_queues:	List of Valid queues that the processor can access
 * @data_region_name:	Name of the proxy data region
 * @status_region_name:	Name of the proxy status region
 * @ctrl_region_name:	Name of the proxy control region
 * @num_valid_queues:	Number of valid queues
 * @is_sproxy:		Is this a Secure Proxy instance?
 *
 * This structure is used in of match data to describe how integration
 * for a specific compatible SoC is done.
 */
struct ti_msgmgr_desc {
	u8 queue_count;
	u8 max_message_size;
	u8 max_messages;
	u8 data_first_reg;
	u8 data_last_reg;
	u32 status_cnt_mask;
	u32 status_err_mask;
	bool tx_polled;
	int tx_poll_timeout_ms;
	const struct ti_msgmgr_valid_queue_desc *valid_queues;
	const char *data_region_name;
	const char *status_region_name;
	const char *ctrl_region_name;
	int num_valid_queues;
	bool is_sproxy;
};

/**
 * struct ti_queue_inst - Description of a queue instance
 * @name:	Queue Name
 * @queue_id:	Queue Identifier as mapped on SoC
 * @proxy_id:	Proxy Identifier as mapped on SoC
 * @irq:	IRQ for Rx Queue
 * @is_tx:	'true' if transmit queue, else, 'false'
 * @queue_buff_start:	First register of Data Buffer
 * @queue_buff_end:	Last (or confirmation) register of Data buffer
 * @queue_state:	Queue status register
 * @queue_ctrl:	Queue Control register
 * @chan:	Mailbox channel
 * @rx_buff:	Receive buffer pointer allocated at probe, max_message_size
 * @polled_rx_mode:	Use polling for rx instead of interrupts
 */
struct ti_queue_inst {
	char name[30];
	u8 queue_id;
	u8 proxy_id;
	int irq;
	bool is_tx;
	void __iomem *queue_buff_start;
	void __iomem *queue_buff_end;
	void __iomem *queue_state;
	void __iomem *queue_ctrl;
	struct mbox_chan *chan;
	u32 *rx_buff;
	bool polled_rx_mode;
};

/**
 * struct ti_msgmgr_inst - Description of a Message Manager Instance
 * @dev:	device pointer corresponding to the Message Manager instance
 * @desc:	Description of the SoC integration
 * @queue_proxy_region:	Queue proxy region where queue buffers are located
 * @queue_state_debug_region:	Queue status register regions
 * @queue_ctrl_region:	Queue Control register regions
 * @num_valid_queues:	Number of valid queues defined for the processor
 *		Note: other queues are probably reserved for other processors
 *		in the SoC.
 * @qinsts:	Array of valid Queue Instances for the Processor
 * @mbox:	Mailbox Controller
 * @chans:	Array for channels corresponding to the Queue Instances.
 */
struct ti_msgmgr_inst {
	struct device *dev;
	const struct ti_msgmgr_desc *desc;
	void __iomem *queue_proxy_region;
	void __iomem *queue_state_debug_region;
	void __iomem *queue_ctrl_region;
	u8 num_valid_queues;
	struct ti_queue_inst *qinsts;
	struct mbox_controller mbox;
	struct mbox_chan *chans;
};
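/*
 * Illustrative decode example for the helper below: with the message
 * manager's count mask of 0xFFF000, __ffs() yields a shift of 12, so a raw
 * status value of 0x003000 decodes to 3 pending messages. For secure proxy
 * the mask is 0xFF with a shift of 0.
 */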
/**
 * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
 * @d:		Description of message manager
 * @qinst:	Queue instance for which we check the number of pending messages
 *
 * Return: number of messages pending in the queue (0 == no pending messages)
 */
static inline int
ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
				 struct ti_queue_inst *qinst)
{
	u32 val;
	u32 status_cnt_mask = d->status_cnt_mask;

	/*
	 * We cannot use relaxed operation here - update may happen
	 * real-time.
	 */
	val = readl(qinst->queue_state) & status_cnt_mask;
	val >>= __ffs(status_cnt_mask);

	return val;
}

/**
 * ti_msgmgr_queue_is_error() - Check to see if there is queue error
 * @d:		Description of message manager
 * @qinst:	Queue instance for which we check for errors
 *
 * Return: true if error, else false
 */
static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
					    struct ti_queue_inst *qinst)
{
	u32 val;

	/* Msgmgr has no error detection */
	if (!d->is_sproxy)
		return false;

	/*
	 * We cannot use relaxed operation here - update may happen
	 * real-time.
	 */
	val = readl(qinst->queue_state) & d->status_err_mask;

	return val ? true : false;
}

static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
				   const struct ti_msgmgr_desc *desc)
{
	int num_words;
	struct ti_msgmgr_message message;
	void __iomem *data_reg;
	u32 *word_data;

	/*
	 * I have no idea about the protocol being used to communicate with the
	 * remote producer - 0 could be valid data, so I won't make a judgement
	 * of how many bytes I should be reading. Let the client figure this
	 * out.. I just read the full message and pass it on..
	 */
	message.len = desc->max_message_size;
	message.buf = (u8 *)qinst->rx_buff;

	/*
	 * NOTE about register access involved here:
	 * the hardware block is implemented with 32bit access operations and no
	 * support for data splitting. We don't want the hardware to misbehave
	 * with sub 32bit access - For example: if the last register read is
	 * split into byte wise access, it can result in the queue getting
	 * stuck or indeterminate behavior. An out of order read operation may
	 * result in weird data results as well.
	 * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
	 * we depend on readl for the purpose.
	 *
	 * Also note that the final register read automatically marks the
	 * queue message as read.
	 */
	for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
	     num_words = (desc->max_message_size / sizeof(u32));
	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
		*word_data = readl(data_reg);

	/*
	 * Last register read automatically clears the IRQ if only 1 message
	 * is pending - so send the data up the stack..
	 * NOTE: Client is expected to be as optimal as possible, since
	 * we invoke the handler in IRQ context.
	 */
	mbox_chan_received_data(chan, (void *)&message);

	return 0;
}
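/*
 * Illustrative contrast for the access constraint documented above: on some
 * architectures memcpy_fromio() may fall back to sub-word accesses, and
 * __ioread32_copy() uses raw accessors without the ordering guarantees of
 * readl(); either could trigger the misbehavior described, hence the
 * explicit per-word readl() loop.
 */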
static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
{
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	struct ti_queue_inst *qinst = chan->con_priv;
	const struct ti_msgmgr_desc *desc = inst->desc;
	int msg_count;
	int ret;

	ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
					(msg_count & desc->status_cnt_mask),
					10, timeout_us);
	if (ret != 0)
		return ret;

	ti_msgmgr_queue_rx_data(chan, qinst, desc);

	return 0;
}

/**
 * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
 * @irq:	Interrupt number
 * @p:		Channel Pointer
 *
 * Return: IRQ_NONE if there is no instance or the interrupt is not ours,
 *	   IRQ_HANDLED if the rx interrupt was successfully handled.
 */
static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	struct ti_queue_inst *qinst = chan->con_priv;
	const struct ti_msgmgr_desc *desc;
	int msg_count;

	if (WARN_ON(!inst)) {
		dev_err(dev, "no platform drv data??\n");
		return IRQ_NONE;
	}

	/* Do I have an invalid interrupt source? */
	if (qinst->is_tx) {
		dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
			qinst->name);
		return IRQ_NONE;
	}

	desc = inst->desc;
	if (ti_msgmgr_queue_is_error(desc, qinst)) {
		dev_err(dev, "Error on Rx channel %s\n", qinst->name);
		return IRQ_NONE;
	}

	/* Do I actually have messages to read? */
	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
	if (!msg_count) {
		/* Shared IRQ? */
		dev_dbg(dev, "Spurious event - 0 pending data!\n");
		return IRQ_NONE;
	}

	ti_msgmgr_queue_rx_data(chan, qinst, desc);

	return IRQ_HANDLED;
}

/**
 * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
 * @chan:	Channel Pointer
 *
 * Return: 'true' if there is pending rx data, 'false' if there is none.
 */
static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
{
	struct ti_queue_inst *qinst = chan->con_priv;
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	const struct ti_msgmgr_desc *desc = inst->desc;
	int msg_count;

	if (qinst->is_tx)
		return false;

	if (ti_msgmgr_queue_is_error(desc, qinst)) {
		dev_err(dev, "Error on channel %s\n", qinst->name);
		return false;
	}

	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);

	return msg_count ? true : false;
}

/**
 * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
 * @chan:	Channel pointer
 *
 * Return: 'true' if there is no pending tx data, 'false' if there is any.
 */
static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
{
	struct ti_queue_inst *qinst = chan->con_priv;
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	const struct ti_msgmgr_desc *desc = inst->desc;
	int msg_count;

	if (!qinst->is_tx)
		return false;

	if (ti_msgmgr_queue_is_error(desc, qinst)) {
		dev_err(dev, "Error on channel %s\n", qinst->name);
		return false;
	}

	msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);

	if (desc->is_sproxy) {
		/* In secure proxy, msg_count indicates how many we can send */
		return msg_count ? true : false;
	}

	/* if we have any messages pending.. */
	return msg_count ? false : true;
}

static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
{
	struct ti_queue_inst *qinst;

	if (!chan)
		return false;

	qinst = chan->con_priv;
	return qinst->polled_rx_mode;
}
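/*
 * Illustrative example for the tx path below: a 6-byte message occupies one
 * full 32-bit write plus 2 trailing bytes; the trailing word is masked with
 * 0xFFFFFFFF >> (8 * (4 - 2)) = 0x0000FFFF so the unused upper bytes are
 * written as zero.
 */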
/**
 * ti_msgmgr_send_data() - Send data
 * @chan:	Channel Pointer
 * @data:	ti_msgmgr_message * Message Pointer
 *
 * Return: 0 if all goes good, else appropriate error value.
 */
static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
{
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	const struct ti_msgmgr_desc *desc;
	struct ti_queue_inst *qinst = chan->con_priv;
	int num_words, trail_bytes;
	struct ti_msgmgr_message *message = data;
	void __iomem *data_reg;
	u32 *word_data;
	int ret = 0;

	if (WARN_ON(!inst)) {
		dev_err(dev, "no platform drv data??\n");
		return -EINVAL;
	}
	desc = inst->desc;

	if (ti_msgmgr_queue_is_error(desc, qinst)) {
		dev_err(dev, "Error on channel %s\n", qinst->name);
		return -EIO;
	}

	if (desc->max_message_size < message->len) {
		dev_err(dev, "Queue %s message length %zu > max %d\n",
			qinst->name, message->len, desc->max_message_size);
		return -EINVAL;
	}

	/* NOTE: Constraints similar to the rx path exist here as well */
	for (data_reg = qinst->queue_buff_start,
	     num_words = message->len / sizeof(u32),
	     word_data = (u32 *)message->buf;
	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
		writel(*word_data, data_reg);

	trail_bytes = message->len % sizeof(u32);
	if (trail_bytes) {
		u32 data_trail = *word_data;

		/* Ensure all unused data is 0 */
		data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
		writel(data_trail, data_reg);
		data_reg += sizeof(u32);
	}

	/*
	 * 'data_reg' indicates next register to write. If we did not already
	 * write on tx complete reg(last reg), we must do so for transmit.
	 * In addition, we also need to make sure all intermediate data
	 * registers(if any required), are reset to 0 for TISCI backward
	 * compatibility to be maintained.
	 */
	while (data_reg <= qinst->queue_buff_end) {
		writel(0, data_reg);
		data_reg += sizeof(u32);
	}

	/* If we are in polled mode, wait for a response before proceeding */
	if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
		ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
						      message->timeout_rx_ms * 1000);

	return ret;
}
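/*
 * Illustrative example for the lookup below: the rx interrupt names follow
 * the "rx_%03d" pattern, e.g. "rx_005" for message manager queue 5 or
 * "rx_011" for secure proxy thread 11 (IDs here are hypothetical).
 */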
/**
 * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
 * @dev:	device pointer
 * @d:		descriptor for ti_msgmgr
 * @qinst:	Queue instance
 * @chan:	Channel pointer
 *
 * Return: 0 if all went well, else corresponding error value
 */
static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
				      const struct ti_msgmgr_desc *d,
				      struct ti_queue_inst *qinst,
				      struct mbox_chan *chan)
{
	int ret = 0;
	char of_rx_irq_name[7];
	struct device_node *np;

	snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
		 "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);

	/* Get the IRQ if not found */
	if (qinst->irq < 0) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODATA;
		qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
		of_node_put(np);

		if (qinst->irq < 0) {
			dev_err(dev,
				"QID %d PID %d: No IRQ[%s]: %d\n",
				qinst->queue_id, qinst->proxy_id,
				of_rx_irq_name, qinst->irq);
			return qinst->irq;
		}
	}

	/* With the expectation that the IRQ might be shared in SoC */
	ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
			  IRQF_SHARED, qinst->name, chan);
	if (ret) {
		dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
			qinst->irq, qinst->name, ret);
	}

	return ret;
}

/**
 * ti_msgmgr_queue_startup() - Startup queue
 * @chan:	Channel pointer
 *
 * Return: 0 if all goes good, else return corresponding error value
 */
static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
{
	struct device *dev = chan->mbox->dev;
	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
	struct ti_queue_inst *qinst = chan->con_priv;
	const struct ti_msgmgr_desc *d = inst->desc;
	int ret;
	int msg_count;

	/*
	 * If sproxy is starting and can send messages, we are a Tx thread,
	 * else Rx
	 */
	if (d->is_sproxy) {
		qinst->is_tx = (readl(qinst->queue_ctrl) &
				SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;

		msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);

		if (!msg_count && qinst->is_tx) {
			dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
				qinst->name);
			return -EINVAL;
		}
	}

	if (!qinst->is_tx) {
		/* Allocate usage buffer for rx */
		qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
		if (!qinst->rx_buff)
			return -ENOMEM;
		/* Request IRQ */
		ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
		if (ret) {
			kfree(qinst->rx_buff);
			return ret;
		}
	}

	return 0;
}

/**
 * ti_msgmgr_queue_shutdown() - Shutdown the queue
 * @chan:	Channel pointer
 */
static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
{
	struct ti_queue_inst *qinst = chan->con_priv;

	if (!qinst->is_tx) {
		free_irq(qinst->irq, chan);
		kfree(qinst->rx_buff);
	}
}
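/*
 * Illustrative device tree usage for the translation below (sketch; node
 * labels and IDs are hypothetical): message manager clients pass a queue ID
 * and proxy ID pair, while secure proxy clients pass a single thread ID:
 *
 *	mboxes = <&msgmgr 5 2>;		// K2G: queue 5, proxy 2
 *	mboxes = <&secure_proxy 11>;	// AM654: thread 11
 */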
/**
 * ti_msgmgr_of_xlate() - Translation of phandle to queue
 * @mbox:	Mailbox controller
 * @p:		phandle pointer
 *
 * Return: Mailbox channel corresponding to the queue, else return error
 * pointer.
 */
static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
					    const struct of_phandle_args *p)
{
	struct ti_msgmgr_inst *inst;
	int req_qid, req_pid;
	struct ti_queue_inst *qinst;
	const struct ti_msgmgr_desc *d;
	int i, ncells;

	inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
	if (WARN_ON(!inst))
		return ERR_PTR(-EINVAL);

	d = inst->desc;

	if (d->is_sproxy)
		ncells = 1;
	else
		ncells = 2;
	if (p->args_count != ncells) {
		dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
			p->args_count, ncells);
		return ERR_PTR(-EINVAL);
	}
	if (ncells == 1) {
		req_qid = 0;
		req_pid = p->args[0];
	} else {
		req_qid = p->args[0];
		req_pid = p->args[1];
	}

	if (d->is_sproxy) {
		if (req_pid >= d->num_valid_queues)
			goto err;
		qinst = &inst->qinsts[req_pid];
		return qinst->chan;
	}

	for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
	     i++, qinst++) {
		if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
			return qinst->chan;
	}

err:
	dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
		req_qid, req_pid, p->np);
	return ERR_PTR(-ENOENT);
}
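/*
 * Worked example (illustrative only) for the address computations below:
 * for K2G proxy 0, queue 5, the first data register (reg 16) is at
 * Q_DATA_OFFSET(0, 5, 16) = 0x10000 * 0 + 0x80 * 5 + 16 * 4 = 0x2c0 within
 * the queue proxy region.
 */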
"tx" : "rx"; 684 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d", 685 dev_name(dev), dir, qinst->queue_id, qinst->proxy_id); 686 } 687 688 qinst->chan = chan; 689 690 /* Setup an error value for IRQ - Lazy allocation */ 691 qinst->irq = -EINVAL; 692 693 chan->con_priv = qinst; 694 695 dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n", 696 idx, qinst->queue_id, qinst->proxy_id, qinst->irq, 697 qinst->queue_buff_start, qinst->queue_buff_end); 698 return 0; 699 } 700 701 static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable) 702 { 703 if (enable) { 704 disable_irq(qinst->irq); 705 qinst->polled_rx_mode = true; 706 } else { 707 enable_irq(qinst->irq); 708 qinst->polled_rx_mode = false; 709 } 710 711 return 0; 712 } 713 714 static int ti_msgmgr_suspend(struct device *dev) 715 { 716 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 717 struct ti_queue_inst *qinst; 718 int i; 719 720 /* 721 * We must switch operation to polled mode now as drivers and the genpd 722 * layer may make late TI SCI calls to change clock and device states 723 * from the noirq phase of suspend. 724 */ 725 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { 726 if (!qinst->is_tx) 727 ti_msgmgr_queue_rx_set_polled_mode(qinst, true); 728 } 729 730 return 0; 731 } 732 733 static int ti_msgmgr_resume(struct device *dev) 734 { 735 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 736 struct ti_queue_inst *qinst; 737 int i; 738 739 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { 740 if (!qinst->is_tx) 741 ti_msgmgr_queue_rx_set_polled_mode(qinst, false); 742 } 743 744 return 0; 745 } 746 747 static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume); 748 749 /* Queue operations */ 750 static const struct mbox_chan_ops ti_msgmgr_chan_ops = { 751 .startup = ti_msgmgr_queue_startup, 752 .shutdown = ti_msgmgr_queue_shutdown, 753 .peek_data = ti_msgmgr_queue_peek_data, 754 .last_tx_done = ti_msgmgr_last_tx_done, 755 .send_data = ti_msgmgr_send_data, 756 }; 757 758 /* Keystone K2G SoC integration details */ 759 static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = { 760 {.queue_id = 0, .proxy_id = 0, .is_tx = true,}, 761 {.queue_id = 1, .proxy_id = 0, .is_tx = true,}, 762 {.queue_id = 2, .proxy_id = 0, .is_tx = true,}, 763 {.queue_id = 3, .proxy_id = 0, .is_tx = true,}, 764 {.queue_id = 5, .proxy_id = 2, .is_tx = false,}, 765 {.queue_id = 56, .proxy_id = 1, .is_tx = true,}, 766 {.queue_id = 57, .proxy_id = 2, .is_tx = false,}, 767 {.queue_id = 58, .proxy_id = 3, .is_tx = true,}, 768 {.queue_id = 59, .proxy_id = 4, .is_tx = true,}, 769 {.queue_id = 60, .proxy_id = 5, .is_tx = true,}, 770 {.queue_id = 61, .proxy_id = 6, .is_tx = true,}, 771 }; 772 773 static const struct ti_msgmgr_desc k2g_desc = { 774 .queue_count = 64, 775 .max_message_size = 64, 776 .max_messages = 128, 777 .data_region_name = "queue_proxy_region", 778 .status_region_name = "queue_state_debug_region", 779 .data_first_reg = 16, 780 .data_last_reg = 31, 781 .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK, 782 .tx_polled = false, 783 .valid_queues = k2g_valid_queues, 784 .num_valid_queues = ARRAY_SIZE(k2g_valid_queues), 785 .is_sproxy = false, 786 }; 787 788 static const struct ti_msgmgr_desc am654_desc = { 789 .queue_count = 190, 790 .num_valid_queues = 190, 791 .max_message_size = 60, 792 .data_region_name = "target_data", 793 .status_region_name = "rt", 794 .ctrl_region_name = "scfg", 795 .data_first_reg = 0, 796 
static const struct ti_msgmgr_desc am654_desc = {
	.queue_count = 190,
	.num_valid_queues = 190,
	.max_message_size = 60,
	.data_region_name = "target_data",
	.status_region_name = "rt",
	.ctrl_region_name = "scfg",
	.data_first_reg = 0,
	.data_last_reg = 14,
	.status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
	.tx_polled = false,
	.is_sproxy = true,
};

static const struct of_device_id ti_msgmgr_of_match[] = {
	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
	{.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
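/*
 * Illustrative secure proxy node consumed by probe below (sketch; addresses
 * elided, region names per am654_desc above — see the DT binding for real
 * values):
 *
 *	secure_proxy: mailbox@32c00000 {
 *		compatible = "ti,am654-secure-proxy";
 *		reg-names = "target_data", "rt", "scfg";
 *		reg = <...>, <...>, <...>;
 *		#mbox-cells = <1>;
 *	};
 */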
static int ti_msgmgr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	struct device_node *np;
	const struct ti_msgmgr_desc *desc;
	struct ti_msgmgr_inst *inst;
	struct ti_queue_inst *qinst;
	struct mbox_controller *mbox;
	struct mbox_chan *chans;
	int queue_count;
	int i;
	int ret = -EINVAL;
	const struct ti_msgmgr_valid_queue_desc *queue_desc;

	if (!dev->of_node) {
		dev_err(dev, "no OF information\n");
		return -EINVAL;
	}
	np = dev->of_node;

	of_id = of_match_device(ti_msgmgr_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->dev = dev;
	inst->desc = desc;

	inst->queue_proxy_region =
		devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
	if (IS_ERR(inst->queue_proxy_region))
		return PTR_ERR(inst->queue_proxy_region);

	inst->queue_state_debug_region =
		devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
	if (IS_ERR(inst->queue_state_debug_region))
		return PTR_ERR(inst->queue_state_debug_region);

	if (desc->is_sproxy) {
		inst->queue_ctrl_region =
			devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
		if (IS_ERR(inst->queue_ctrl_region))
			return PTR_ERR(inst->queue_ctrl_region);
	}

	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
		inst->queue_proxy_region, inst->queue_state_debug_region);

	queue_count = desc->num_valid_queues;
	if (!queue_count || queue_count > desc->queue_count) {
		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
			 queue_count, desc->queue_count);
		return -ERANGE;
	}
	inst->num_valid_queues = queue_count;

	qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
	if (!qinst)
		return -ENOMEM;
	inst->qinsts = qinst;

	chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;
	inst->chans = chans;

	if (desc->is_sproxy) {
		struct ti_msgmgr_valid_queue_desc sproxy_desc;

		/* All proxies may be valid in Secure Proxy instance */
		for (i = 0; i < queue_count; i++, qinst++, chans++) {
			sproxy_desc.queue_id = 0;
			sproxy_desc.proxy_id = i;
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, &sproxy_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	} else {
		/* Only some proxies are valid in Message Manager */
		for (i = 0, queue_desc = desc->valid_queues;
		     i < queue_count; i++, qinst++, chans++, queue_desc++) {
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, queue_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	}

	mbox = &inst->mbox;
	mbox->dev = dev;
	mbox->ops = &ti_msgmgr_chan_ops;
	mbox->chans = inst->chans;
	mbox->num_chans = inst->num_valid_queues;
	mbox->txdone_irq = false;
	mbox->txdone_poll = desc->tx_polled;
	if (desc->tx_polled)
		mbox->txpoll_period = desc->tx_poll_timeout_ms;
	mbox->of_xlate = ti_msgmgr_of_xlate;

	platform_set_drvdata(pdev, inst);
	ret = devm_mbox_controller_register(dev, mbox);
	if (ret)
		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);

	return ret;
}

static struct platform_driver ti_msgmgr_driver = {
	.probe = ti_msgmgr_probe,
	.driver = {
		.name = "ti-msgmgr",
		.of_match_table = of_match_ptr(ti_msgmgr_of_match),
		.pm = &ti_msgmgr_pm_ops,
	},
};
module_platform_driver(ti_msgmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI message manager driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-msgmgr");
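/*
 * Illustrative client usage (sketch, not part of this driver): a client such
 * as the TI SCI protocol driver is expected to obtain a channel through the
 * mailbox framework and exchange struct ti_msgmgr_message buffers, roughly:
 *
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	struct ti_msgmgr_message msg = {
 *		.len = len,			// <= desc->max_message_size
 *		.buf = (u8 *)tx_buf,
 *	};
 *	mbox_send_message(chan, &msg);
 *
 * Received data arrives via the client's rx_callback with a
 * struct ti_msgmgr_message whose .buf points at the driver's rx_buff.
 */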