// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP mailbox driver
 *
 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013-2019 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *          Suman Anna <s-anna@ti.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>

#include "mailbox.h"

#define MAILBOX_REVISION		0x000
#define MAILBOX_MESSAGE(m)		(0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m)		(0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m)		(0x0c0 + 4 * (m))

#define OMAP2_MAILBOX_IRQSTATUS(u)	(0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u)	(0x104 + 8 * (u))

#define OMAP4_MAILBOX_IRQSTATUS(u)	(0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u)	(0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u)	(0x10c + 0x10 * (u))

#define MAILBOX_IRQSTATUS(type, u)	(type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
						OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE(u) : \
						OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
						: OMAP2_MAILBOX_IRQENABLE(u))

#define MAILBOX_IRQ_NEWMSG(m)		(1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m)		(1 << (2 * (m) + 1))

/* Interrupt register configuration types */
#define MBOX_INTR_CFG_TYPE1		0
#define MBOX_INTR_CFG_TYPE2		1

struct omap_mbox_fifo {
	unsigned long msg;
	unsigned long fifo_stat;
	unsigned long msg_stat;
	unsigned long irqenable;
	unsigned long irqstatus;
	unsigned long irqdisable;
	u32 intr_bit;
};

struct omap_mbox_queue {
	spinlock_t lock;
	struct kfifo fifo;
	struct work_struct work;
	struct omap_mbox *mbox;
	bool full;
};

struct omap_mbox_match_data {
	u32 intr_type;
};

struct omap_mbox_device {
	struct device *dev;
	struct mutex cfg_lock;
	void __iomem *mbox_base;
	u32 *irq_ctx;
	u32 num_users;
	u32 num_fifos;
	u32 intr_type;
	struct omap_mbox **mboxes;
	struct mbox_controller controller;
	struct list_head elem;
};

struct omap_mbox_fifo_info {
	int tx_id;
	int tx_usr;
	int tx_irq;

	int rx_id;
	int rx_usr;
	int rx_irq;

	const char *name;
	bool send_no_irq;
};

struct omap_mbox {
	const char *name;
	int irq;
	struct omap_mbox_queue *rxq;
	struct device *dev;
	struct omap_mbox_device *parent;
	struct omap_mbox_fifo tx_fifo;
	struct omap_mbox_fifo rx_fifo;
	u32 intr_type;
	struct mbox_chan *chan;
	bool send_no_irq;
};

/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);
static LIST_HEAD(omap_mbox_devices);

static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");

static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
{
	if (!chan || !chan->con_priv)
		return NULL;

	return (struct omap_mbox *)chan->con_priv;
}

static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
	return __raw_readl(mdev->mbox_base + ofs);
}

static inline
void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
{
	__raw_writel(val, mdev->mbox_base + ofs);
}

/* Mailbox FIFO handle functions */
static u32 mbox_fifo_read(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return mbox_read_reg(mbox->parent, fifo->msg);
}

static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	mbox_write_reg(mbox->parent, msg, fifo->msg);
}

static int mbox_fifo_empty(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}

static int mbox_fifo_full(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}

/* Mailbox IRQ handle functions */
static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqstatus = fifo->irqstatus;

	mbox_write_reg(mbox->parent, bit, irqstatus);

	/* Flush posted write for irq status to avoid spurious interrupts */
	mbox_read_reg(mbox->parent, irqstatus);
}

static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;
	u32 irqstatus = fifo->irqstatus;

	u32 enable = mbox_read_reg(mbox->parent, irqenable);
	u32 status = mbox_read_reg(mbox->parent, irqstatus);

	return (int)(enable & status & bit);
}

static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	u32 l;
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;

	l = mbox_read_reg(mbox->parent, irqenable);
	l |= bit;
	mbox_write_reg(mbox->parent, l, irqenable);
}

static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
				&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqdisable = fifo->irqdisable;

	/*
	 * Read and update the interrupt configuration register for pre-OMAP4.
	 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
	 */
	if (!mbox->intr_type)
		bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;

	mbox_write_reg(mbox->parent, bit, irqdisable);
}

void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_enable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);

void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_disable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);

/*
 * Message receiver (workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	mbox_msg_t data;
	u32 msg;
	int len;

	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
		data = msg;

		mbox_chan_received_data(mq->mbox->chan, (void *)data);
		spin_lock_irq(&mq->lock);
		if (mq->full) {
			mq->full = false;
			_omap_mbox_enable_irq(mq->mbox, IRQ_RX);
		}
		spin_unlock_irq(&mq->lock);
	}
}

/*
 * Mailbox interrupt handler
 */
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	mbox_chan_txdone(mbox->chan, 0);
}

static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	u32 msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			_omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}

static irqreturn_t mbox_interrupt(int irq, void *p)
{
	struct omap_mbox *mbox = p;

	if (is_mbox_irq(mbox, IRQ_TX))
		__mbox_tx_interrupt(mbox);

	if (is_mbox_irq(mbox, IRQ_RX))
		__mbox_rx_interrupt(mbox);

	return IRQ_HANDLED;
}

static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					void (*work)(struct work_struct *))
{
	struct omap_mbox_queue *mq;

	if (!work)
		return NULL;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	spin_lock_init(&mq->lock);

	if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
		goto error;

	INIT_WORK(&mq->work, work);
	return mq;

error:
	kfree(mq);
	return NULL;
}

static void mbox_queue_free(struct omap_mbox_queue *q)
{
	kfifo_free(&q->fifo);
	kfree(q);
}

static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;

	mq = mbox_queue_alloc(mbox, mbox_rx_work);
	if (!mq)
		return -ENOMEM;
	mbox->rxq = mq;
	mq->mbox = mbox;

	ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
			  mbox->name, mbox);
	if (unlikely(ret)) {
		pr_err("failed to register mailbox interrupt:%d\n", ret);
		goto fail_request_irq;
	}

	if (mbox->send_no_irq)
		mbox->chan->txdone_method = TXDONE_BY_ACK;

	_omap_mbox_enable_irq(mbox, IRQ_RX);

	return 0;

fail_request_irq:
	mbox_queue_free(mbox->rxq);
	return ret;
}

static void omap_mbox_fini(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_RX);
	free_irq(mbox->irq, mbox);
	flush_work(&mbox->rxq->work);
	mbox_queue_free(mbox->rxq);
}

static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
					       const char *mbox_name)
{
	struct omap_mbox *_mbox, *mbox = NULL;
	struct omap_mbox **mboxes = mdev->mboxes;
	int i;

	if (!mboxes)
		return NULL;

	for (i = 0; (_mbox = mboxes[i]); i++) {
		if (!strcmp(_mbox->name, mbox_name)) {
			mbox = _mbox;
			break;
		}
	}
	return mbox;
}

struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
					    const char *chan_name)
{
	struct device *dev = cl->dev;
	struct omap_mbox *mbox = NULL;
	struct omap_mbox_device *mdev;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (dev->of_node) {
		pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
		       __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_for_each_entry(mdev, &omap_mbox_devices, elem) {
		mbox = omap_mbox_device_find(mdev, chan_name);
		if (mbox)
			break;
	}
	mutex_unlock(&omap_mbox_devices_lock);

	if (!mbox || !mbox->chan)
		return ERR_PTR(-ENOENT);

	chan = mbox->chan;
	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);
	spin_unlock_irqrestore(&chan->lock, flags);

	ret = chan->mbox->ops->startup(chan);
	if (ret) {
		pr_err("Unable to startup the chan (%d)\n", ret);
		mbox_free_channel(chan);
		chan = ERR_PTR(ret);
	}

	return chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);

static struct class omap_mbox_class = { .name = "mbox", };

static int omap_mbox_register(struct omap_mbox_device *mdev)
{
	int ret;
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++) {
		struct omap_mbox *mbox = mboxes[i];

		mbox->dev = device_create(&omap_mbox_class, mdev->dev,
					  0, mbox, "%s", mbox->name);
		if (IS_ERR(mbox->dev)) {
			ret = PTR_ERR(mbox->dev);
			goto err_out;
		}
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_add(&mdev->elem, &omap_mbox_devices);
	mutex_unlock(&omap_mbox_devices_lock);

	ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);

err_out:
	if (ret) {
		while (i--)
			device_unregister(mboxes[i]->dev);
	}
	return ret;
}

static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mutex_lock(&omap_mbox_devices_lock);
	list_del(&mdev->elem);
	mutex_unlock(&omap_mbox_devices_lock);

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++)
		device_unregister(mboxes[i]->dev);
	return 0;
}

static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;
	int ret = 0;

	mutex_lock(&mdev->cfg_lock);
	pm_runtime_get_sync(mdev->dev);
	ret = omap_mbox_startup(mbox);
	if (ret)
		pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
	return ret;
}

static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;

	mutex_lock(&mdev->cfg_lock);
	omap_mbox_fini(mbox);
	pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
}

static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		_omap_mbox_enable_irq(mbox, IRQ_RX);
		mbox_fifo_write(mbox, msg);
		ret = 0;
		_omap_mbox_disable_irq(mbox, IRQ_RX);

		/* we must read and ack the interrupt directly from here */
		mbox_fifo_read(mbox);
		ack_mbox_irq(mbox, IRQ_RX);
	}

	return ret;
}

static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		mbox_fifo_write(mbox, msg);
		ret = 0;
	}

	/* always enable the interrupt */
	_omap_mbox_enable_irq(mbox, IRQ_TX);
	return ret;
}

static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	int ret;
	u32 msg = omap_mbox_message(data);

	if (!mbox)
		return -EINVAL;

	if (mbox->send_no_irq)
		ret = omap_mbox_chan_send_noirq(mbox, msg);
	else
		ret = omap_mbox_chan_send(mbox, msg);

	return ret;
}

static const struct mbox_chan_ops omap_mbox_chan_ops = {
	.startup	= omap_mbox_chan_startup,
	.send_data	= omap_mbox_chan_send_data,
	.shutdown	= omap_mbox_chan_shutdown,
};

#ifdef CONFIG_PM_SLEEP
static int omap_mbox_suspend(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, fifo, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
		if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
			dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
				fifo);
			return -EBUSY;
		}
	}

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
	}

	return 0;
}

static int omap_mbox_resume(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
	}

	return 0;
}
#endif

static const struct dev_pm_ops omap_mbox_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};

static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };

static const struct of_device_id omap_mailbox_of_match[] = {
	{
		.compatible	= "ti,omap2-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap3-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap4-mailbox",
		.data		= &omap4_data,
	},
	{
		.compatible	= "ti,am654-mailbox",
		.data		= &omap4_data,
	},
	{
		/* end */
	},
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);

static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
					    const struct of_phandle_args *sp)
{
	phandle phandle = sp->args[0];
	struct device_node *node;
	struct omap_mbox_device *mdev;
	struct omap_mbox *mbox;

	mdev = container_of(controller, struct omap_mbox_device, controller);
	if (WARN_ON(!mdev))
		return ERR_PTR(-EINVAL);

	node = of_find_node_by_phandle(phandle);
	if (!node) {
		pr_err("%s: could not find node phandle 0x%x\n",
		       __func__, phandle);
		return ERR_PTR(-ENODEV);
	}

	mbox = omap_mbox_device_find(mdev, node->name);
	of_node_put(node);
	return mbox ? mbox->chan : ERR_PTR(-ENOENT);
}

static int omap_mbox_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int ret;
	struct mbox_chan *chnls;
	struct omap_mbox **list, *mbox, *mboxblk;
	struct omap_mbox_fifo_info *finfo, *finfoblk;
	struct omap_mbox_device *mdev;
	struct omap_mbox_fifo *fifo;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	const struct omap_mbox_match_data *match_data;
	u32 intr_type, info_count;
	u32 num_users, num_fifos;
	u32 tmp[3];
	u32 l;
	int i;

	if (!node) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;
	intr_type = match_data->intr_type;

	if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
		return -ENODEV;

	if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
		return -ENODEV;

	info_count = of_get_available_child_count(node);
	if (!info_count) {
		dev_err(&pdev->dev, "no available mbox devices found\n");
		return -ENODEV;
	}

	finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
				GFP_KERNEL);
	if (!finfoblk)
		return -ENOMEM;

	finfo = finfoblk;
	child = NULL;
	for (i = 0; i < info_count; i++, finfo++) {
		child = of_get_next_available_child(node, child);
		ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->tx_id = tmp[0];
		finfo->tx_irq = tmp[1];
		finfo->tx_usr = tmp[2];

		ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->rx_id = tmp[0];
		finfo->rx_irq = tmp[1];
		finfo->rx_usr = tmp[2];

		finfo->name = child->name;

		if (of_find_property(child, "ti,mbox-send-noirq", NULL))
			finfo->send_no_irq = true;

		if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
		    finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
			return -EINVAL;
	}

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(mdev->mbox_base))
		return PTR_ERR(mdev->mbox_base);

	mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
				     GFP_KERNEL);
	if (!mdev->irq_ctx)
		return -ENOMEM;

	/* allocate one extra for marking end of list */
	list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
			    GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
			     GFP_KERNEL);
	if (!chnls)
		return -ENOMEM;

	mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
			       GFP_KERNEL);
	if (!mboxblk)
		return -ENOMEM;

	mbox = mboxblk;
	finfo = finfoblk;
	for (i = 0; i < info_count; i++, finfo++) {
		fifo = &mbox->tx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
		fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
		fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);

		fifo = &mbox->rx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
		fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
		fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);

		mbox->send_no_irq = finfo->send_no_irq;
		mbox->intr_type = intr_type;

		mbox->parent = mdev;
		mbox->name = finfo->name;
		mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
		if (mbox->irq < 0)
			return mbox->irq;
		mbox->chan = &chnls[i];
		chnls[i].con_priv = mbox;
		list[i] = mbox++;
	}

	mutex_init(&mdev->cfg_lock);
	mdev->dev = &pdev->dev;
	mdev->num_users = num_users;
	mdev->num_fifos = num_fifos;
	mdev->intr_type = intr_type;
	mdev->mboxes = list;

	/*
	 * OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a
	 * Tx-Ready IRQ, which is needed to run the Tx state machine
	 */
	mdev->controller.txdone_irq = true;
	mdev->controller.dev = mdev->dev;
	mdev->controller.ops = &omap_mbox_chan_ops;
	mdev->controller.chans = chnls;
	mdev->controller.num_chans = info_count;
	mdev->controller.of_xlate = omap_mbox_of_xlate;
	ret = omap_mbox_register(mdev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);
	pm_runtime_enable(mdev->dev);

	ret = pm_runtime_get_sync(mdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(mdev->dev);
		goto unregister;
	}

	/*
	 * just print the raw revision register, the format is not
	 * uniform across all SoCs
	 */
	l = mbox_read_reg(mdev, MAILBOX_REVISION);
	dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);

	ret = pm_runtime_put_sync(mdev->dev);
	if (ret < 0 && ret != -ENOSYS)
		goto unregister;

	devm_kfree(&pdev->dev, finfoblk);
	return 0;

unregister:
	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);
	return ret;
}

static int omap_mbox_remove(struct platform_device *pdev)
{
	struct omap_mbox_device *mdev = platform_get_drvdata(pdev);

	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);

	return 0;
}

static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove	= omap_mbox_remove,
	.driver	= {
		.name = "omap-mailbox",
		.pm = &omap_mbox_pm_ops,
		.of_match_table = of_match_ptr(omap_mailbox_of_match),
	},
};

static int __init omap_mbox_init(void)
{
	int err;

	err = class_register(&omap_mbox_class);
	if (err)
		return err;

	/* kfifo size sanity check: alignment and minimal size */
	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));

	err = platform_driver_register(&omap_mbox_driver);
	if (err)
		class_unregister(&omap_mbox_class);

	return err;
}
subsys_initcall(omap_mbox_init);

static void __exit omap_mbox_exit(void)
{
	platform_driver_unregister(&omap_mbox_driver);
	class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");
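
/*
 * Client-side usage sketch (illustrative only, not part of this driver):
 * a DT-based client obtains a channel through the generic mailbox client
 * API, and the of_xlate callback above resolves the "mboxes" phandle to
 * the matching sub-mailbox child node. The callback and device names
 * below are hypothetical.
 *
 *	static void my_rx_callback(struct mbox_client *cl, void *data)
 *	{
 *		u32 msg = omap_mbox_message(data);
 *
 *		dev_dbg(cl->dev, "received 0x%x\n", msg);
 *	}
 *
 *	struct mbox_client cl = {
 *		.dev		= &client_pdev->dev,
 *		.rx_callback	= my_rx_callback,
 *		.tx_block	= false,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *
 *	if (!IS_ERR(chan)) {
 *		mbox_send_message(chan, (void *)(uintptr_t)0x12345678);
 *		mbox_free_channel(chan);
 *	}
 */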