// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg/qcom_smd.h>

#include "rpmsg_internal.h"

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is then sent to the other end of the channel, prompting a scan
 * for new channels. A channel is never deallocated; it only changes state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sending out an interrupt. We detect this change and register an smd device
 * to consume the channel. Upon finding a consumer we finish the handshake and
 * the channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us; we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data into the channel; this is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data, and upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
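
/*
 * For orientation only: a minimal sketch of how a client consumes an smd
 * channel. Clients do not talk to this file directly; they register an
 * ordinary rpmsg driver and the rpmsg core matches it against the channel
 * name found in the allocation table. All names below are hypothetical.
 *
 *	static int demo_probe(struct rpmsg_device *rpdev)
 *	{
 *		u8 ping = 1;
 *
 *		return rpmsg_send(rpdev->ept, &ping, sizeof(ping));
 *	}
 *
 *	static const struct rpmsg_device_id demo_id_table[] = {
 *		{ "demo_channel" },
 *		{}
 *	};
 *
 *	static struct rpmsg_driver demo_driver = {
 *		.probe = demo_probe,
 *		.id_table = demo_id_table,
 *		.drv = { .name = "demo" },
 *	};
 *	module_rpmsg_driver(demo_driver);
 */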

struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 266,
		.info_base_id = 138,
		.fifo_base_id = 202,
	},
};

/**
 * struct qcom_smd_edge - representing a remote processor
 * @dev: device associated with this edge
 * @name: name of this edge
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @remote_pid: identifier of remote processor
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @mbox_client: mailbox client handle
 * @mbox_chan: apcs ipc mailbox channel handle
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @new_channel_event: wait queue for new channel events
 * @scan_work: work item for discovering new channels
 * @state_work: work item for edge state changes
 */
struct qcom_smd_edge {
	struct device dev;

	const char *name;

	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	unsigned smem_available;

	wait_queue_head_t new_channel_event;

	struct work_struct scan_work;
	struct work_struct state_work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

struct qcom_smd_device {
	struct rpmsg_device rpdev;

	struct qcom_smd_edge *edge;
};

struct qcom_smd_endpoint {
	struct rpmsg_endpoint ept;

	struct qcom_smd_channel *qsch;
};

#define to_smd_device(r)	container_of(r, struct qcom_smd_device, rpdev)
#define to_smd_edge(d)		container_of(d, struct qcom_smd_edge, dev)
#define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsept: reference to an associated smd endpoint
 * @registered: flag to indicate if the channel is registered
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @state_change_event: state change event
 * @info: byte aligned outgoing/incoming channel info
 * @info_word: word aligned outgoing/incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @cb: callback function registered for this channel
 * @recv_lock: guard for rx info modifications and cb pointer
 * @pkt_size: size of the currently handled packet
 * @drvdata: driver private data
 * @list: list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_endpoint *qsept;
	bool registered;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;
	wait_queue_head_t state_change_event;

	struct smd_channel_info_pair *info;
	struct smd_channel_info_word_pair *info_word;

	spinlock_t tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;

	spinlock_t recv_lock;

	int pkt_size;

	void *drvdata;

	struct list_head list;
};
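
/*
 * For reference, the open handshake as implemented by the state worker and
 * qcom_smd_channel_open() below:
 *
 *	remote state		local state
 *	OPENING			CLOSED		remote announces the channel
 *	OPENING/OPENED		OPENING		smd device registered, we answer
 *	OPENED			OPENED		handshake done, channel is up
 *
 * Closing mirrors this: when the remote state leaves OPENING/OPENED the
 * attached device is unregistered and our end of the channel is reset.
 */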

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	__le32 state;
	u8  fDSR;
	u8  fCTS;
	u8  fCD;
	u8  fRI;
	u8  fHEAD;
	u8  fTAIL;
	u8  fSTATE;
	u8  fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_pair {
	struct smd_channel_info tx;
	struct smd_channel_info rx;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	__le32 state;
	__le32 fDSR;
	__le32 fCTS;
	__le32 fCD;
	__le32 fRI;
	__le32 fHEAD;
	__le32 fTAIL;
	__le32 fSTATE;
	__le32 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_word_pair {
	struct smd_channel_info_word tx;
	struct smd_channel_info_word rx;
};
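
/*
 * Note: the f* flag names above (fDSR, fCTS, fCD, fRI) appear to be
 * inherited from the RS-232 modem-control signals that SMD historically
 * emulated; this driver only uses them as generic handshake bits. The word
 * aligned variant exists for channels whose backing memory must be accessed
 * with 32-bit loads and stores, see smd_copy_to_fifo()/smd_copy_from_fifo().
 */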

#define GET_RX_CHANNEL_FLAG(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		channel->info_word ? \
			le32_to_cpu(channel->info_word->rx.param) : \
			channel->info->rx.param; \
	})

#define GET_RX_CHANNEL_INFO(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ? \
			channel->info_word->rx.param : \
			channel->info->rx.param); \
	})

#define SET_RX_CHANNEL_FLAG(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		if (channel->info_word) \
			channel->info_word->rx.param = cpu_to_le32(value); \
		else \
			channel->info->rx.param = value; \
	})

#define SET_RX_CHANNEL_INFO(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		if (channel->info_word) \
			channel->info_word->rx.param = cpu_to_le32(value); \
		else \
			channel->info->rx.param = cpu_to_le32(value); \
	})

#define GET_TX_CHANNEL_FLAG(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		channel->info_word ? \
			le32_to_cpu(channel->info_word->tx.param) : \
			channel->info->tx.param; \
	})

#define GET_TX_CHANNEL_INFO(channel, param) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ? \
			channel->info_word->tx.param : \
			channel->info->tx.param); \
	})

#define SET_TX_CHANNEL_FLAG(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		if (channel->info_word) \
			channel->info_word->tx.param = cpu_to_le32(value); \
		else \
			channel->info->tx.param = value; \
	})

#define SET_TX_CHANNEL_INFO(channel, param, value) \
	({ \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		if (channel->info_word) \
			channel->info_word->tx.param = cpu_to_le32(value); \
		else \
			channel->info->tx.param = cpu_to_le32(value); \
	})
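
/*
 * Illustration of the accessors above: on a word aligned channel
 * GET_RX_CHANNEL_INFO(channel, tail) evaluates to
 * le32_to_cpu(channel->info_word->rx.tail), otherwise to
 * le32_to_cpu(channel->info->rx.tail). The BUILD_BUG_ON()s only assert that
 * the _FLAG accessors are used for the u8 flag fields and the _INFO
 * accessors for the __le32 fields of the byte aligned layout.
 */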

/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	__le32 cid;
	__le32 flags;
	__le32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 bytes being the
 * length of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
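
/*
 * Layout of the packet framing used by __qcom_smd_send() below; the header
 * is five little-endian words, of which only the first is used by this
 * driver (the remaining four are transmitted as zero):
 *
 *	offset	content
 *	0	payload length in bytes (__le32)
 *	4-19	unused, sent as zero
 *	20-	payload
 */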

/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	if (edge->mbox_chan) {
		/*
		 * We can ignore a failing mbox_send_message() as the only
		 * possible cause is that the FIFO in the framework is full of
		 * other writes to the same bit.
		 */
		mbox_send_message(edge->mbox_chan, NULL);
		mbox_client_txdone(edge->mbox_chan, 0);
	} else {
		regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
	}
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
	SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
	SET_TX_CHANNEL_FLAG(channel, fCD, 0);
	SET_TX_CHANNEL_FLAG(channel, fRI, 0);
	SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
	SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_RX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Set the callback for a channel, with appropriate locking
 */
static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
					  rpmsg_rx_cb_t cb)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	ept->cb = cb;
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	return (head - tail) & (channel->fifo_size - 1);
}
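
/*
 * Worked example of the mask arithmetic above, which requires fifo_size to
 * be a power of two: with a 1024 byte fifo, head = 10 and tail = 1000,
 * (10 - 1000) & 1023 = 34 bytes are readable, so the computation stays
 * correct across the wrap of the ring.
 */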

/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *dst,
			     const void *src,
			     size_t count,
			     bool word_aligned)
{
	if (word_aligned) {
		__iowrite32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_toio(dst, src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *dst,
			       const void __iomem *src,
			       size_t count,
			       bool word_aligned)
{
	if (word_aligned) {
		__ioread32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_fromio(dst, src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->info_word;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}
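
/*
 * Example of the wrap handling above: with fifo_size = 1024, tail = 1000
 * and count = 100, the first smd_copy_from_fifo() copies the 24 bytes up to
 * the end of the ring and the second copies the remaining 76 bytes from the
 * start of the ring.
 */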

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	__le32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;

		wake_up_interruptible_all(&channel->state_change_event);
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
		wake_up_interruptible_all(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = le32_to_cpu(pktlen);
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}
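
/*
 * Sketch of the consume loop above for a single 8 byte packet: the first
 * iteration sees pkt_size == 0 with at least SMD_PACKET_HEADER_LEN bytes
 * available, reads the length word and advances past the header; the second
 * iteration sees pkt_size == 8 with the payload available and hands it to
 * qcom_smd_channel_recv_single(); the third finds nothing left and breaks.
 */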

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_scanner = false;
	bool kick_state = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_state |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		kick_scanner = true;
	}

	if (kick_scanner)
		schedule_work(&edge->scan_work);
	if (kick_state)
		schedule_work(&edge->state_work);

	return IRQ_HANDLED;
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

	return mask - ((head - tail) & mask);
}

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}
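
/*
 * Note on qcom_smd_get_tx_avail() above: it reports at most fifo_size - 1
 * bytes, e.g. 1023 for an empty 1024 byte fifo. Keeping one byte unused
 * distinguishes a full ring (head one byte behind tail) from an empty one
 * (head == tail).
 */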

/**
 * __qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 * @wait: flag to indicate if write can wait
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer,
 * after which the remote end is signaled. It will sleep until there is enough
 * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
 * mechanism to avoid polling.
 */
static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
			   int len, bool wait)
{
	__le32 hdr[5] = { cpu_to_le32(len), };
	int tlen = sizeof(hdr) + len;
	unsigned long flags;
	int ret = 0;

	/* Word aligned channels only accept word size aligned data */
	if (channel->info_word && len % 4)
		return -EINVAL;

	/* Reject packets that are too big */
	if (tlen >= channel->fifo_size)
		return -EINVAL;

	/* Highlight the fact that if we enter the loop below we might sleep */
	if (wait)
		might_sleep();

	spin_lock_irqsave(&channel->tx_lock, flags);

	while (qcom_smd_get_tx_avail(channel) < tlen &&
	       channel->state == SMD_CHANNEL_OPENED) {
		if (!wait) {
			ret = -EAGAIN;
			goto out_unlock;
		}

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&channel->tx_lock, flags);

		ret = wait_event_interruptible(channel->fblockread_event,
					       qcom_smd_get_tx_avail(channel) >= tlen ||
					       channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			return ret;

		spin_lock_irqsave(&channel->tx_lock, flags);

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	}

	/* Fail if the channel was closed */
	if (channel->state != SMD_CHANNEL_OPENED) {
		ret = -EPIPE;
		goto out_unlock;
	}

	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out_unlock:
	spin_unlock_irqrestore(&channel->tx_lock, flags);

	return ret;
}
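
/*
 * For illustration: rpmsg clients reach __qcom_smd_send() through the
 * endpoint ops defined further down, e.g. (rpdev being a hypothetical
 * client's rpmsg device):
 *
 *	rpmsg_send(rpdev->ept, buf, len);	wait == true, may sleep
 *	rpmsg_trysend(rpdev->ept, buf, len);	wait == false, -EAGAIN when
 *						the tx fifo is full
 */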

/*
 * Helper for opening a channel
 */
static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
				 rpmsg_rx_cb_t cb)
{
	struct qcom_smd_edge *edge = channel->edge;
	size_t bb_size;
	int ret;

	/*
	 * Packets are at most 4k, but reduce the bounce buffer accordingly if
	 * the fifo is smaller.
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	qcom_smd_channel_set_callback(channel, cb);
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	/* Wait for remote to enter opening or opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
					       channel->remote_state == SMD_CHANNEL_OPENING ||
					       channel->remote_state == SMD_CHANNEL_OPENED,
					       HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter opening state\n");
		goto out_close_timeout;
	}

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	/* Wait for remote to enter opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
					       channel->remote_state == SMD_CHANNEL_OPENED,
					       HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter open state\n");
		goto out_close_timeout;
	}

	return 0;

out_close_timeout:
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return -ETIMEDOUT;
}

/*
 * Helper for closing and resetting a channel
 */
static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
{
	qcom_smd_channel_set_callback(channel, NULL);

	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	qcom_smd_channel_reset(channel);
}

static struct qcom_smd_channel *
qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_channel *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (!strcmp(channel->name, name)) {
			ret = channel;
			break;
		}
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);

	return ret;
}

static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	kfree(to_smd_endpoint(ept));
}

static struct rpmsg_endpoint *qcom_smd_create_ept(struct rpmsg_device *rpdev,
						  rpmsg_rx_cb_t cb, void *priv,
						  struct rpmsg_channel_info chinfo)
{
	struct qcom_smd_endpoint *qsept;
	struct qcom_smd_channel *channel;
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);
	struct qcom_smd_edge *edge = qsdev->edge;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int ret;

	/* Wait up to HZ for the channel to appear */
	ret = wait_event_interruptible_timeout(edge->new_channel_event,
					       (channel = qcom_smd_find_channel(edge, name)) != NULL,
					       HZ);
	if (!ret)
		return NULL;

	if (channel->state != SMD_CHANNEL_CLOSED) {
		dev_err(&rpdev->dev, "channel %s is busy\n", channel->name);
		return NULL;
	}

	qsept = kzalloc(sizeof(*qsept), GFP_KERNEL);
	if (!qsept)
		return NULL;

	ept = &qsept->ept;

	kref_init(&ept->refcount);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &qcom_smd_endpoint_ops;

	channel->qsept = qsept;
	qsept->qsch = channel;

	ret = qcom_smd_channel_open(channel, cb);
	if (ret)
		goto free_ept;

	return ept;

free_ept:
	channel->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

static void qcom_smd_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *ch = qsept->qsch;

	qcom_smd_channel_close(ch);
	ch->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
}

static int qcom_smd_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, true);
}

static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, false);
}

static int qcom_smd_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, true);
}

static int qcom_smd_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, false);
}

static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
			      struct file *filp, poll_table *wait)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *channel = qsept->qsch;
	__poll_t mask = 0;

	poll_wait(filp, &channel->fblockread_event, wait);

	if (qcom_smd_get_tx_avail(channel) > 20)
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
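
/*
 * The "> 20" threshold in qcom_smd_poll() above matches
 * SMD_PACKET_HEADER_LEN; writable is presumably only meant to be reported
 * once there is room for more than a packet header in the tx fifo.
 */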

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static int qcom_smd_announce_create(struct rpmsg_device *rpdev)
{
	struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept);
	struct qcom_smd_channel *channel = qept->qsch;
	unsigned long flags;
	bool kick_state;

	spin_lock_irqsave(&channel->recv_lock, flags);
	kick_state = qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	if (kick_state)
		schedule_work(&channel->edge->state_work);

	return 0;
}

static const struct rpmsg_device_ops qcom_smd_device_ops = {
	.create_ept = qcom_smd_create_ept,
	.announce_create = qcom_smd_announce_create,
};

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
	.destroy_ept = qcom_smd_destroy_ept,
	.send = qcom_smd_send,
	.sendto = qcom_smd_sendto,
	.trysend = qcom_smd_trysend,
	.trysendto = qcom_smd_trysendto,
	.poll = qcom_smd_poll,
};

static void qcom_smd_release_device(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);

	kfree(qsdev);
}

/*
 * Create an smd client device for a channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct rpmsg_device *rpdev;
	struct qcom_smd_edge *edge = channel->edge;

	dev_dbg(&edge->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	/* Link qsdev to our SMD edge */
	qsdev->edge = edge;

	/* Assign callbacks for rpmsg_device */
	qsdev->rpdev.ops = &qcom_smd_device_ops;

	/* Assign public information to the rpmsg_device */
	rpdev = &qsdev->rpdev;
	strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
	rpdev->src = RPMSG_ADDR_ANY;
	rpdev->dst = RPMSG_ADDR_ANY;

	rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name);
	rpdev->dev.parent = &edge->dev;
	rpdev->dev.release = qcom_smd_release_device;

	return rpmsg_register_device(rpdev);
}

static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
{
	struct qcom_smd_device *qsdev;

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	qsdev->edge = edge;
	qsdev->rpdev.ops = &qcom_smd_device_ops;
	qsdev->rpdev.dev.parent = &edge->dev;
	qsdev->rpdev.dev.release = qcom_smd_release_device;

	return rpmsg_ctrldev_register_device(&qsdev->rpdev);
}

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							unsigned smem_info_item,
							unsigned smem_fifo_item,
							char *name)
{
	struct qcom_smd_channel *channel;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		ret = -ENOMEM;
		goto free_channel;
	}

	spin_lock_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);
	init_waitqueue_head(&channel->state_change_event);

	info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto free_name_and_channel;
	}

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->info_word = info;
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->info = info;
	} else {
		dev_err(&edge->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
	if (IS_ERR(fifo_base)) {
		ret = PTR_ERR(fifo_base);
		goto free_name_and_channel;
	}

	/* The channel consists of an rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	kfree(channel->name);
free_channel:
	kfree(channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and adds
 * them to the edge's list of channels.
 */
static void qcom_channel_scan_worker(struct work_struct *work)
{
	struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;
	u32 eflags, cid;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
					  smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			eflags = le32_to_cpu(entry->flags);
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			cid = le32_to_cpu(entry->cid);
			info_id = smem_items[tbl].info_base_id + cid;
			fifo_id = smem_items[tbl].fifo_base_id + cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);

			wake_up_interruptible_all(&edge->new_channel_event);
		}
	}

	schedule_work(&edge->state_work);
}

/*
 * This per-edge worker scans smem for any new channels and registers them. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
 *
 * LOCKING: edge->channels_lock only needs to cover the list operations, as the
 * worker is killed before any channels are deallocated
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  state_work);
	struct rpmsg_channel_info chinfo;
	unsigned remote_state;
	unsigned long flags;

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		/*
		 * Always open rpm_requests, even when already opened, which is
		 * required on some SoCs like msm8953.
		 */
		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED &&
		    strcmp(channel->name, "rpm_requests"))
			continue;

		if (channel->registered)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);
		qcom_smd_create_device(channel);
		spin_lock_irqsave(&edge->channels_lock, flags);
		channel->registered = true;
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);

		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;
		rpmsg_unregister_device(&edge->dev, &chinfo);
		channel->registered = false;
		spin_lock_irqsave(&edge->channels_lock, flags);
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
	INIT_WORK(&edge->state_work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		goto put_node;
	}

	edge->remote_pid = QCOM_SMEM_HOST_ANY;
	key = "qcom,remote-pid";
	of_property_read_u32(node, key, &edge->remote_pid);

	edge->mbox_client.dev = dev;
	edge->mbox_client.knows_txdone = true;
	edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
	if (IS_ERR(edge->mbox_chan)) {
		if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
			ret = PTR_ERR(edge->mbox_chan);
			goto put_node;
		}

		edge->mbox_chan = NULL;

		syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
		if (!syscon_np) {
			dev_err(dev, "no qcom,ipc node\n");
			ret = -ENODEV;
			goto put_node;
		}

		edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
		of_node_put(syscon_np);
		if (IS_ERR(edge->ipc_regmap)) {
			ret = PTR_ERR(edge->ipc_regmap);
			goto put_node;
		}

		key = "qcom,ipc";
		ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
		if (ret < 0) {
			dev_err(dev, "no offset in %s\n", key);
			goto put_node;
		}

		ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
		if (ret < 0) {
			dev_err(dev, "no bit in %s\n", key);
			goto put_node;
		}
	}

	ret = of_property_read_string(node, "label", &edge->name);
	if (ret < 0)
		edge->name = node->name;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(dev, "required smd interrupt missing\n");
		ret = -EINVAL;
		goto put_node;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		goto put_node;
	}

	edge->irq = irq;

	return 0;

put_node:
	of_node_put(node);
	edge->of_node = NULL;

	return ret;
}
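
/*
 * For reference, a typical devicetree description of an edge and a channel
 * consumed by the parsing and matching above. The values follow the
 * documented msm8974 example and will differ per platform:
 *
 *	smd {
 *		compatible = "qcom,smd";
 *
 *		rpm {
 *			interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
 *			qcom,ipc = <&apcs 8 0>;
 *			qcom,smd-edge = <15>;
 *
 *			rpm-requests {
 *				compatible = "qcom,rpm-msm8974";
 *				qcom,smd-channels = "rpm_requests";
 *			};
 *		};
 *	};
 */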

/*
 * Release function for an edge.
 * Free the channels associated with the edge and the edge context itself.
 */
static void qcom_smd_edge_release(struct device *dev)
{
	struct qcom_smd_channel *channel, *tmp;
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
		list_del(&channel->list);
		kfree(channel->name);
		kfree(channel);
	}

	kfree(edge);
}

static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return sprintf(buf, "%s\n", edge->name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_smd_edge_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_smd_edge);

/**
 * qcom_smd_register_edge() - register an edge based on a device_node
 * @parent: parent device for the edge
 * @node: device_node describing the edge
 *
 * Return: an edge reference, or negative ERR_PTR() on failure.
 */
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
					     struct device_node *node)
{
	struct qcom_smd_edge *edge;
	int ret;

	edge = kzalloc(sizeof(*edge), GFP_KERNEL);
	if (!edge)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&edge->new_channel_event);

	edge->dev.parent = parent;
	edge->dev.release = qcom_smd_edge_release;
	edge->dev.of_node = node;
	edge->dev.groups = qcom_smd_edge_groups;
	dev_set_name(&edge->dev, "%s:%pOFn", dev_name(parent), node);
	ret = device_register(&edge->dev);
	if (ret) {
		pr_err("failed to register smd edge\n");
		put_device(&edge->dev);
		return ERR_PTR(ret);
	}

	ret = qcom_smd_parse_edge(&edge->dev, node, edge);
	if (ret) {
		dev_err(&edge->dev, "failed to parse smd edge\n");
		goto unregister_dev;
	}

	ret = qcom_smd_create_chrdev(edge);
	if (ret) {
		dev_err(&edge->dev, "failed to register chrdev for edge\n");
		goto unregister_dev;
	}

	schedule_work(&edge->scan_work);

	return edge;

unregister_dev:
	if (!IS_ERR_OR_NULL(edge->mbox_chan))
		mbox_free_channel(edge->mbox_chan);

	device_unregister(&edge->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(qcom_smd_register_edge);
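
/*
 * Sketch of how a remoteproc-style driver might bring up an edge for a
 * subnode of its own of_node (names hypothetical):
 *
 *	struct device_node *np;
 *	struct qcom_smd_edge *edge;
 *
 *	np = of_get_child_by_name(pdev->dev.of_node, "smd-edge");
 *	edge = qcom_smd_register_edge(&pdev->dev, np);
 *	of_node_put(np);
 *	if (IS_ERR(edge))
 *		return PTR_ERR(edge);
 *	...
 *	qcom_smd_unregister_edge(edge);
 */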

static int qcom_smd_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/**
 * qcom_smd_unregister_edge() - release an edge and its children
 * @edge: edge reference acquired from qcom_smd_register_edge
 */
int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
	int ret;

	disable_irq(edge->irq);
	cancel_work_sync(&edge->scan_work);
	cancel_work_sync(&edge->state_work);

	ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
	if (ret)
		dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);

	mbox_free_channel(edge->mbox_chan);
	device_unregister(&edge->dev);

	return 0;
}
EXPORT_SYMBOL(qcom_smd_unregister_edge);

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct device_node *node;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	for_each_available_child_of_node(pdev->dev.of_node, node)
		qcom_smd_register_edge(&pdev->dev, node);

	return 0;
}

static int qcom_smd_remove_edge(struct device *dev, void *data)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return qcom_smd_unregister_edge(edge);
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then destroy the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	int ret;

	ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
	if (ret)
		dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);

	return ret;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	return platform_driver_register(&qcom_smd_driver);
}
arch_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");