1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2015, Sony Mobile Communications AB. 4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 5 */ 6 7 #include <linux/interrupt.h> 8 #include <linux/io.h> 9 #include <linux/mailbox_client.h> 10 #include <linux/mfd/syscon.h> 11 #include <linux/module.h> 12 #include <linux/of_irq.h> 13 #include <linux/of_platform.h> 14 #include <linux/platform_device.h> 15 #include <linux/regmap.h> 16 #include <linux/sched.h> 17 #include <linux/sizes.h> 18 #include <linux/slab.h> 19 #include <linux/soc/qcom/smem.h> 20 #include <linux/wait.h> 21 #include <linux/rpmsg.h> 22 #include <linux/rpmsg/qcom_smd.h> 23 24 #include "rpmsg_internal.h" 25 26 /* 27 * The Qualcomm Shared Memory communication solution provides point-to-point 28 * channels for clients to send and receive streaming or packet based data. 29 * 30 * Each channel consists of a control item (channel info) and a ring buffer 31 * pair. The channel info carry information related to channel state, flow 32 * control and the offsets within the ring buffer. 33 * 34 * All allocated channels are listed in an allocation table, identifying the 35 * pair of items by name, type and remote processor. 36 * 37 * Upon creating a new channel the remote processor allocates channel info and 38 * ring buffer items from the smem heap and populate the allocation table. An 39 * interrupt is sent to the other end of the channel and a scan for new 40 * channels should be done. A channel never goes away, it will only change 41 * state. 42 * 43 * The remote processor signals it intent for bring up the communication 44 * channel by setting the state of its end of the channel to "opening" and 45 * sends out an interrupt. We detect this change and register a smd device to 46 * consume the channel. Upon finding a consumer we finish the handshake and the 47 * channel is up. 
48 * 49 * Upon closing a channel, the remote processor will update the state of its 50 * end of the channel and signal us, we will then unregister any attached 51 * device and close our end of the channel. 52 * 53 * Devices attached to a channel can use the qcom_smd_send function to push 54 * data to the channel, this is done by copying the data into the tx ring 55 * buffer, updating the pointers in the channel info and signaling the remote 56 * processor. 57 * 58 * The remote processor does the equivalent when it transfer data and upon 59 * receiving the interrupt we check the channel info for new data and delivers 60 * this to the attached device. If the device is not ready to receive the data 61 * we leave it in the ring buffer for now. 62 */ 63 64 struct smd_channel_info; 65 struct smd_channel_info_pair; 66 struct smd_channel_info_word; 67 struct smd_channel_info_word_pair; 68 69 static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops; 70 71 #define SMD_ALLOC_TBL_COUNT 2 72 #define SMD_ALLOC_TBL_SIZE 64 73 74 /* 75 * This lists the various smem heap items relevant for the allocation table and 76 * smd channel entries. 
77 */ 78 static const struct { 79 unsigned alloc_tbl_id; 80 unsigned info_base_id; 81 unsigned fifo_base_id; 82 } smem_items[SMD_ALLOC_TBL_COUNT] = { 83 { 84 .alloc_tbl_id = 13, 85 .info_base_id = 14, 86 .fifo_base_id = 338 87 }, 88 { 89 .alloc_tbl_id = 266, 90 .info_base_id = 138, 91 .fifo_base_id = 202, 92 }, 93 }; 94 95 /** 96 * struct qcom_smd_edge - representing a remote processor 97 * @of_node: of_node handle for information related to this edge 98 * @edge_id: identifier of this edge 99 * @remote_pid: identifier of remote processor 100 * @irq: interrupt for signals on this edge 101 * @ipc_regmap: regmap handle holding the outgoing ipc register 102 * @ipc_offset: offset within @ipc_regmap of the register for ipc 103 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap 104 * @mbox_client: mailbox client handle 105 * @mbox_chan: apcs ipc mailbox channel handle 106 * @channels: list of all channels detected on this edge 107 * @channels_lock: guard for modifications of @channels 108 * @allocated: array of bitmaps representing already allocated channels 109 * @smem_available: last available amount of smem triggering a channel scan 110 * @scan_work: work item for discovering new channels 111 * @state_work: work item for edge state changes 112 */ 113 struct qcom_smd_edge { 114 struct device dev; 115 116 const char *name; 117 118 struct device_node *of_node; 119 unsigned edge_id; 120 unsigned remote_pid; 121 122 int irq; 123 124 struct regmap *ipc_regmap; 125 int ipc_offset; 126 int ipc_bit; 127 128 struct mbox_client mbox_client; 129 struct mbox_chan *mbox_chan; 130 131 struct list_head channels; 132 spinlock_t channels_lock; 133 134 DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); 135 136 unsigned smem_available; 137 138 wait_queue_head_t new_channel_event; 139 140 struct work_struct scan_work; 141 struct work_struct state_work; 142 }; 143 144 /* 145 * SMD channel states. 
146 */ 147 enum smd_channel_state { 148 SMD_CHANNEL_CLOSED, 149 SMD_CHANNEL_OPENING, 150 SMD_CHANNEL_OPENED, 151 SMD_CHANNEL_FLUSHING, 152 SMD_CHANNEL_CLOSING, 153 SMD_CHANNEL_RESET, 154 SMD_CHANNEL_RESET_OPENING 155 }; 156 157 struct qcom_smd_device { 158 struct rpmsg_device rpdev; 159 160 struct qcom_smd_edge *edge; 161 }; 162 163 struct qcom_smd_endpoint { 164 struct rpmsg_endpoint ept; 165 166 struct qcom_smd_channel *qsch; 167 }; 168 169 #define to_smd_device(r) container_of(r, struct qcom_smd_device, rpdev) 170 #define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev) 171 #define to_smd_endpoint(e) container_of(e, struct qcom_smd_endpoint, ept) 172 173 /** 174 * struct qcom_smd_channel - smd channel struct 175 * @edge: qcom_smd_edge this channel is living on 176 * @qsdev: reference to a associated smd client device 177 * @name: name of the channel 178 * @state: local state of the channel 179 * @remote_state: remote state of the channel 180 * @info: byte aligned outgoing/incoming channel info 181 * @info_word: word aligned outgoing/incoming channel info 182 * @tx_lock: lock to make writes to the channel mutually exclusive 183 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR 184 * @tx_fifo: pointer to the outgoing ring buffer 185 * @rx_fifo: pointer to the incoming ring buffer 186 * @fifo_size: size of each ring buffer 187 * @bounce_buffer: bounce buffer for reading wrapped packets 188 * @cb: callback function registered for this channel 189 * @recv_lock: guard for rx info modifications and cb pointer 190 * @pkt_size: size of the currently handled packet 191 * @list: lite entry for @channels in qcom_smd_edge 192 */ 193 struct qcom_smd_channel { 194 struct qcom_smd_edge *edge; 195 196 struct qcom_smd_endpoint *qsept; 197 bool registered; 198 199 char *name; 200 enum smd_channel_state state; 201 enum smd_channel_state remote_state; 202 wait_queue_head_t state_change_event; 203 204 struct smd_channel_info_pair *info; 205 struct 
smd_channel_info_word_pair *info_word; 206 207 spinlock_t tx_lock; 208 wait_queue_head_t fblockread_event; 209 210 void *tx_fifo; 211 void *rx_fifo; 212 int fifo_size; 213 214 void *bounce_buffer; 215 216 spinlock_t recv_lock; 217 218 int pkt_size; 219 220 void *drvdata; 221 222 struct list_head list; 223 }; 224 225 /* 226 * Format of the smd_info smem items, for byte aligned channels. 227 */ 228 struct smd_channel_info { 229 __le32 state; 230 u8 fDSR; 231 u8 fCTS; 232 u8 fCD; 233 u8 fRI; 234 u8 fHEAD; 235 u8 fTAIL; 236 u8 fSTATE; 237 u8 fBLOCKREADINTR; 238 __le32 tail; 239 __le32 head; 240 }; 241 242 struct smd_channel_info_pair { 243 struct smd_channel_info tx; 244 struct smd_channel_info rx; 245 }; 246 247 /* 248 * Format of the smd_info smem items, for word aligned channels. 249 */ 250 struct smd_channel_info_word { 251 __le32 state; 252 __le32 fDSR; 253 __le32 fCTS; 254 __le32 fCD; 255 __le32 fRI; 256 __le32 fHEAD; 257 __le32 fTAIL; 258 __le32 fSTATE; 259 __le32 fBLOCKREADINTR; 260 __le32 tail; 261 __le32 head; 262 }; 263 264 struct smd_channel_info_word_pair { 265 struct smd_channel_info_word tx; 266 struct smd_channel_info_word rx; 267 }; 268 269 #define GET_RX_CHANNEL_FLAG(channel, param) \ 270 ({ \ 271 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ 272 channel->info_word ? \ 273 le32_to_cpu(channel->info_word->rx.param) : \ 274 channel->info->rx.param; \ 275 }) 276 277 #define GET_RX_CHANNEL_INFO(channel, param) \ 278 ({ \ 279 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ 280 le32_to_cpu(channel->info_word ? 
\ 281 channel->info_word->rx.param : \ 282 channel->info->rx.param); \ 283 }) 284 285 #define SET_RX_CHANNEL_FLAG(channel, param, value) \ 286 ({ \ 287 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ 288 if (channel->info_word) \ 289 channel->info_word->rx.param = cpu_to_le32(value); \ 290 else \ 291 channel->info->rx.param = value; \ 292 }) 293 294 #define SET_RX_CHANNEL_INFO(channel, param, value) \ 295 ({ \ 296 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ 297 if (channel->info_word) \ 298 channel->info_word->rx.param = cpu_to_le32(value); \ 299 else \ 300 channel->info->rx.param = cpu_to_le32(value); \ 301 }) 302 303 #define GET_TX_CHANNEL_FLAG(channel, param) \ 304 ({ \ 305 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ 306 channel->info_word ? \ 307 le32_to_cpu(channel->info_word->tx.param) : \ 308 channel->info->tx.param; \ 309 }) 310 311 #define GET_TX_CHANNEL_INFO(channel, param) \ 312 ({ \ 313 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ 314 le32_to_cpu(channel->info_word ? 
\ 315 channel->info_word->tx.param : \ 316 channel->info->tx.param); \ 317 }) 318 319 #define SET_TX_CHANNEL_FLAG(channel, param, value) \ 320 ({ \ 321 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ 322 if (channel->info_word) \ 323 channel->info_word->tx.param = cpu_to_le32(value); \ 324 else \ 325 channel->info->tx.param = value; \ 326 }) 327 328 #define SET_TX_CHANNEL_INFO(channel, param, value) \ 329 ({ \ 330 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ 331 if (channel->info_word) \ 332 channel->info_word->tx.param = cpu_to_le32(value); \ 333 else \ 334 channel->info->tx.param = cpu_to_le32(value); \ 335 }) 336 337 /** 338 * struct qcom_smd_alloc_entry - channel allocation entry 339 * @name: channel name 340 * @cid: channel index 341 * @flags: channel flags and edge id 342 * @ref_count: reference count of the channel 343 */ 344 struct qcom_smd_alloc_entry { 345 u8 name[20]; 346 __le32 cid; 347 __le32 flags; 348 __le32 ref_count; 349 } __packed; 350 351 #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff 352 #define SMD_CHANNEL_FLAGS_STREAM BIT(8) 353 #define SMD_CHANNEL_FLAGS_PACKET BIT(9) 354 355 /* 356 * Each smd packet contains a 20 byte header, with the first 4 being the length 357 * of the packet. 358 */ 359 #define SMD_PACKET_HEADER_LEN 20 360 361 /* 362 * Signal the remote processor associated with 'channel'. 363 */ 364 static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) 365 { 366 struct qcom_smd_edge *edge = channel->edge; 367 368 if (edge->mbox_chan) { 369 /* 370 * We can ignore a failing mbox_send_message() as the only 371 * possible cause is that the FIFO in the framework is full of 372 * other writes to the same bit. 
373 */ 374 mbox_send_message(edge->mbox_chan, NULL); 375 mbox_client_txdone(edge->mbox_chan, 0); 376 } else { 377 regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit)); 378 } 379 } 380 381 /* 382 * Initialize the tx channel info 383 */ 384 static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) 385 { 386 SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); 387 SET_TX_CHANNEL_FLAG(channel, fDSR, 0); 388 SET_TX_CHANNEL_FLAG(channel, fCTS, 0); 389 SET_TX_CHANNEL_FLAG(channel, fCD, 0); 390 SET_TX_CHANNEL_FLAG(channel, fRI, 0); 391 SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); 392 SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); 393 SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); 394 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); 395 SET_TX_CHANNEL_INFO(channel, head, 0); 396 SET_RX_CHANNEL_INFO(channel, tail, 0); 397 398 qcom_smd_signal_channel(channel); 399 400 channel->state = SMD_CHANNEL_CLOSED; 401 channel->pkt_size = 0; 402 } 403 404 /* 405 * Set the callback for a channel, with appropriate locking 406 */ 407 static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel, 408 rpmsg_rx_cb_t cb) 409 { 410 struct rpmsg_endpoint *ept = &channel->qsept->ept; 411 unsigned long flags; 412 413 spin_lock_irqsave(&channel->recv_lock, flags); 414 ept->cb = cb; 415 spin_unlock_irqrestore(&channel->recv_lock, flags); 416 }; 417 418 /* 419 * Calculate the amount of data available in the rx fifo 420 */ 421 static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel) 422 { 423 unsigned head; 424 unsigned tail; 425 426 head = GET_RX_CHANNEL_INFO(channel, head); 427 tail = GET_RX_CHANNEL_INFO(channel, tail); 428 429 return (head - tail) & (channel->fifo_size - 1); 430 } 431 432 /* 433 * Set tx channel state and inform the remote processor 434 */ 435 static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, 436 int state) 437 { 438 struct qcom_smd_edge *edge = channel->edge; 439 bool is_open = state == SMD_CHANNEL_OPENED; 440 
441 if (channel->state == state) 442 return; 443 444 dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state); 445 446 SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); 447 SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); 448 SET_TX_CHANNEL_FLAG(channel, fCD, is_open); 449 450 SET_TX_CHANNEL_INFO(channel, state, state); 451 SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); 452 453 channel->state = state; 454 qcom_smd_signal_channel(channel); 455 } 456 457 /* 458 * Copy count bytes of data using 32bit accesses, if that's required. 459 */ 460 static void smd_copy_to_fifo(void __iomem *dst, 461 const void *src, 462 size_t count, 463 bool word_aligned) 464 { 465 if (word_aligned) { 466 __iowrite32_copy(dst, src, count / sizeof(u32)); 467 } else { 468 memcpy_toio(dst, src, count); 469 } 470 } 471 472 /* 473 * Copy count bytes of data using 32bit accesses, if that is required. 474 */ 475 static void smd_copy_from_fifo(void *dst, 476 const void __iomem *src, 477 size_t count, 478 bool word_aligned) 479 { 480 if (word_aligned) { 481 __ioread32_copy(dst, src, count / sizeof(u32)); 482 } else { 483 memcpy_fromio(dst, src, count); 484 } 485 } 486 487 /* 488 * Read count bytes of data from the rx fifo into buf, but don't advance the 489 * tail. 490 */ 491 static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, 492 void *buf, size_t count) 493 { 494 bool word_aligned; 495 unsigned tail; 496 size_t len; 497 498 word_aligned = channel->info_word; 499 tail = GET_RX_CHANNEL_INFO(channel, tail); 500 501 len = min_t(size_t, count, channel->fifo_size - tail); 502 if (len) { 503 smd_copy_from_fifo(buf, 504 channel->rx_fifo + tail, 505 len, 506 word_aligned); 507 } 508 509 if (len != count) { 510 smd_copy_from_fifo(buf + len, 511 channel->rx_fifo, 512 count - len, 513 word_aligned); 514 } 515 516 return count; 517 } 518 519 /* 520 * Advance the rx tail by count bytes. 
521 */ 522 static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, 523 size_t count) 524 { 525 unsigned tail; 526 527 tail = GET_RX_CHANNEL_INFO(channel, tail); 528 tail += count; 529 tail &= (channel->fifo_size - 1); 530 SET_RX_CHANNEL_INFO(channel, tail, tail); 531 } 532 533 /* 534 * Read out a single packet from the rx fifo and deliver it to the device 535 */ 536 static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) 537 { 538 struct rpmsg_endpoint *ept = &channel->qsept->ept; 539 unsigned tail; 540 size_t len; 541 void *ptr; 542 int ret; 543 544 tail = GET_RX_CHANNEL_INFO(channel, tail); 545 546 /* Use bounce buffer if the data wraps */ 547 if (tail + channel->pkt_size >= channel->fifo_size) { 548 ptr = channel->bounce_buffer; 549 len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size); 550 } else { 551 ptr = channel->rx_fifo + tail; 552 len = channel->pkt_size; 553 } 554 555 ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY); 556 if (ret < 0) 557 return ret; 558 559 /* Only forward the tail if the client consumed the data */ 560 qcom_smd_channel_advance(channel, len); 561 562 channel->pkt_size = 0; 563 564 return 0; 565 } 566 567 /* 568 * Per channel interrupt handling 569 */ 570 static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) 571 { 572 bool need_state_scan = false; 573 int remote_state; 574 __le32 pktlen; 575 int avail; 576 int ret; 577 578 /* Handle state changes */ 579 remote_state = GET_RX_CHANNEL_INFO(channel, state); 580 if (remote_state != channel->remote_state) { 581 channel->remote_state = remote_state; 582 need_state_scan = true; 583 584 wake_up_interruptible_all(&channel->state_change_event); 585 } 586 /* Indicate that we have seen any state change */ 587 SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); 588 589 /* Signal waiting qcom_smd_send() about the interrupt */ 590 if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) 591 wake_up_interruptible_all(&channel->fblockread_event); 592 593 
/* Don't consume any data until we've opened the channel */ 594 if (channel->state != SMD_CHANNEL_OPENED) 595 goto out; 596 597 /* Indicate that we've seen the new data */ 598 SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); 599 600 /* Consume data */ 601 for (;;) { 602 avail = qcom_smd_channel_get_rx_avail(channel); 603 604 if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { 605 qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); 606 qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); 607 channel->pkt_size = le32_to_cpu(pktlen); 608 } else if (channel->pkt_size && avail >= channel->pkt_size) { 609 ret = qcom_smd_channel_recv_single(channel); 610 if (ret) 611 break; 612 } else { 613 break; 614 } 615 } 616 617 /* Indicate that we have seen and updated tail */ 618 SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); 619 620 /* Signal the remote that we've consumed the data (if requested) */ 621 if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { 622 /* Ensure ordering of channel info updates */ 623 wmb(); 624 625 qcom_smd_signal_channel(channel); 626 } 627 628 out: 629 return need_state_scan; 630 } 631 632 /* 633 * The edge interrupts are triggered by the remote processor on state changes, 634 * channel info updates or when new channels are created. 
635 */ 636 static irqreturn_t qcom_smd_edge_intr(int irq, void *data) 637 { 638 struct qcom_smd_edge *edge = data; 639 struct qcom_smd_channel *channel; 640 unsigned available; 641 bool kick_scanner = false; 642 bool kick_state = false; 643 644 /* 645 * Handle state changes or data on each of the channels on this edge 646 */ 647 spin_lock(&edge->channels_lock); 648 list_for_each_entry(channel, &edge->channels, list) { 649 spin_lock(&channel->recv_lock); 650 kick_state |= qcom_smd_channel_intr(channel); 651 spin_unlock(&channel->recv_lock); 652 } 653 spin_unlock(&edge->channels_lock); 654 655 /* 656 * Creating a new channel requires allocating an smem entry, so we only 657 * have to scan if the amount of available space in smem have changed 658 * since last scan. 659 */ 660 available = qcom_smem_get_free_space(edge->remote_pid); 661 if (available != edge->smem_available) { 662 edge->smem_available = available; 663 kick_scanner = true; 664 } 665 666 if (kick_scanner) 667 schedule_work(&edge->scan_work); 668 if (kick_state) 669 schedule_work(&edge->state_work); 670 671 return IRQ_HANDLED; 672 } 673 674 /* 675 * Calculate how much space is available in the tx fifo. 
676 */ 677 static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel) 678 { 679 unsigned head; 680 unsigned tail; 681 unsigned mask = channel->fifo_size - 1; 682 683 head = GET_TX_CHANNEL_INFO(channel, head); 684 tail = GET_TX_CHANNEL_INFO(channel, tail); 685 686 return mask - ((head - tail) & mask); 687 } 688 689 /* 690 * Write count bytes of data into channel, possibly wrapping in the ring buffer 691 */ 692 static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, 693 const void *data, 694 size_t count) 695 { 696 bool word_aligned; 697 unsigned head; 698 size_t len; 699 700 word_aligned = channel->info_word; 701 head = GET_TX_CHANNEL_INFO(channel, head); 702 703 len = min_t(size_t, count, channel->fifo_size - head); 704 if (len) { 705 smd_copy_to_fifo(channel->tx_fifo + head, 706 data, 707 len, 708 word_aligned); 709 } 710 711 if (len != count) { 712 smd_copy_to_fifo(channel->tx_fifo, 713 data + len, 714 count - len, 715 word_aligned); 716 } 717 718 head += count; 719 head &= (channel->fifo_size - 1); 720 SET_TX_CHANNEL_INFO(channel, head, head); 721 722 return count; 723 } 724 725 /** 726 * qcom_smd_send - write data to smd channel 727 * @channel: channel handle 728 * @data: buffer of data to write 729 * @len: number of bytes to write 730 * 731 * This is a blocking write of len bytes into the channel's tx ring buffer and 732 * signal the remote end. It will sleep until there is enough space available 733 * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid 734 * polling. 
735 */ 736 static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, 737 int len, bool wait) 738 { 739 __le32 hdr[5] = { cpu_to_le32(len), }; 740 int tlen = sizeof(hdr) + len; 741 unsigned long flags; 742 int ret; 743 744 /* Word aligned channels only accept word size aligned data */ 745 if (channel->info_word && len % 4) 746 return -EINVAL; 747 748 /* Reject packets that are too big */ 749 if (tlen >= channel->fifo_size) 750 return -EINVAL; 751 752 /* Highlight the fact that if we enter the loop below we might sleep */ 753 if (wait) 754 might_sleep(); 755 756 spin_lock_irqsave(&channel->tx_lock, flags); 757 758 while (qcom_smd_get_tx_avail(channel) < tlen && 759 channel->state == SMD_CHANNEL_OPENED) { 760 if (!wait) { 761 ret = -EAGAIN; 762 goto out_unlock; 763 } 764 765 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); 766 767 /* Wait without holding the tx_lock */ 768 spin_unlock_irqrestore(&channel->tx_lock, flags); 769 770 ret = wait_event_interruptible(channel->fblockread_event, 771 qcom_smd_get_tx_avail(channel) >= tlen || 772 channel->state != SMD_CHANNEL_OPENED); 773 if (ret) 774 return ret; 775 776 spin_lock_irqsave(&channel->tx_lock, flags); 777 778 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); 779 } 780 781 /* Fail if the channel was closed */ 782 if (channel->state != SMD_CHANNEL_OPENED) { 783 ret = -EPIPE; 784 goto out_unlock; 785 } 786 787 SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); 788 789 qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); 790 qcom_smd_write_fifo(channel, data, len); 791 792 SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); 793 794 /* Ensure ordering of channel info updates */ 795 wmb(); 796 797 qcom_smd_signal_channel(channel); 798 799 out_unlock: 800 spin_unlock_irqrestore(&channel->tx_lock, flags); 801 802 return ret; 803 } 804 805 /* 806 * Helper for opening a channel 807 */ 808 static int qcom_smd_channel_open(struct qcom_smd_channel *channel, 809 rpmsg_rx_cb_t cb) 810 { 811 struct qcom_smd_edge *edge = channel->edge; 812 
size_t bb_size; 813 int ret; 814 815 /* 816 * Packets are maximum 4k, but reduce if the fifo is smaller 817 */ 818 bb_size = min(channel->fifo_size, SZ_4K); 819 channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); 820 if (!channel->bounce_buffer) 821 return -ENOMEM; 822 823 qcom_smd_channel_set_callback(channel, cb); 824 qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); 825 826 /* Wait for remote to enter opening or opened */ 827 ret = wait_event_interruptible_timeout(channel->state_change_event, 828 channel->remote_state == SMD_CHANNEL_OPENING || 829 channel->remote_state == SMD_CHANNEL_OPENED, 830 HZ); 831 if (!ret) { 832 dev_err(&edge->dev, "remote side did not enter opening state\n"); 833 goto out_close_timeout; 834 } 835 836 qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); 837 838 /* Wait for remote to enter opened */ 839 ret = wait_event_interruptible_timeout(channel->state_change_event, 840 channel->remote_state == SMD_CHANNEL_OPENED, 841 HZ); 842 if (!ret) { 843 dev_err(&edge->dev, "remote side did not enter open state\n"); 844 goto out_close_timeout; 845 } 846 847 return 0; 848 849 out_close_timeout: 850 qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); 851 return -ETIMEDOUT; 852 } 853 854 /* 855 * Helper for closing and resetting a channel 856 */ 857 static void qcom_smd_channel_close(struct qcom_smd_channel *channel) 858 { 859 qcom_smd_channel_set_callback(channel, NULL); 860 861 kfree(channel->bounce_buffer); 862 channel->bounce_buffer = NULL; 863 864 qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); 865 qcom_smd_channel_reset(channel); 866 } 867 868 static struct qcom_smd_channel * 869 qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) 870 { 871 struct qcom_smd_channel *channel; 872 struct qcom_smd_channel *ret = NULL; 873 unsigned long flags; 874 875 spin_lock_irqsave(&edge->channels_lock, flags); 876 list_for_each_entry(channel, &edge->channels, list) { 877 if (!strcmp(channel->name, name)) { 878 ret = 
channel; 879 break; 880 } 881 } 882 spin_unlock_irqrestore(&edge->channels_lock, flags); 883 884 return ret; 885 } 886 887 static void __ept_release(struct kref *kref) 888 { 889 struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint, 890 refcount); 891 kfree(to_smd_endpoint(ept)); 892 } 893 894 static struct rpmsg_endpoint *qcom_smd_create_ept(struct rpmsg_device *rpdev, 895 rpmsg_rx_cb_t cb, void *priv, 896 struct rpmsg_channel_info chinfo) 897 { 898 struct qcom_smd_endpoint *qsept; 899 struct qcom_smd_channel *channel; 900 struct qcom_smd_device *qsdev = to_smd_device(rpdev); 901 struct qcom_smd_edge *edge = qsdev->edge; 902 struct rpmsg_endpoint *ept; 903 const char *name = chinfo.name; 904 int ret; 905 906 /* Wait up to HZ for the channel to appear */ 907 ret = wait_event_interruptible_timeout(edge->new_channel_event, 908 (channel = qcom_smd_find_channel(edge, name)) != NULL, 909 HZ); 910 if (!ret) 911 return NULL; 912 913 if (channel->state != SMD_CHANNEL_CLOSED) { 914 dev_err(&rpdev->dev, "channel %s is busy\n", channel->name); 915 return NULL; 916 } 917 918 qsept = kzalloc(sizeof(*qsept), GFP_KERNEL); 919 if (!qsept) 920 return NULL; 921 922 ept = &qsept->ept; 923 924 kref_init(&ept->refcount); 925 926 ept->rpdev = rpdev; 927 ept->cb = cb; 928 ept->priv = priv; 929 ept->ops = &qcom_smd_endpoint_ops; 930 931 channel->qsept = qsept; 932 qsept->qsch = channel; 933 934 ret = qcom_smd_channel_open(channel, cb); 935 if (ret) 936 goto free_ept; 937 938 return ept; 939 940 free_ept: 941 channel->qsept = NULL; 942 kref_put(&ept->refcount, __ept_release); 943 return NULL; 944 } 945 946 static void qcom_smd_destroy_ept(struct rpmsg_endpoint *ept) 947 { 948 struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); 949 struct qcom_smd_channel *ch = qsept->qsch; 950 951 qcom_smd_channel_close(ch); 952 ch->qsept = NULL; 953 kref_put(&ept->refcount, __ept_release); 954 } 955 956 static int qcom_smd_send(struct rpmsg_endpoint *ept, void *data, int len) 957 { 958 
struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); 959 960 return __qcom_smd_send(qsept->qsch, data, len, true); 961 } 962 963 static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len) 964 { 965 struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); 966 967 return __qcom_smd_send(qsept->qsch, data, len, false); 968 } 969 970 static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept, 971 struct file *filp, poll_table *wait) 972 { 973 struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); 974 struct qcom_smd_channel *channel = qsept->qsch; 975 __poll_t mask = 0; 976 977 poll_wait(filp, &channel->fblockread_event, wait); 978 979 if (qcom_smd_get_tx_avail(channel) > 20) 980 mask |= EPOLLOUT | EPOLLWRNORM; 981 982 return mask; 983 } 984 985 /* 986 * Finds the device_node for the smd child interested in this channel. 987 */ 988 static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, 989 const char *channel) 990 { 991 struct device_node *child; 992 const char *name; 993 const char *key; 994 int ret; 995 996 for_each_available_child_of_node(edge_node, child) { 997 key = "qcom,smd-channels"; 998 ret = of_property_read_string(child, key, &name); 999 if (ret) 1000 continue; 1001 1002 if (strcmp(name, channel) == 0) 1003 return child; 1004 } 1005 1006 return NULL; 1007 } 1008 1009 static int qcom_smd_announce_create(struct rpmsg_device *rpdev) 1010 { 1011 struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept); 1012 struct qcom_smd_channel *channel = qept->qsch; 1013 unsigned long flags; 1014 bool kick_state; 1015 1016 spin_lock_irqsave(&channel->recv_lock, flags); 1017 kick_state = qcom_smd_channel_intr(channel); 1018 spin_unlock_irqrestore(&channel->recv_lock, flags); 1019 1020 if (kick_state) 1021 schedule_work(&channel->edge->state_work); 1022 1023 return 0; 1024 } 1025 1026 static const struct rpmsg_device_ops qcom_smd_device_ops = { 1027 .create_ept = qcom_smd_create_ept, 1028 .announce_create = 
qcom_smd_announce_create, 1029 }; 1030 1031 static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = { 1032 .destroy_ept = qcom_smd_destroy_ept, 1033 .send = qcom_smd_send, 1034 .trysend = qcom_smd_trysend, 1035 .poll = qcom_smd_poll, 1036 }; 1037 1038 static void qcom_smd_release_device(struct device *dev) 1039 { 1040 struct rpmsg_device *rpdev = to_rpmsg_device(dev); 1041 struct qcom_smd_device *qsdev = to_smd_device(rpdev); 1042 1043 kfree(qsdev); 1044 } 1045 1046 /* 1047 * Create a smd client device for channel that is being opened. 1048 */ 1049 static int qcom_smd_create_device(struct qcom_smd_channel *channel) 1050 { 1051 struct qcom_smd_device *qsdev; 1052 struct rpmsg_device *rpdev; 1053 struct qcom_smd_edge *edge = channel->edge; 1054 1055 dev_dbg(&edge->dev, "registering '%s'\n", channel->name); 1056 1057 qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); 1058 if (!qsdev) 1059 return -ENOMEM; 1060 1061 /* Link qsdev to our SMD edge */ 1062 qsdev->edge = edge; 1063 1064 /* Assign callbacks for rpmsg_device */ 1065 qsdev->rpdev.ops = &qcom_smd_device_ops; 1066 1067 /* Assign public information to the rpmsg_device */ 1068 rpdev = &qsdev->rpdev; 1069 strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE); 1070 rpdev->src = RPMSG_ADDR_ANY; 1071 rpdev->dst = RPMSG_ADDR_ANY; 1072 1073 rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name); 1074 rpdev->dev.parent = &edge->dev; 1075 rpdev->dev.release = qcom_smd_release_device; 1076 1077 return rpmsg_register_device(rpdev); 1078 } 1079 1080 static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge) 1081 { 1082 struct qcom_smd_device *qsdev; 1083 1084 qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); 1085 if (!qsdev) 1086 return -ENOMEM; 1087 1088 qsdev->edge = edge; 1089 qsdev->rpdev.ops = &qcom_smd_device_ops; 1090 qsdev->rpdev.dev.parent = &edge->dev; 1091 qsdev->rpdev.dev.release = qcom_smd_release_device; 1092 1093 return rpmsg_chrdev_register_device(&qsdev->rpdev); 1094 } 1095 1096 /* 1097 * 
Allocate the qcom_smd_channel object for a newly found smd channel, 1098 * retrieving and validating the smem items involved. 1099 */ 1100 static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge, 1101 unsigned smem_info_item, 1102 unsigned smem_fifo_item, 1103 char *name) 1104 { 1105 struct qcom_smd_channel *channel; 1106 size_t fifo_size; 1107 size_t info_size; 1108 void *fifo_base; 1109 void *info; 1110 int ret; 1111 1112 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 1113 if (!channel) 1114 return ERR_PTR(-ENOMEM); 1115 1116 channel->edge = edge; 1117 channel->name = kstrdup(name, GFP_KERNEL); 1118 if (!channel->name) 1119 return ERR_PTR(-ENOMEM); 1120 1121 spin_lock_init(&channel->tx_lock); 1122 spin_lock_init(&channel->recv_lock); 1123 init_waitqueue_head(&channel->fblockread_event); 1124 init_waitqueue_head(&channel->state_change_event); 1125 1126 info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); 1127 if (IS_ERR(info)) { 1128 ret = PTR_ERR(info); 1129 goto free_name_and_channel; 1130 } 1131 1132 /* 1133 * Use the size of the item to figure out which channel info struct to 1134 * use. 
1135 */ 1136 if (info_size == 2 * sizeof(struct smd_channel_info_word)) { 1137 channel->info_word = info; 1138 } else if (info_size == 2 * sizeof(struct smd_channel_info)) { 1139 channel->info = info; 1140 } else { 1141 dev_err(&edge->dev, 1142 "channel info of size %zu not supported\n", info_size); 1143 ret = -EINVAL; 1144 goto free_name_and_channel; 1145 } 1146 1147 fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); 1148 if (IS_ERR(fifo_base)) { 1149 ret = PTR_ERR(fifo_base); 1150 goto free_name_and_channel; 1151 } 1152 1153 /* The channel consist of a rx and tx fifo of equal size */ 1154 fifo_size /= 2; 1155 1156 dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", 1157 name, info_size, fifo_size); 1158 1159 channel->tx_fifo = fifo_base; 1160 channel->rx_fifo = fifo_base + fifo_size; 1161 channel->fifo_size = fifo_size; 1162 1163 qcom_smd_channel_reset(channel); 1164 1165 return channel; 1166 1167 free_name_and_channel: 1168 kfree(channel->name); 1169 kfree(channel); 1170 1171 return ERR_PTR(ret); 1172 } 1173 1174 /* 1175 * Scans the allocation table for any newly allocated channels, calls 1176 * qcom_smd_create_channel() to create representations of these and add 1177 * them to the edge's list of channels. 
 */
static void qcom_channel_scan_worker(struct work_struct *work)
{
	struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;
	u32 eflags, cid;

	/* Walk both allocation tables published by the remote in smem */
	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
					  smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			eflags = le32_to_cpu(entry->flags);
			/* Already picked up by a previous scan */
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			/* Skip unused table slots */
			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			/* Only packet-mode channels are handled here */
			if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			/* The entry must belong to this edge */
			if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			/* cid indexes the channel's info and fifo smem items */
			cid = le32_to_cpu(entry->cid);
			info_id = smem_items[tbl].info_base_id + cid;
			fifo_id = smem_items[tbl].fifo_base_id + cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);

			/* Wake anyone blocked in qcom_smd_create_ept() */
			wake_up_interruptible_all(&edge->new_channel_event);
		}
	}

	/* Let the state worker react to the channels found above */
	schedule_work(&edge->state_work);
}

/*
 * This per edge worker scans smem for any new channels and register these. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
1242 * 1243 * LOCKING: edge->channels_lock only needs to cover the list operations, as the 1244 * worker is killed before any channels are deallocated 1245 */ 1246 static void qcom_channel_state_worker(struct work_struct *work) 1247 { 1248 struct qcom_smd_channel *channel; 1249 struct qcom_smd_edge *edge = container_of(work, 1250 struct qcom_smd_edge, 1251 state_work); 1252 struct rpmsg_channel_info chinfo; 1253 unsigned remote_state; 1254 unsigned long flags; 1255 1256 /* 1257 * Register a device for any closed channel where the remote processor 1258 * is showing interest in opening the channel. 1259 */ 1260 spin_lock_irqsave(&edge->channels_lock, flags); 1261 list_for_each_entry(channel, &edge->channels, list) { 1262 if (channel->state != SMD_CHANNEL_CLOSED) 1263 continue; 1264 1265 remote_state = GET_RX_CHANNEL_INFO(channel, state); 1266 if (remote_state != SMD_CHANNEL_OPENING && 1267 remote_state != SMD_CHANNEL_OPENED) 1268 continue; 1269 1270 if (channel->registered) 1271 continue; 1272 1273 spin_unlock_irqrestore(&edge->channels_lock, flags); 1274 qcom_smd_create_device(channel); 1275 channel->registered = true; 1276 spin_lock_irqsave(&edge->channels_lock, flags); 1277 1278 channel->registered = true; 1279 } 1280 1281 /* 1282 * Unregister the device for any channel that is opened where the 1283 * remote processor is closing the channel. 
1284 */ 1285 list_for_each_entry(channel, &edge->channels, list) { 1286 if (channel->state != SMD_CHANNEL_OPENING && 1287 channel->state != SMD_CHANNEL_OPENED) 1288 continue; 1289 1290 remote_state = GET_RX_CHANNEL_INFO(channel, state); 1291 if (remote_state == SMD_CHANNEL_OPENING || 1292 remote_state == SMD_CHANNEL_OPENED) 1293 continue; 1294 1295 spin_unlock_irqrestore(&edge->channels_lock, flags); 1296 1297 strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); 1298 chinfo.src = RPMSG_ADDR_ANY; 1299 chinfo.dst = RPMSG_ADDR_ANY; 1300 rpmsg_unregister_device(&edge->dev, &chinfo); 1301 channel->registered = false; 1302 spin_lock_irqsave(&edge->channels_lock, flags); 1303 } 1304 spin_unlock_irqrestore(&edge->channels_lock, flags); 1305 } 1306 1307 /* 1308 * Parses an of_node describing an edge. 1309 */ 1310 static int qcom_smd_parse_edge(struct device *dev, 1311 struct device_node *node, 1312 struct qcom_smd_edge *edge) 1313 { 1314 struct device_node *syscon_np; 1315 const char *key; 1316 int irq; 1317 int ret; 1318 1319 INIT_LIST_HEAD(&edge->channels); 1320 spin_lock_init(&edge->channels_lock); 1321 1322 INIT_WORK(&edge->scan_work, qcom_channel_scan_worker); 1323 INIT_WORK(&edge->state_work, qcom_channel_state_worker); 1324 1325 edge->of_node = of_node_get(node); 1326 1327 key = "qcom,smd-edge"; 1328 ret = of_property_read_u32(node, key, &edge->edge_id); 1329 if (ret) { 1330 dev_err(dev, "edge missing %s property\n", key); 1331 return -EINVAL; 1332 } 1333 1334 edge->remote_pid = QCOM_SMEM_HOST_ANY; 1335 key = "qcom,remote-pid"; 1336 of_property_read_u32(node, key, &edge->remote_pid); 1337 1338 edge->mbox_client.dev = dev; 1339 edge->mbox_client.knows_txdone = true; 1340 edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0); 1341 if (IS_ERR(edge->mbox_chan)) { 1342 if (PTR_ERR(edge->mbox_chan) != -ENODEV) 1343 return PTR_ERR(edge->mbox_chan); 1344 1345 edge->mbox_chan = NULL; 1346 1347 syscon_np = of_parse_phandle(node, "qcom,ipc", 0); 1348 if 
(!syscon_np) { 1349 dev_err(dev, "no qcom,ipc node\n"); 1350 return -ENODEV; 1351 } 1352 1353 edge->ipc_regmap = syscon_node_to_regmap(syscon_np); 1354 if (IS_ERR(edge->ipc_regmap)) 1355 return PTR_ERR(edge->ipc_regmap); 1356 1357 key = "qcom,ipc"; 1358 ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); 1359 if (ret < 0) { 1360 dev_err(dev, "no offset in %s\n", key); 1361 return -EINVAL; 1362 } 1363 1364 ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); 1365 if (ret < 0) { 1366 dev_err(dev, "no bit in %s\n", key); 1367 return -EINVAL; 1368 } 1369 } 1370 1371 ret = of_property_read_string(node, "label", &edge->name); 1372 if (ret < 0) 1373 edge->name = node->name; 1374 1375 irq = irq_of_parse_and_map(node, 0); 1376 if (irq < 0) { 1377 dev_err(dev, "required smd interrupt missing\n"); 1378 return -EINVAL; 1379 } 1380 1381 ret = devm_request_irq(dev, irq, 1382 qcom_smd_edge_intr, IRQF_TRIGGER_RISING, 1383 node->name, edge); 1384 if (ret) { 1385 dev_err(dev, "failed to request smd irq\n"); 1386 return ret; 1387 } 1388 1389 edge->irq = irq; 1390 1391 return 0; 1392 } 1393 1394 /* 1395 * Release function for an edge. 1396 * Reset the state of each associated channel and free the edge context. 
1397 */ 1398 static void qcom_smd_edge_release(struct device *dev) 1399 { 1400 struct qcom_smd_channel *channel, *tmp; 1401 struct qcom_smd_edge *edge = to_smd_edge(dev); 1402 1403 list_for_each_entry_safe(channel, tmp, &edge->channels, list) { 1404 list_del(&channel->list); 1405 kfree(channel->name); 1406 kfree(channel); 1407 } 1408 1409 kfree(edge); 1410 } 1411 1412 static ssize_t rpmsg_name_show(struct device *dev, 1413 struct device_attribute *attr, char *buf) 1414 { 1415 struct qcom_smd_edge *edge = to_smd_edge(dev); 1416 1417 return sprintf(buf, "%s\n", edge->name); 1418 } 1419 static DEVICE_ATTR_RO(rpmsg_name); 1420 1421 static struct attribute *qcom_smd_edge_attrs[] = { 1422 &dev_attr_rpmsg_name.attr, 1423 NULL 1424 }; 1425 ATTRIBUTE_GROUPS(qcom_smd_edge); 1426 1427 /** 1428 * qcom_smd_register_edge() - register an edge based on an device_node 1429 * @parent: parent device for the edge 1430 * @node: device_node describing the edge 1431 * 1432 * Returns an edge reference, or negative ERR_PTR() on failure. 
 */
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
					     struct device_node *node)
{
	struct qcom_smd_edge *edge;
	int ret;

	edge = kzalloc(sizeof(*edge), GFP_KERNEL);
	if (!edge)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&edge->new_channel_event);

	edge->dev.parent = parent;
	/* Frees the edge and its channels when the last device ref drops */
	edge->dev.release = qcom_smd_edge_release;
	edge->dev.of_node = node;
	edge->dev.groups = qcom_smd_edge_groups;
	dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
	ret = device_register(&edge->dev);
	if (ret) {
		pr_err("failed to register smd edge\n");
		/* After device_register(), only put_device() may free it */
		put_device(&edge->dev);
		return ERR_PTR(ret);
	}

	ret = qcom_smd_parse_edge(&edge->dev, node, edge);
	if (ret) {
		dev_err(&edge->dev, "failed to parse smd edge\n");
		goto unregister_dev;
	}

	ret = qcom_smd_create_chrdev(edge);
	if (ret) {
		dev_err(&edge->dev, "failed to register chrdev for edge\n");
		goto unregister_dev;
	}

	/* Kick off the initial scan for channels published in smem */
	schedule_work(&edge->scan_work);

	return edge;

unregister_dev:
	if (!IS_ERR_OR_NULL(edge->mbox_chan))
		mbox_free_channel(edge->mbox_chan);

	/*
	 * NOTE(review): parse_edge took of_node_get(node); it looks like the
	 * reference is not put on this error path — confirm against
	 * qcom_smd_edge_release.
	 */
	device_unregister(&edge->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(qcom_smd_register_edge);

/* device_for_each_child() helper: unregister one child device. */
static int qcom_smd_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/**
 * qcom_smd_unregister_edge() - release an edge and its children
 * @edge: edge reference acquired from qcom_smd_register_edge
 */
int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
	int ret;

	/* Stop interrupt-driven processing before tearing anything down */
	disable_irq(edge->irq);
	cancel_work_sync(&edge->scan_work);
	cancel_work_sync(&edge->state_work);

	ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
	if (ret)
		dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);

	mbox_free_channel(edge->mbox_chan);
	device_unregister(&edge->dev);

	return 0;
}
EXPORT_SYMBOL(qcom_smd_unregister_edge);

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct device_node *node;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	/* One edge per child node; failures are tolerated per edge */
	for_each_available_child_of_node(pdev->dev.of_node, node)
		qcom_smd_register_edge(&pdev->dev, node);

	return 0;
}

/* device_for_each_child() helper: tear down one edge. */
static int qcom_smd_remove_edge(struct device *dev, void *data)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return qcom_smd_unregister_edge(edge);
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	int ret;

	ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
	if (ret)
		dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);

	return ret;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	return platform_driver_register(&qcom_smd_driver);
}
/* subsys_initcall: must be up before dependent rpmsg client drivers */
subsys_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");