1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016-2017, Linaro Ltd 4 */ 5 6 #include <linux/idr.h> 7 #include <linux/interrupt.h> 8 #include <linux/io.h> 9 #include <linux/list.h> 10 #include <linux/mfd/syscon.h> 11 #include <linux/module.h> 12 #include <linux/of.h> 13 #include <linux/of_address.h> 14 #include <linux/platform_device.h> 15 #include <linux/regmap.h> 16 #include <linux/rpmsg.h> 17 #include <linux/sizes.h> 18 #include <linux/slab.h> 19 #include <linux/wait.h> 20 #include <linux/workqueue.h> 21 #include <linux/mailbox_client.h> 22 23 #include "rpmsg_internal.h" 24 #include "qcom_glink_native.h" 25 26 #define GLINK_NAME_SIZE 32 27 #define GLINK_VERSION_1 1 28 29 #define RPM_GLINK_CID_MIN 1 30 #define RPM_GLINK_CID_MAX 65536 31 32 struct glink_msg { 33 __le16 cmd; 34 __le16 param1; 35 __le32 param2; 36 u8 data[]; 37 } __packed; 38 39 /** 40 * struct glink_defer_cmd - deferred incoming control message 41 * @node: list node 42 * @msg: message header 43 * @data: payload of the message 44 * 45 * Copy of a received control message, to be added to @rx_queue and processed 46 * by @rx_work of @qcom_glink. 
47 */ 48 struct glink_defer_cmd { 49 struct list_head node; 50 51 struct glink_msg msg; 52 u8 data[]; 53 }; 54 55 /** 56 * struct glink_core_rx_intent - RX intent 57 * RX intent 58 * 59 * @data: pointer to the data (may be NULL for zero-copy) 60 * @id: remote or local intent ID 61 * @size: size of the original intent (do not modify) 62 * @reuse: To mark if the intent can be reused after first use 63 * @in_use: To mark if intent is already in use for the channel 64 * @offset: next write offset (initially 0) 65 * @node: list node 66 */ 67 struct glink_core_rx_intent { 68 void *data; 69 u32 id; 70 size_t size; 71 bool reuse; 72 bool in_use; 73 u32 offset; 74 75 struct list_head node; 76 }; 77 78 /** 79 * struct qcom_glink - driver context, relates to one remote subsystem 80 * @dev: reference to the associated struct device 81 * @rx_pipe: pipe object for receive FIFO 82 * @tx_pipe: pipe object for transmit FIFO 83 * @rx_work: worker for handling received control messages 84 * @rx_lock: protects the @rx_queue 85 * @rx_queue: queue of received control messages to be processed in @rx_work 86 * @tx_lock: synchronizes operations on the tx fifo 87 * @idr_lock: synchronizes @lcids and @rcids modifications 88 * @lcids: idr of all channels with a known local channel id 89 * @rcids: idr of all channels with a known remote channel id 90 * @features: remote features 91 * @intentless: flag to indicate that there is no intent 92 * @tx_avail_notify: Waitqueue for pending tx tasks 93 * @sent_read_notify: flag to check cmd sent or not 94 * @abort_tx: flag indicating that all tx attempts should fail 95 */ 96 struct qcom_glink { 97 struct device *dev; 98 99 struct qcom_glink_pipe *rx_pipe; 100 struct qcom_glink_pipe *tx_pipe; 101 102 struct work_struct rx_work; 103 spinlock_t rx_lock; 104 struct list_head rx_queue; 105 106 spinlock_t tx_lock; 107 108 spinlock_t idr_lock; 109 struct idr lcids; 110 struct idr rcids; 111 unsigned long features; 112 113 bool intentless; 114 wait_queue_head_t 
tx_avail_notify; 115 bool sent_read_notify; 116 117 bool abort_tx; 118 }; 119 120 enum { 121 GLINK_STATE_CLOSED, 122 GLINK_STATE_OPENING, 123 GLINK_STATE_OPEN, 124 GLINK_STATE_CLOSING, 125 }; 126 127 /** 128 * struct glink_channel - internal representation of a channel 129 * @rpdev: rpdev reference, only used for primary endpoints 130 * @ept: rpmsg endpoint this channel is associated with 131 * @glink: qcom_glink context handle 132 * @refcount: refcount for the channel object 133 * @recv_lock: guard for @ept.cb 134 * @name: unique channel name/identifier 135 * @lcid: channel id, in local space 136 * @rcid: channel id, in remote space 137 * @intent_lock: lock for protection of @liids, @riids 138 * @liids: idr of all local intents 139 * @riids: idr of all remote intents 140 * @intent_work: worker responsible for transmitting rx_done packets 141 * @done_intents: list of intents that needs to be announced rx_done 142 * @buf: receive buffer, for gathering fragments 143 * @buf_offset: write offset in @buf 144 * @buf_size: size of current @buf 145 * @open_ack: completed once remote has acked the open-request 146 * @open_req: completed once open-request has been received 147 * @intent_req_lock: Synchronises multiple intent requests 148 * @intent_req_result: Result of intent request 149 * @intent_received: flag indicating that an intent has been received 150 * @intent_req_wq: wait queue for intent_req signalling 151 */ 152 struct glink_channel { 153 struct rpmsg_endpoint ept; 154 155 struct rpmsg_device *rpdev; 156 struct qcom_glink *glink; 157 158 struct kref refcount; 159 160 spinlock_t recv_lock; 161 162 char *name; 163 unsigned int lcid; 164 unsigned int rcid; 165 166 spinlock_t intent_lock; 167 struct idr liids; 168 struct idr riids; 169 struct work_struct intent_work; 170 struct list_head done_intents; 171 172 struct glink_core_rx_intent *buf; 173 int buf_offset; 174 int buf_size; 175 176 struct completion open_ack; 177 struct completion open_req; 178 179 struct mutex 
intent_req_lock; 180 int intent_req_result; 181 bool intent_received; 182 wait_queue_head_t intent_req_wq; 183 }; 184 185 #define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept) 186 187 static const struct rpmsg_endpoint_ops glink_endpoint_ops; 188 189 #define GLINK_CMD_VERSION 0 190 #define GLINK_CMD_VERSION_ACK 1 191 #define GLINK_CMD_OPEN 2 192 #define GLINK_CMD_CLOSE 3 193 #define GLINK_CMD_OPEN_ACK 4 194 #define GLINK_CMD_INTENT 5 195 #define GLINK_CMD_RX_DONE 6 196 #define GLINK_CMD_RX_INTENT_REQ 7 197 #define GLINK_CMD_RX_INTENT_REQ_ACK 8 198 #define GLINK_CMD_TX_DATA 9 199 #define GLINK_CMD_CLOSE_ACK 11 200 #define GLINK_CMD_TX_DATA_CONT 12 201 #define GLINK_CMD_READ_NOTIF 13 202 #define GLINK_CMD_RX_DONE_W_REUSE 14 203 #define GLINK_CMD_SIGNALS 15 204 205 #define GLINK_FEATURE_INTENTLESS BIT(1) 206 207 #define NATIVE_DTR_SIG NATIVE_DSR_SIG 208 #define NATIVE_DSR_SIG BIT(31) 209 #define NATIVE_RTS_SIG NATIVE_CTS_SIG 210 #define NATIVE_CTS_SIG BIT(30) 211 212 static void qcom_glink_rx_done_work(struct work_struct *work); 213 214 static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, 215 const char *name) 216 { 217 struct glink_channel *channel; 218 219 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 220 if (!channel) 221 return ERR_PTR(-ENOMEM); 222 223 /* Setup glink internal glink_channel data */ 224 spin_lock_init(&channel->recv_lock); 225 spin_lock_init(&channel->intent_lock); 226 mutex_init(&channel->intent_req_lock); 227 228 channel->glink = glink; 229 channel->name = kstrdup(name, GFP_KERNEL); 230 if (!channel->name) { 231 kfree(channel); 232 return ERR_PTR(-ENOMEM); 233 } 234 235 init_completion(&channel->open_req); 236 init_completion(&channel->open_ack); 237 init_waitqueue_head(&channel->intent_req_wq); 238 239 INIT_LIST_HEAD(&channel->done_intents); 240 INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); 241 242 idr_init(&channel->liids); 243 idr_init(&channel->riids); 244 
kref_init(&channel->refcount); 245 246 return channel; 247 } 248 249 static void qcom_glink_channel_release(struct kref *ref) 250 { 251 struct glink_channel *channel = container_of(ref, struct glink_channel, 252 refcount); 253 struct glink_core_rx_intent *intent; 254 struct glink_core_rx_intent *tmp; 255 unsigned long flags; 256 int iid; 257 258 /* cancel pending rx_done work */ 259 cancel_work_sync(&channel->intent_work); 260 261 spin_lock_irqsave(&channel->intent_lock, flags); 262 /* Free all non-reuse intents pending rx_done work */ 263 list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { 264 if (!intent->reuse) { 265 kfree(intent->data); 266 kfree(intent); 267 } 268 } 269 270 idr_for_each_entry(&channel->liids, tmp, iid) { 271 kfree(tmp->data); 272 kfree(tmp); 273 } 274 idr_destroy(&channel->liids); 275 276 idr_for_each_entry(&channel->riids, tmp, iid) 277 kfree(tmp); 278 idr_destroy(&channel->riids); 279 spin_unlock_irqrestore(&channel->intent_lock, flags); 280 281 kfree(channel->name); 282 kfree(channel); 283 } 284 285 static size_t qcom_glink_rx_avail(struct qcom_glink *glink) 286 { 287 return glink->rx_pipe->avail(glink->rx_pipe); 288 } 289 290 static void qcom_glink_rx_peek(struct qcom_glink *glink, 291 void *data, unsigned int offset, size_t count) 292 { 293 glink->rx_pipe->peek(glink->rx_pipe, data, offset, count); 294 } 295 296 static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count) 297 { 298 glink->rx_pipe->advance(glink->rx_pipe, count); 299 } 300 301 static size_t qcom_glink_tx_avail(struct qcom_glink *glink) 302 { 303 return glink->tx_pipe->avail(glink->tx_pipe); 304 } 305 306 static void qcom_glink_tx_write(struct qcom_glink *glink, 307 const void *hdr, size_t hlen, 308 const void *data, size_t dlen) 309 { 310 glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen); 311 } 312 313 static void qcom_glink_tx_kick(struct qcom_glink *glink) 314 { 315 glink->tx_pipe->kick(glink->tx_pipe); 316 } 317 318 static void 
qcom_glink_send_read_notify(struct qcom_glink *glink) 319 { 320 struct glink_msg msg; 321 322 msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF); 323 msg.param1 = 0; 324 msg.param2 = 0; 325 326 qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0); 327 328 qcom_glink_tx_kick(glink); 329 } 330 331 static int qcom_glink_tx(struct qcom_glink *glink, 332 const void *hdr, size_t hlen, 333 const void *data, size_t dlen, bool wait) 334 { 335 unsigned int tlen = hlen + dlen; 336 unsigned long flags; 337 int ret = 0; 338 339 /* Reject packets that are too big */ 340 if (tlen >= glink->tx_pipe->length) 341 return -EINVAL; 342 343 spin_lock_irqsave(&glink->tx_lock, flags); 344 345 if (glink->abort_tx) { 346 ret = -EIO; 347 goto out; 348 } 349 350 while (qcom_glink_tx_avail(glink) < tlen) { 351 if (!wait) { 352 ret = -EAGAIN; 353 goto out; 354 } 355 356 if (glink->abort_tx) { 357 ret = -EIO; 358 goto out; 359 } 360 361 if (!glink->sent_read_notify) { 362 glink->sent_read_notify = true; 363 qcom_glink_send_read_notify(glink); 364 } 365 366 /* Wait without holding the tx_lock */ 367 spin_unlock_irqrestore(&glink->tx_lock, flags); 368 369 wait_event_timeout(glink->tx_avail_notify, 370 qcom_glink_tx_avail(glink) >= tlen, 10 * HZ); 371 372 spin_lock_irqsave(&glink->tx_lock, flags); 373 374 if (qcom_glink_tx_avail(glink) >= tlen) 375 glink->sent_read_notify = false; 376 } 377 378 qcom_glink_tx_write(glink, hdr, hlen, data, dlen); 379 qcom_glink_tx_kick(glink); 380 381 out: 382 spin_unlock_irqrestore(&glink->tx_lock, flags); 383 384 return ret; 385 } 386 387 static int qcom_glink_send_version(struct qcom_glink *glink) 388 { 389 struct glink_msg msg; 390 391 msg.cmd = cpu_to_le16(GLINK_CMD_VERSION); 392 msg.param1 = cpu_to_le16(GLINK_VERSION_1); 393 msg.param2 = cpu_to_le32(glink->features); 394 395 return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); 396 } 397 398 static void qcom_glink_send_version_ack(struct qcom_glink *glink) 399 { 400 struct glink_msg msg; 401 402 msg.cmd = 
cpu_to_le16(GLINK_CMD_VERSION_ACK); 403 msg.param1 = cpu_to_le16(GLINK_VERSION_1); 404 msg.param2 = cpu_to_le32(glink->features); 405 406 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); 407 } 408 409 static void qcom_glink_send_open_ack(struct qcom_glink *glink, 410 struct glink_channel *channel) 411 { 412 struct glink_msg msg; 413 414 msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK); 415 msg.param1 = cpu_to_le16(channel->rcid); 416 msg.param2 = cpu_to_le32(0); 417 418 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); 419 } 420 421 static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, 422 unsigned int cid, bool granted) 423 { 424 struct glink_channel *channel; 425 unsigned long flags; 426 427 spin_lock_irqsave(&glink->idr_lock, flags); 428 channel = idr_find(&glink->rcids, cid); 429 spin_unlock_irqrestore(&glink->idr_lock, flags); 430 if (!channel) { 431 dev_err(glink->dev, "unable to find channel\n"); 432 return; 433 } 434 435 WRITE_ONCE(channel->intent_req_result, granted); 436 wake_up_all(&channel->intent_req_wq); 437 } 438 439 static void qcom_glink_intent_req_abort(struct glink_channel *channel) 440 { 441 WRITE_ONCE(channel->intent_req_result, 0); 442 wake_up_all(&channel->intent_req_wq); 443 } 444 445 /** 446 * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote 447 * @glink: Ptr to the glink edge 448 * @channel: Ptr to the channel that the open req is sent 449 * 450 * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote. 451 * Will return with refcount held, regardless of outcome. 452 * 453 * Return: 0 on success, negative errno otherwise. 
454 */ 455 static int qcom_glink_send_open_req(struct qcom_glink *glink, 456 struct glink_channel *channel) 457 { 458 struct { 459 struct glink_msg msg; 460 u8 name[GLINK_NAME_SIZE]; 461 } __packed req; 462 int name_len = strlen(channel->name) + 1; 463 int req_len = ALIGN(sizeof(req.msg) + name_len, 8); 464 int ret; 465 unsigned long flags; 466 467 kref_get(&channel->refcount); 468 469 spin_lock_irqsave(&glink->idr_lock, flags); 470 ret = idr_alloc_cyclic(&glink->lcids, channel, 471 RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX, 472 GFP_ATOMIC); 473 spin_unlock_irqrestore(&glink->idr_lock, flags); 474 if (ret < 0) 475 return ret; 476 477 channel->lcid = ret; 478 479 req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN); 480 req.msg.param1 = cpu_to_le16(channel->lcid); 481 req.msg.param2 = cpu_to_le32(name_len); 482 strcpy(req.name, channel->name); 483 484 ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true); 485 if (ret) 486 goto remove_idr; 487 488 return 0; 489 490 remove_idr: 491 spin_lock_irqsave(&glink->idr_lock, flags); 492 idr_remove(&glink->lcids, channel->lcid); 493 channel->lcid = 0; 494 spin_unlock_irqrestore(&glink->idr_lock, flags); 495 496 return ret; 497 } 498 499 static void qcom_glink_send_close_req(struct qcom_glink *glink, 500 struct glink_channel *channel) 501 { 502 struct glink_msg req; 503 504 req.cmd = cpu_to_le16(GLINK_CMD_CLOSE); 505 req.param1 = cpu_to_le16(channel->lcid); 506 req.param2 = 0; 507 508 qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); 509 } 510 511 static void qcom_glink_send_close_ack(struct qcom_glink *glink, 512 unsigned int rcid) 513 { 514 struct glink_msg req; 515 516 req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK); 517 req.param1 = cpu_to_le16(rcid); 518 req.param2 = 0; 519 520 qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); 521 } 522 523 static void qcom_glink_rx_done_work(struct work_struct *work) 524 { 525 struct glink_channel *channel = container_of(work, struct glink_channel, 526 intent_work); 527 struct qcom_glink *glink 
= channel->glink; 528 struct glink_core_rx_intent *intent, *tmp; 529 struct { 530 u16 id; 531 u16 lcid; 532 u32 liid; 533 } __packed cmd; 534 535 unsigned int cid = channel->lcid; 536 unsigned int iid; 537 bool reuse; 538 unsigned long flags; 539 540 spin_lock_irqsave(&channel->intent_lock, flags); 541 list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { 542 list_del(&intent->node); 543 spin_unlock_irqrestore(&channel->intent_lock, flags); 544 iid = intent->id; 545 reuse = intent->reuse; 546 547 cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE; 548 cmd.lcid = cid; 549 cmd.liid = iid; 550 551 qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); 552 if (!reuse) { 553 kfree(intent->data); 554 kfree(intent); 555 } 556 spin_lock_irqsave(&channel->intent_lock, flags); 557 } 558 spin_unlock_irqrestore(&channel->intent_lock, flags); 559 } 560 561 static void qcom_glink_rx_done(struct qcom_glink *glink, 562 struct glink_channel *channel, 563 struct glink_core_rx_intent *intent) 564 { 565 /* We don't send RX_DONE to intentless systems */ 566 if (glink->intentless) { 567 kfree(intent->data); 568 kfree(intent); 569 return; 570 } 571 572 /* Take it off the tree of receive intents */ 573 if (!intent->reuse) { 574 spin_lock(&channel->intent_lock); 575 idr_remove(&channel->liids, intent->id); 576 spin_unlock(&channel->intent_lock); 577 } 578 579 /* Schedule the sending of a rx_done indication */ 580 spin_lock(&channel->intent_lock); 581 list_add_tail(&intent->node, &channel->done_intents); 582 spin_unlock(&channel->intent_lock); 583 584 schedule_work(&channel->intent_work); 585 } 586 587 /** 588 * qcom_glink_receive_version() - receive version/features from remote system 589 * 590 * @glink: pointer to transport interface 591 * @version: remote version 592 * @features: remote features 593 * 594 * This function is called in response to a remote-initiated version/feature 595 * negotiation sequence. 
596 */ 597 static void qcom_glink_receive_version(struct qcom_glink *glink, 598 u32 version, 599 u32 features) 600 { 601 switch (version) { 602 case 0: 603 break; 604 case GLINK_VERSION_1: 605 glink->features &= features; 606 fallthrough; 607 default: 608 qcom_glink_send_version_ack(glink); 609 break; 610 } 611 } 612 613 /** 614 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system 615 * 616 * @glink: pointer to transport interface 617 * @version: remote version response 618 * @features: remote features response 619 * 620 * This function is called in response to a local-initiated version/feature 621 * negotiation sequence and is the counter-offer from the remote side based 622 * upon the initial version and feature set requested. 623 */ 624 static void qcom_glink_receive_version_ack(struct qcom_glink *glink, 625 u32 version, 626 u32 features) 627 { 628 switch (version) { 629 case 0: 630 /* Version negotiation failed */ 631 break; 632 case GLINK_VERSION_1: 633 if (features == glink->features) 634 break; 635 636 glink->features &= features; 637 fallthrough; 638 default: 639 qcom_glink_send_version(glink); 640 break; 641 } 642 } 643 644 /** 645 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to 646 * wire format and transmit 647 * @glink: The transport to transmit on. 648 * @channel: The glink channel 649 * @granted: The request response to encode. 650 * 651 * Return: 0 on success or standard Linux error code. 
652 */ 653 static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink, 654 struct glink_channel *channel, 655 bool granted) 656 { 657 struct glink_msg msg; 658 659 msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK); 660 msg.param1 = cpu_to_le16(channel->lcid); 661 msg.param2 = cpu_to_le32(granted); 662 663 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); 664 665 return 0; 666 } 667 668 /** 669 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and 670 * transmit 671 * @glink: The transport to transmit on. 672 * @channel: The local channel 673 * @intent: The intent to pass on to remote. 674 * 675 * Return: 0 on success or standard Linux error code. 676 */ 677 static int qcom_glink_advertise_intent(struct qcom_glink *glink, 678 struct glink_channel *channel, 679 struct glink_core_rx_intent *intent) 680 { 681 struct command { 682 __le16 id; 683 __le16 lcid; 684 __le32 count; 685 __le32 size; 686 __le32 liid; 687 } __packed; 688 struct command cmd; 689 690 cmd.id = cpu_to_le16(GLINK_CMD_INTENT); 691 cmd.lcid = cpu_to_le16(channel->lcid); 692 cmd.count = cpu_to_le32(1); 693 cmd.size = cpu_to_le32(intent->size); 694 cmd.liid = cpu_to_le32(intent->id); 695 696 qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); 697 698 return 0; 699 } 700 701 static struct glink_core_rx_intent * 702 qcom_glink_alloc_intent(struct qcom_glink *glink, 703 struct glink_channel *channel, 704 size_t size, 705 bool reuseable) 706 { 707 struct glink_core_rx_intent *intent; 708 int ret; 709 unsigned long flags; 710 711 intent = kzalloc(sizeof(*intent), GFP_KERNEL); 712 if (!intent) 713 return NULL; 714 715 intent->data = kzalloc(size, GFP_KERNEL); 716 if (!intent->data) 717 goto free_intent; 718 719 spin_lock_irqsave(&channel->intent_lock, flags); 720 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); 721 if (ret < 0) { 722 spin_unlock_irqrestore(&channel->intent_lock, flags); 723 goto free_data; 724 } 725 
spin_unlock_irqrestore(&channel->intent_lock, flags); 726 727 intent->id = ret; 728 intent->size = size; 729 intent->reuse = reuseable; 730 731 return intent; 732 733 free_data: 734 kfree(intent->data); 735 free_intent: 736 kfree(intent); 737 return NULL; 738 } 739 740 static void qcom_glink_handle_rx_done(struct qcom_glink *glink, 741 u32 cid, uint32_t iid, 742 bool reuse) 743 { 744 struct glink_core_rx_intent *intent; 745 struct glink_channel *channel; 746 unsigned long flags; 747 748 spin_lock_irqsave(&glink->idr_lock, flags); 749 channel = idr_find(&glink->rcids, cid); 750 spin_unlock_irqrestore(&glink->idr_lock, flags); 751 if (!channel) { 752 dev_err(glink->dev, "invalid channel id received\n"); 753 return; 754 } 755 756 spin_lock_irqsave(&channel->intent_lock, flags); 757 intent = idr_find(&channel->riids, iid); 758 759 if (!intent) { 760 spin_unlock_irqrestore(&channel->intent_lock, flags); 761 dev_err(glink->dev, "invalid intent id received\n"); 762 return; 763 } 764 765 intent->in_use = false; 766 767 if (!reuse) { 768 idr_remove(&channel->riids, intent->id); 769 kfree(intent); 770 } 771 spin_unlock_irqrestore(&channel->intent_lock, flags); 772 773 if (reuse) { 774 WRITE_ONCE(channel->intent_received, true); 775 wake_up_all(&channel->intent_req_wq); 776 } 777 } 778 779 /** 780 * qcom_glink_handle_intent_req() - Receive a request for rx_intent 781 * from remote side 782 * @glink: Pointer to the transport interface 783 * @cid: Remote channel ID 784 * @size: size of the intent 785 * 786 * The function searches for the local channel to which the request for 787 * rx_intent has arrived and allocates and notifies the remote back 788 */ 789 static void qcom_glink_handle_intent_req(struct qcom_glink *glink, 790 u32 cid, size_t size) 791 { 792 struct glink_core_rx_intent *intent; 793 struct glink_channel *channel; 794 unsigned long flags; 795 796 spin_lock_irqsave(&glink->idr_lock, flags); 797 channel = idr_find(&glink->rcids, cid); 798 
spin_unlock_irqrestore(&glink->idr_lock, flags); 799 800 if (!channel) { 801 pr_err("%s channel not found for cid %d\n", __func__, cid); 802 return; 803 } 804 805 intent = qcom_glink_alloc_intent(glink, channel, size, false); 806 if (intent) 807 qcom_glink_advertise_intent(glink, channel, intent); 808 809 qcom_glink_send_intent_req_ack(glink, channel, !!intent); 810 } 811 812 static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra) 813 { 814 struct glink_defer_cmd *dcmd; 815 816 extra = ALIGN(extra, 8); 817 818 if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) { 819 dev_dbg(glink->dev, "Insufficient data in rx fifo"); 820 return -ENXIO; 821 } 822 823 dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC); 824 if (!dcmd) 825 return -ENOMEM; 826 827 INIT_LIST_HEAD(&dcmd->node); 828 829 qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra); 830 831 spin_lock(&glink->rx_lock); 832 list_add_tail(&dcmd->node, &glink->rx_queue); 833 spin_unlock(&glink->rx_lock); 834 835 schedule_work(&glink->rx_work); 836 qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra); 837 838 return 0; 839 } 840 841 static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) 842 { 843 struct glink_core_rx_intent *intent; 844 struct glink_channel *channel; 845 struct { 846 struct glink_msg msg; 847 __le32 chunk_size; 848 __le32 left_size; 849 } __packed hdr; 850 unsigned int chunk_size; 851 unsigned int left_size; 852 unsigned int rcid; 853 unsigned int liid; 854 int ret = 0; 855 unsigned long flags; 856 857 if (avail < sizeof(hdr)) { 858 dev_dbg(glink->dev, "Not enough data in fifo\n"); 859 return -EAGAIN; 860 } 861 862 qcom_glink_rx_peek(glink, &hdr, 0, sizeof(hdr)); 863 chunk_size = le32_to_cpu(hdr.chunk_size); 864 left_size = le32_to_cpu(hdr.left_size); 865 866 if (avail < sizeof(hdr) + chunk_size) { 867 dev_dbg(glink->dev, "Payload not yet in fifo\n"); 868 return -EAGAIN; 869 } 870 871 rcid = le16_to_cpu(hdr.msg.param1); 872 
spin_lock_irqsave(&glink->idr_lock, flags); 873 channel = idr_find(&glink->rcids, rcid); 874 spin_unlock_irqrestore(&glink->idr_lock, flags); 875 if (!channel) { 876 dev_dbg(glink->dev, "Data on non-existing channel\n"); 877 878 /* Drop the message */ 879 goto advance_rx; 880 } 881 882 if (glink->intentless) { 883 /* Might have an ongoing, fragmented, message to append */ 884 if (!channel->buf) { 885 intent = kzalloc(sizeof(*intent), GFP_ATOMIC); 886 if (!intent) 887 return -ENOMEM; 888 889 intent->data = kmalloc(chunk_size + left_size, 890 GFP_ATOMIC); 891 if (!intent->data) { 892 kfree(intent); 893 return -ENOMEM; 894 } 895 896 intent->id = 0xbabababa; 897 intent->size = chunk_size + left_size; 898 intent->offset = 0; 899 900 channel->buf = intent; 901 } else { 902 intent = channel->buf; 903 } 904 } else { 905 liid = le32_to_cpu(hdr.msg.param2); 906 907 spin_lock_irqsave(&channel->intent_lock, flags); 908 intent = idr_find(&channel->liids, liid); 909 spin_unlock_irqrestore(&channel->intent_lock, flags); 910 911 if (!intent) { 912 dev_err(glink->dev, 913 "no intent found for channel %s intent %d", 914 channel->name, liid); 915 ret = -ENOENT; 916 goto advance_rx; 917 } 918 } 919 920 if (intent->size - intent->offset < chunk_size) { 921 dev_err(glink->dev, "Insufficient space in intent\n"); 922 923 /* The packet header lied, drop payload */ 924 goto advance_rx; 925 } 926 927 qcom_glink_rx_peek(glink, intent->data + intent->offset, 928 sizeof(hdr), chunk_size); 929 intent->offset += chunk_size; 930 931 /* Handle message when no fragments remain to be received */ 932 if (!left_size) { 933 spin_lock(&channel->recv_lock); 934 if (channel->ept.cb) { 935 channel->ept.cb(channel->ept.rpdev, 936 intent->data, 937 intent->offset, 938 channel->ept.priv, 939 RPMSG_ADDR_ANY); 940 } 941 spin_unlock(&channel->recv_lock); 942 943 intent->offset = 0; 944 channel->buf = NULL; 945 946 qcom_glink_rx_done(glink, channel, intent); 947 } 948 949 advance_rx: 950 
qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8)); 951 952 return ret; 953 } 954 955 static void qcom_glink_handle_intent(struct qcom_glink *glink, 956 unsigned int cid, 957 unsigned int count, 958 size_t avail) 959 { 960 struct glink_core_rx_intent *intent; 961 struct glink_channel *channel; 962 struct intent_pair { 963 __le32 size; 964 __le32 iid; 965 }; 966 967 struct { 968 struct glink_msg msg; 969 struct intent_pair intents[]; 970 } __packed * msg; 971 972 const size_t msglen = struct_size(msg, intents, count); 973 int ret; 974 int i; 975 unsigned long flags; 976 977 if (avail < msglen) { 978 dev_dbg(glink->dev, "Not enough data in fifo\n"); 979 return; 980 } 981 982 spin_lock_irqsave(&glink->idr_lock, flags); 983 channel = idr_find(&glink->rcids, cid); 984 spin_unlock_irqrestore(&glink->idr_lock, flags); 985 if (!channel) { 986 dev_err(glink->dev, "intents for non-existing channel\n"); 987 qcom_glink_rx_advance(glink, ALIGN(msglen, 8)); 988 return; 989 } 990 991 msg = kmalloc(msglen, GFP_ATOMIC); 992 if (!msg) 993 return; 994 995 qcom_glink_rx_peek(glink, msg, 0, msglen); 996 997 for (i = 0; i < count; ++i) { 998 intent = kzalloc(sizeof(*intent), GFP_ATOMIC); 999 if (!intent) 1000 break; 1001 1002 intent->id = le32_to_cpu(msg->intents[i].iid); 1003 intent->size = le32_to_cpu(msg->intents[i].size); 1004 1005 spin_lock_irqsave(&channel->intent_lock, flags); 1006 ret = idr_alloc(&channel->riids, intent, 1007 intent->id, intent->id + 1, GFP_ATOMIC); 1008 spin_unlock_irqrestore(&channel->intent_lock, flags); 1009 1010 if (ret < 0) 1011 dev_err(glink->dev, "failed to store remote intent\n"); 1012 } 1013 1014 WRITE_ONCE(channel->intent_received, true); 1015 wake_up_all(&channel->intent_req_wq); 1016 1017 kfree(msg); 1018 qcom_glink_rx_advance(glink, ALIGN(msglen, 8)); 1019 } 1020 1021 static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) 1022 { 1023 struct glink_channel *channel; 1024 1025 spin_lock(&glink->idr_lock); 1026 
channel = idr_find(&glink->lcids, lcid); 1027 spin_unlock(&glink->idr_lock); 1028 if (!channel) { 1029 dev_err(glink->dev, "Invalid open ack packet\n"); 1030 return -EINVAL; 1031 } 1032 1033 complete_all(&channel->open_ack); 1034 1035 return 0; 1036 } 1037 1038 /** 1039 * qcom_glink_set_flow_control() - convert a signal cmd to wire format and transmit 1040 * @ept: Rpmsg endpoint for channel. 1041 * @pause: Pause transmission 1042 * @dst: destination address of the endpoint 1043 * 1044 * Return: 0 on success or standard Linux error code. 1045 */ 1046 static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst) 1047 { 1048 struct glink_channel *channel = to_glink_channel(ept); 1049 struct qcom_glink *glink = channel->glink; 1050 struct glink_msg msg; 1051 u32 sigs = 0; 1052 1053 if (pause) 1054 sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG; 1055 1056 msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS); 1057 msg.param1 = cpu_to_le16(channel->lcid); 1058 msg.param2 = cpu_to_le32(sigs); 1059 1060 return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); 1061 } 1062 1063 static void qcom_glink_handle_signals(struct qcom_glink *glink, 1064 unsigned int rcid, unsigned int sigs) 1065 { 1066 struct glink_channel *channel; 1067 unsigned long flags; 1068 bool enable; 1069 1070 spin_lock_irqsave(&glink->idr_lock, flags); 1071 channel = idr_find(&glink->rcids, rcid); 1072 spin_unlock_irqrestore(&glink->idr_lock, flags); 1073 if (!channel) { 1074 dev_err(glink->dev, "signal for non-existing channel\n"); 1075 return; 1076 } 1077 1078 enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG; 1079 1080 if (channel->ept.flow_cb) 1081 channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable); 1082 } 1083 1084 void qcom_glink_native_rx(struct qcom_glink *glink) 1085 { 1086 struct glink_msg msg; 1087 unsigned int param1; 1088 unsigned int param2; 1089 unsigned int avail; 1090 unsigned int cmd; 1091 int ret = 0; 1092 1093 /* To wakeup any blocking writers */ 1094 
wake_up_all(&glink->tx_avail_notify); 1095 1096 for (;;) { 1097 avail = qcom_glink_rx_avail(glink); 1098 if (avail < sizeof(msg)) 1099 break; 1100 1101 qcom_glink_rx_peek(glink, &msg, 0, sizeof(msg)); 1102 1103 cmd = le16_to_cpu(msg.cmd); 1104 param1 = le16_to_cpu(msg.param1); 1105 param2 = le32_to_cpu(msg.param2); 1106 1107 switch (cmd) { 1108 case GLINK_CMD_VERSION: 1109 case GLINK_CMD_VERSION_ACK: 1110 case GLINK_CMD_CLOSE: 1111 case GLINK_CMD_CLOSE_ACK: 1112 case GLINK_CMD_RX_INTENT_REQ: 1113 ret = qcom_glink_rx_defer(glink, 0); 1114 break; 1115 case GLINK_CMD_OPEN_ACK: 1116 ret = qcom_glink_rx_open_ack(glink, param1); 1117 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1118 break; 1119 case GLINK_CMD_OPEN: 1120 /* upper 16 bits of param2 are the "prio" field */ 1121 ret = qcom_glink_rx_defer(glink, param2 & 0xffff); 1122 break; 1123 case GLINK_CMD_TX_DATA: 1124 case GLINK_CMD_TX_DATA_CONT: 1125 ret = qcom_glink_rx_data(glink, avail); 1126 break; 1127 case GLINK_CMD_READ_NOTIF: 1128 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1129 qcom_glink_tx_kick(glink); 1130 break; 1131 case GLINK_CMD_INTENT: 1132 qcom_glink_handle_intent(glink, param1, param2, avail); 1133 break; 1134 case GLINK_CMD_RX_DONE: 1135 qcom_glink_handle_rx_done(glink, param1, param2, false); 1136 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1137 break; 1138 case GLINK_CMD_RX_DONE_W_REUSE: 1139 qcom_glink_handle_rx_done(glink, param1, param2, true); 1140 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1141 break; 1142 case GLINK_CMD_RX_INTENT_REQ_ACK: 1143 qcom_glink_handle_intent_req_ack(glink, param1, param2); 1144 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1145 break; 1146 case GLINK_CMD_SIGNALS: 1147 qcom_glink_handle_signals(glink, param1, param2); 1148 qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); 1149 break; 1150 default: 1151 dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd); 1152 ret = -EINVAL; 1153 break; 1154 } 1155 1156 if (ret) 1157 
break; 1158 } 1159 } 1160 EXPORT_SYMBOL(qcom_glink_native_rx); 1161 1162 /* Locally initiated rpmsg_create_ept */ 1163 static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink, 1164 const char *name) 1165 { 1166 struct glink_channel *channel; 1167 int ret; 1168 unsigned long flags; 1169 1170 channel = qcom_glink_alloc_channel(glink, name); 1171 if (IS_ERR(channel)) 1172 return ERR_CAST(channel); 1173 1174 ret = qcom_glink_send_open_req(glink, channel); 1175 if (ret) 1176 goto release_channel; 1177 1178 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); 1179 if (!ret) 1180 goto err_timeout; 1181 1182 ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ); 1183 if (!ret) 1184 goto err_timeout; 1185 1186 qcom_glink_send_open_ack(glink, channel); 1187 1188 return channel; 1189 1190 err_timeout: 1191 /* qcom_glink_send_open_req() did register the channel in lcids*/ 1192 spin_lock_irqsave(&glink->idr_lock, flags); 1193 idr_remove(&glink->lcids, channel->lcid); 1194 spin_unlock_irqrestore(&glink->idr_lock, flags); 1195 1196 release_channel: 1197 /* Release qcom_glink_send_open_req() reference */ 1198 kref_put(&channel->refcount, qcom_glink_channel_release); 1199 /* Release qcom_glink_alloc_channel() reference */ 1200 kref_put(&channel->refcount, qcom_glink_channel_release); 1201 1202 return ERR_PTR(-ETIMEDOUT); 1203 } 1204 1205 /* Remote initiated rpmsg_create_ept */ 1206 static int qcom_glink_create_remote(struct qcom_glink *glink, 1207 struct glink_channel *channel) 1208 { 1209 int ret; 1210 1211 qcom_glink_send_open_ack(glink, channel); 1212 1213 ret = qcom_glink_send_open_req(glink, channel); 1214 if (ret) 1215 goto close_link; 1216 1217 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ); 1218 if (!ret) { 1219 ret = -ETIMEDOUT; 1220 goto close_link; 1221 } 1222 1223 return 0; 1224 1225 close_link: 1226 /* 1227 * Send a close request to "undo" our open-ack. 
	 * The close-ack will
	 * release qcom_glink_send_open_req() reference and the last reference
	 * will be released after receiving remote_close or transport unregister
	 * by calling qcom_glink_native_remove().
	 */
	qcom_glink_send_close_req(glink, channel);

	return ret;
}

/* rpmsg_device_ops .create_ept: open (or attach to) the named channel */
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;
	unsigned long flags;

	/* idr_for_each_entry() leaves channel NULL when nothing matched */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}

/*
 * Advertise a base set of rx intents once the channel's open has been
 * acked.  Sizes and counts come from the optional "qcom,intents" DT
 * property as <size count> pairs; default is 5 intents of SZ_1K.
 */
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
	struct glink_channel *channel = to_glink_channel(rpdev->ept);
	struct device_node *np = rpdev->dev.of_node;
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent;
	const struct property *prop = NULL;
	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
	int num_intents;
	int num_groups = 1;
	__be32 *val = defaults;
	int size;

	/* Intentless transports, or channels not yet acked, need none */
	if (glink->intentless || !completion_done(&channel->open_ack))
		return 0;

	prop = of_find_property(np, "qcom,intents",
				NULL);
	if (prop) {
		val = prop->value;
		num_groups = prop->length / sizeof(u32) / 2;
	}

	/* Channel is now open, advertise base set of intents */
	while (num_groups--) {
		size = be32_to_cpup(val++);
		num_intents = be32_to_cpup(val++);
		while (num_intents--) {
			intent = qcom_glink_alloc_intent(glink, channel, size,
							 true);
			if (!intent)
				break;

			qcom_glink_advertise_intent(glink, channel, intent);
		}
	}
	return 0;
}

/* rpmsg_endpoint_ops .destroy_ept: detach the rx callback, close channel */
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	/* Stop further rx callbacks before tearing the channel down */
	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}

/*
 * Ask the remote to queue an rx intent of at least @size bytes and wait
 * for the answer (the ack and, when granted, the intent advertisement).
 */
static int qcom_glink_request_intent(struct qcom_glink *glink,
				     struct glink_channel *channel,
				     size_t size)
{
	struct {
		u16 id;
		u16 cid;
		u32 size;
	} __packed cmd;

	int ret;

	mutex_lock(&channel->intent_req_lock);

	/*
	 * intent_req_result: -1 while pending; the ack path sets it
	 * (0 = denied, >0 = granted — inferred from the wait condition
	 * and result check below).
	 */
	WRITE_ONCE(channel->intent_req_result, -1);
	WRITE_ONCE(channel->intent_received, false);

	cmd.id = GLINK_CMD_RX_INTENT_REQ;
	cmd.cid = channel->lcid;
	cmd.size = size;

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
	if (ret)
		goto unlock;

	ret = wait_event_timeout(channel->intent_req_wq,
				 READ_ONCE(channel->intent_req_result) == 0 ||
				 (READ_ONCE(channel->intent_req_result) > 0 &&
				  READ_ONCE(channel->intent_received)) ||
				 glink->abort_tx,
				 10 * HZ);
	if (!ret) {
		dev_err(glink->dev, "intent request timed out\n");
		ret = -ETIMEDOUT;
	} else if (glink->abort_tx) {
		ret = -ECANCELED;
	}
	else {
		/* 0 when the request was granted, -EAGAIN when denied */
		ret = READ_ONCE(channel->intent_req_result) ? 0 : -EAGAIN;
	}

unlock:
	mutex_unlock(&channel->intent_req_lock);
	return ret;
}

/*
 * Transmit @len bytes on @channel.  On intent-based transports the
 * smallest free remote intent that fits is reserved first; when @wait
 * is set the payload is split into SZ_8K chunks and the call may block.
 */
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent = NULL;
	struct glink_core_rx_intent *tmp;
	int iid = 0;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;
	int ret;
	unsigned long flags;
	int chunk_size = len;
	size_t offset = 0;

	if (!glink->intentless) {
		while (!intent) {
			/* Best-fit search: smallest unused intent >= len */
			spin_lock_irqsave(&channel->intent_lock, flags);
			idr_for_each_entry(&channel->riids, tmp, iid) {
				if (tmp->size >= len && !tmp->in_use) {
					if (!intent)
						intent = tmp;
					else if (intent->size > tmp->size)
						intent = tmp;
					if (intent->size == len)
						break;
				}
			}
			if (intent)
				intent->in_use = true;
			spin_unlock_irqrestore(&channel->intent_lock, flags);

			/* We found an available intent */
			if (intent)
				break;

			if (!wait)
				return -EBUSY;

			ret = qcom_glink_request_intent(glink, channel, len);
			if (ret < 0)
				return ret;
		}

		iid = intent->id;
	}

	while (offset < len) {
		chunk_size = len - offset;
		if (chunk_size > SZ_8K && wait)
			chunk_size = SZ_8K;

		req.msg.cmd = cpu_to_le16(offset == 0 ?
					  GLINK_CMD_TX_DATA : GLINK_CMD_TX_DATA_CONT);
		req.msg.param1 = cpu_to_le16(channel->lcid);
		req.msg.param2 = cpu_to_le32(iid);
		req.chunk_size = cpu_to_le32(chunk_size);
		req.left_size = cpu_to_le32(len - offset - chunk_size);

		ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
		if (ret) {
			/* Mark intent available if we failed */
			if (intent)
				intent->in_use = false;
			return ret;
		}

		offset += chunk_size;
	}

	return 0;
}

/* rpmsg .send: blocking transmit */
static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

/* rpmsg .trysend: non-blocking transmit (-EBUSY when no intent free) */
static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/* rpmsg .sendto: @dst is unused — glink channels are point-to-point */
static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

/* rpmsg .trysendto: @dst is unused — glink channels are point-to-point */
static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
	.announce_create = qcom_glink_announce_create,
};

static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.sendto = qcom_glink_sendto,
	.trysend = qcom_glink_trysend,
	.trysendto = qcom_glink_trysendto,
	.set_flow_control = qcom_glink_set_flow_control,
};

/* Release callback for rpdevs allocated in qcom_glink_rx_open() */
static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);

	kfree(rpdev->driver_override);
	kfree(rpdev);
}

/*
 * Handle a remote-initiated GLINK_CMD_OPEN: find or allocate the named
 * channel, record the remote channel id and, when the open originated
 * purely on the remote side, register a matching rpmsg device.
 */
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret =
	      idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		goto free_channel;
	}
	channel->rcid = ret;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	complete_all(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto rcid_remove;

		channel->rpdev = rpdev;
	}

	return 0;

rcid_remove:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

/*
 * Handle a remote GLINK_CMD_CLOSE: unregister the rpmsg device (if any),
 * acknowledge the close and drop the rcid reference.
 */
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	if (channel->rpdev) {
strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name)); 1621 chinfo.src = RPMSG_ADDR_ANY; 1622 chinfo.dst = RPMSG_ADDR_ANY; 1623 1624 rpmsg_unregister_device(glink->dev, &chinfo); 1625 } 1626 channel->rpdev = NULL; 1627 1628 qcom_glink_send_close_ack(glink, channel->rcid); 1629 1630 spin_lock_irqsave(&glink->idr_lock, flags); 1631 idr_remove(&glink->rcids, channel->rcid); 1632 channel->rcid = 0; 1633 spin_unlock_irqrestore(&glink->idr_lock, flags); 1634 1635 kref_put(&channel->refcount, qcom_glink_channel_release); 1636 } 1637 1638 static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid) 1639 { 1640 struct rpmsg_channel_info chinfo; 1641 struct glink_channel *channel; 1642 unsigned long flags; 1643 1644 /* To wakeup any blocking writers */ 1645 wake_up_all(&glink->tx_avail_notify); 1646 1647 spin_lock_irqsave(&glink->idr_lock, flags); 1648 channel = idr_find(&glink->lcids, lcid); 1649 if (WARN(!channel, "close ack on unknown channel\n")) { 1650 spin_unlock_irqrestore(&glink->idr_lock, flags); 1651 return; 1652 } 1653 1654 idr_remove(&glink->lcids, channel->lcid); 1655 channel->lcid = 0; 1656 spin_unlock_irqrestore(&glink->idr_lock, flags); 1657 1658 /* Decouple the potential rpdev from the channel */ 1659 if (channel->rpdev) { 1660 strscpy(chinfo.name, channel->name, sizeof(chinfo.name)); 1661 chinfo.src = RPMSG_ADDR_ANY; 1662 chinfo.dst = RPMSG_ADDR_ANY; 1663 1664 rpmsg_unregister_device(glink->dev, &chinfo); 1665 } 1666 channel->rpdev = NULL; 1667 1668 kref_put(&channel->refcount, qcom_glink_channel_release); 1669 } 1670 1671 static void qcom_glink_work(struct work_struct *work) 1672 { 1673 struct qcom_glink *glink = container_of(work, struct qcom_glink, 1674 rx_work); 1675 struct glink_defer_cmd *dcmd; 1676 struct glink_msg *msg; 1677 unsigned long flags; 1678 unsigned int param1; 1679 unsigned int param2; 1680 unsigned int cmd; 1681 1682 for (;;) { 1683 spin_lock_irqsave(&glink->rx_lock, flags); 1684 if 
		    (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case GLINK_CMD_VERSION:
			qcom_glink_receive_version(glink, param1, param2);
			break;
		case GLINK_CMD_VERSION_ACK:
			qcom_glink_receive_version_ack(glink, param1, param2);
			break;
		case GLINK_CMD_OPEN:
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case GLINK_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case GLINK_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		case GLINK_CMD_RX_INTENT_REQ:
			qcom_glink_handle_intent_req(glink, param1, param2);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}

/* Stop rx_work and free any deferred commands still queued for it */
static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
{
	struct glink_defer_cmd *dcmd;
	struct glink_defer_cmd *tmp;

	/* cancel any pending deferred rx_work */
	cancel_work_sync(&glink->rx_work);

	/* The worker is stopped; the queue can be walked without the lock */
	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
		kfree(dcmd);
}

/* sysfs "rpmsg_name": the DT "label" when present, else the node name */
static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret = 0;
	const char *name;

	ret = of_property_read_string(dev->of_node, "label", &name);
	if (ret < 0)
		name = dev->of_node->name;

	return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_glink_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_glink);

static void
qcom_glink_device_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	kfree(rpdev->driver_override);
	kfree(rpdev);
}

/* Register the "rpmsg_chrdev" control device for this glink edge */
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
	struct rpmsg_device *rpdev;
	struct glink_channel *channel;

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
	if (IS_ERR(channel)) {
		kfree(rpdev);
		return PTR_ERR(channel);
	}
	channel->rpdev = rpdev;

	rpdev->ept = &channel->ept;
	rpdev->ops = &glink_device_ops;
	rpdev->dev.parent = glink->dev;
	rpdev->dev.release = qcom_glink_device_release;

	return rpmsg_ctrldev_register_device(rpdev);
}

/*
 * Allocate and initialize the driver context for one remote subsystem
 * and kick off version negotiation with the remote.
 */
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   unsigned long features,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx,
					   bool intentless)
{
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	glink->features = features;
	glink->intentless = intentless;

	spin_lock_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);
	init_waitqueue_head(&glink->tx_avail_notify);

	spin_lock_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	glink->dev->groups = qcom_glink_groups;

	/* Attribute registration failure is non-fatal, only logged */
	ret = device_add_groups(dev, qcom_glink_groups);
	if (ret)
		dev_err(dev, "failed to add groups\n");

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	/* The chrdev is optional: log the failure but keep the edge up */
	ret = qcom_glink_create_chrdev(glink);
	if (ret)
		dev_err(glink->dev, "failed to register chrdev\n");

	return glink;
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);

static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/* Tear down one glink edge: abort senders, remove children, drop refs */
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	unsigned long flags;
	int cid;
	int ret;

	qcom_glink_cancel_rx_work(glink);

	/* Fail all attempts at sending messages */
	spin_lock_irqsave(&glink->tx_lock, flags);
	glink->abort_tx = true;
	wake_up_all(&glink->tx_avail_notify);
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	/* Abort any senders waiting for intent requests */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, cid)
		qcom_glink_intent_req_abort(channel);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	/* Release any defunct local channels, waiting for close-req */
	idr_for_each_entry(&glink->rcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);

MODULE_DESCRIPTION("Qualcomm GLINK driver");
MODULE_LICENSE("GPL v2");