// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2017, Linaro Ltd
 */

#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/mailbox_client.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"

/* Maximum channel-name length carried in a GLINK_CMD_OPEN payload */
#define GLINK_NAME_SIZE		32
#define GLINK_VERSION_1		1

/* Range handed to idr_alloc_cyclic() for local channel ids */
#define RPM_GLINK_CID_MIN	1
#define RPM_GLINK_CID_MAX	65536

/*
 * Header common to every GLINK message on the wire; the interpretation of
 * @param1 and @param2 depends on @cmd. All fields are little-endian.
 */
struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;

/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg msg;
	u8 data[];
};

/**
 * struct glink_core_rx_intent - RX intent
 *
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: To mark if the intent can be reused after first use
 * @in_use: To mark if intent is already in use for the channel
 * @offset: next write offset (initially 0)
 * @node: list node
 */
struct glink_core_rx_intent {
	void *data;
	u32 id;
	size_t size;
	bool reuse;
	bool in_use;
	u32 offset;

	struct list_head node;
};

/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 * @features: remote features
 * @intentless: flag to indicate that there is no intent
 * @tx_avail_notify: Waitqueue for pending tx tasks
 * @sent_read_notify: flag to check cmd sent or not
 * @abort_tx: flag indicating that all tx attempts should fail
 */
struct qcom_glink {
	struct device *dev;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	spinlock_t tx_lock;

	spinlock_t idr_lock;
	struct idr lcids;
	struct idr rcids;
	unsigned long features;

	bool intentless;
	wait_queue_head_t tx_avail_notify;
	bool sent_read_notify;

	bool abort_tx;
};

/* Local channel state, driven by the open/open-ack/close handshake */
enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};

/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @intent_lock: lock for protection of @liids, @riids
 * @liids: idr of all local intents
 * @riids: idr of all remote intents
 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that needs to be announced rx_done
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 * @intent_req_lock: Synchronises multiple intent requests
 * @intent_req_result: Result of intent request
 * @intent_received: flag indicating that an intent has been received
 * @intent_req_wq: wait queue for intent_req signalling
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	spinlock_t intent_lock;
	struct idr liids;
	struct idr riids;
	struct work_struct intent_work;
	struct list_head done_intents;

	struct glink_core_rx_intent *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;

	struct mutex
intent_req_lock;
	int intent_req_result;
	bool intent_received;
	wait_queue_head_t intent_req_wq;
};

#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

/* Wire command ids, carried in glink_msg.cmd */
#define GLINK_CMD_VERSION		0
#define GLINK_CMD_VERSION_ACK		1
#define GLINK_CMD_OPEN			2
#define GLINK_CMD_CLOSE			3
#define GLINK_CMD_OPEN_ACK		4
#define GLINK_CMD_INTENT		5
#define GLINK_CMD_RX_DONE		6
#define GLINK_CMD_RX_INTENT_REQ		7
#define GLINK_CMD_RX_INTENT_REQ_ACK	8
#define GLINK_CMD_TX_DATA		9
#define GLINK_CMD_CLOSE_ACK		11
#define GLINK_CMD_TX_DATA_CONT		12
#define GLINK_CMD_READ_NOTIF		13
#define GLINK_CMD_RX_DONE_W_REUSE	14
#define GLINK_CMD_SIGNALS		15

#define GLINK_FEATURE_INTENTLESS	BIT(1)

/* DTR/RTS mirror DSR/CTS: the same bits are used in both directions */
#define NATIVE_DTR_SIG			NATIVE_DSR_SIG
#define NATIVE_DSR_SIG			BIT(31)
#define NATIVE_RTS_SIG			NATIVE_CTS_SIG
#define NATIVE_CTS_SIG			BIT(30)

static void qcom_glink_rx_done_work(struct work_struct *work);

/*
 * Allocate and initialize a glink_channel for @name with a single reference
 * held by the caller. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
						      const char *name)
{
	struct glink_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	/* Setup glink internal glink_channel data */
	spin_lock_init(&channel->recv_lock);
	spin_lock_init(&channel->intent_lock);
	mutex_init(&channel->intent_req_lock);

	channel->glink = glink;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		kfree(channel);
		return ERR_PTR(-ENOMEM);
	}

	init_completion(&channel->open_req);
	init_completion(&channel->open_ack);
	init_waitqueue_head(&channel->intent_req_wq);

	INIT_LIST_HEAD(&channel->done_intents);
	INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);

	idr_init(&channel->liids);
	idr_init(&channel->riids);
	kref_init(&channel->refcount);

	return channel;
}

/*
 * kref release callback: tear down a channel once the last reference is
 * dropped. Any intents still queued for rx_done, plus all local and remote
 * intents, are freed here; entries are not list_del'ed individually since
 * the whole object is going away.
 */
static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *tmp;
	unsigned long flags;
	int iid;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	spin_lock_irqsave(&channel->intent_lock, flags);
	/* Free all non-reuse intents pending rx_done work */
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}

	/* Reusable local intents are still in @liids and are freed here */
	idr_for_each_entry(&channel->liids, tmp, iid) {
		kfree(tmp->data);
		kfree(tmp);
	}
	idr_destroy(&channel->liids);

	idr_for_each_entry(&channel->riids, tmp, iid)
		kfree(tmp);
	idr_destroy(&channel->riids);
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	kfree(channel->name);
	kfree(channel);
}

/* Thin wrappers around the transport-specific FIFO pipe operations */

static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
	return glink->rx_pipe->avail(glink->rx_pipe);
}

/* Copy @count bytes at @offset out of the rx FIFO without consuming them */
static void qcom_glink_rx_peek(struct qcom_glink *glink,
			       void *data, unsigned int offset, size_t count)
{
	glink->rx_pipe->peek(glink->rx_pipe, data, offset, count);
}

/* Consume @count bytes from the rx FIFO */
static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
{
	glink->rx_pipe->advance(glink->rx_pipe, count);
}

static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
{
	return glink->tx_pipe->avail(glink->tx_pipe);
}

/* Write header and payload contiguously into the tx FIFO */
static void qcom_glink_tx_write(struct qcom_glink *glink,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}

/* Signal the remote that new data is available in the tx FIFO */
static void qcom_glink_tx_kick(struct qcom_glink *glink)
{
	glink->tx_pipe->kick(glink->tx_pipe);
}

static void
/* Ask the remote to notify us when it has consumed data from its rx FIFO */
qcom_glink_send_read_notify(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF);
	msg.param1 = 0;
	msg.param2 = 0;

	qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);

	qcom_glink_tx_kick(glink);
}

/*
 * Transmit @hdr followed by @data over the tx FIFO.
 *
 * If there is insufficient space and @wait is set, a READ_NOTIF is sent
 * (at most once per congestion episode, tracked by @sent_read_notify) and
 * the caller sleeps on @tx_avail_notify, re-checking every 10 s. With @wait
 * unset, -EAGAIN is returned instead. -EIO is returned once @abort_tx is
 * set, -EINVAL if the message can never fit in the FIFO.
 */
static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	unsigned long flags;
	int ret = 0;

	/* Reject packets that are too big */
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	spin_lock_irqsave(&glink->tx_lock, flags);

	if (glink->abort_tx) {
		ret = -EIO;
		goto out;
	}

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -EAGAIN;
			goto out;
		}

		/* Re-check after every wakeup; abort may arrive while waiting */
		if (glink->abort_tx) {
			ret = -EIO;
			goto out;
		}

		if (!glink->sent_read_notify) {
			glink->sent_read_notify = true;
			qcom_glink_send_read_notify(glink);
		}

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&glink->tx_lock, flags);

		wait_event_timeout(glink->tx_avail_notify,
				   qcom_glink_tx_avail(glink) >= tlen, 10 * HZ);

		spin_lock_irqsave(&glink->tx_lock, flags);

		/* Space freed up: allow a new READ_NOTIF next time we stall */
		if (qcom_glink_tx_avail(glink) >= tlen)
			glink->sent_read_notify = false;
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
	qcom_glink_tx_kick(glink);

out:
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	return ret;
}

/* Open version negotiation: offer our version and feature set */
static int qcom_glink_send_version(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_VERSION);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/* Acknowledge the remote's version offer with our (masked) feature set */
static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd =
cpu_to_le16(GLINK_CMD_VERSION_ACK);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/* Ack a remote-initiated open; param1 carries the remote's channel id */
static void qcom_glink_send_open_ack(struct qcom_glink *glink,
				     struct glink_channel *channel)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK);
	msg.param1 = cpu_to_le16(channel->rcid);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/*
 * Remote answered our RX_INTENT_REQ: record whether it was granted and wake
 * the requester sleeping on @intent_req_wq.
 */
static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
					     unsigned int cid, bool granted)
{
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "unable to find channel\n");
		return;
	}

	WRITE_ONCE(channel->intent_req_result, granted);
	wake_up_all(&channel->intent_req_wq);
}

/* Fail any pending intent request, e.g. when the channel is going down */
static void qcom_glink_intent_req_abort(struct glink_channel *channel)
{
	WRITE_ONCE(channel->intent_req_result, 0);
	wake_up_all(&channel->intent_req_wq);
}

/**
 * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote
 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel that the open req is sent
 *
 * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote.
 * Will return with refcount held, regardless of outcome.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	struct {
		struct glink_msg msg;
		u8 name[GLINK_NAME_SIZE];
	} __packed req;
	int name_len = strlen(channel->name) + 1;
	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
	int ret;
	unsigned long flags;

	kref_get(&channel->refcount);

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_ATOMIC);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (ret < 0)
		return ret;

	channel->lcid = ret;

	req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(name_len);
	/*
	 * NOTE(review): req is not zero-initialized and req_len is rounded up
	 * to 8, so up to 7 uninitialized stack bytes past the NUL may be
	 * transmitted — confirm whether this matters for this transport.
	 */
	strcpy(req.name, channel->name);

	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	return ret;
}

/* Request closure of our side of the channel (param1 = local cid) */
static void qcom_glink_send_close_req(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE);
	req.param1 = cpu_to_le16(channel->lcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

/* Acknowledge a remote close request for remote channel id @rcid */
static void qcom_glink_send_close_ack(struct qcom_glink *glink,
				      unsigned int rcid)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
	req.param1 = cpu_to_le16(rcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

/*
 * Worker draining @done_intents: for each consumed intent send RX_DONE (or
 * RX_DONE_W_REUSE) to the remote, freeing non-reusable intents afterwards.
 * The intent_lock is dropped around the blocking qcom_glink_tx() call.
 */
static void qcom_glink_rx_done_work(struct work_struct *work)
{
	struct glink_channel *channel = container_of(work, struct glink_channel,
						     intent_work);
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent, *tmp;
	struct {
		u16 id;
		u16 lcid;
		u32 liid;
	} __packed cmd;

	unsigned int cid = channel->lcid;
	unsigned int iid;
	bool reuse;
	unsigned long flags;

	/*
	 * NOTE(review): cmd uses native-endian u16/u32 rather than __le16/
	 * __le32 with cpu_to_le*() like the other senders — verify this is
	 * intentional (LE-only platforms) or an endianness inconsistency.
	 * Also, dropping intent_lock inside list_for_each_entry_safe() means
	 * the pre-fetched @tmp could in principle be removed concurrently —
	 * confirm list manipulation is otherwise serialized.
	 */
	spin_lock_irqsave(&channel->intent_lock, flags);
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		list_del(&intent->node);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		iid = intent->id;
		reuse = intent->reuse;

		cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;
		cmd.lcid = cid;
		cmd.liid = iid;

		qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
		if (!reuse) {
			kfree(intent->data);
			kfree(intent);
		}
		spin_lock_irqsave(&channel->intent_lock, flags);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
}

/*
 * Mark @intent as consumed: queue it on @done_intents and schedule the
 * worker that reports rx_done to the remote. Intentless transports just
 * free the buffer.
 */
static void qcom_glink_rx_done(struct qcom_glink *glink,
			       struct glink_channel *channel,
			       struct glink_core_rx_intent *intent)
{
	/* We don't send RX_DONE to intentless systems */
	if (glink->intentless) {
		kfree(intent->data);
		kfree(intent);
		return;
	}

	/* Take it off the tree of receive intents */
	if (!intent->reuse) {
		spin_lock(&channel->intent_lock);
		idr_remove(&channel->liids, intent->id);
		spin_unlock(&channel->intent_lock);
	}

	/* Schedule the sending of a rx_done indication */
	spin_lock(&channel->intent_lock);
	list_add_tail(&intent->node, &channel->done_intents);
	spin_unlock(&channel->intent_lock);

	schedule_work(&channel->intent_work);
}

/**
 * qcom_glink_receive_version() - receive version/features from remote system
 *
 * @glink: pointer to transport interface
 * @version: remote version
 * @features: remote features
 *
 * This function is called in response to a remote-initiated version/feature
 * negotiation sequence.
 */
static void qcom_glink_receive_version(struct qcom_glink *glink,
				       u32 version,
				       u32 features)
{
	switch (version) {
	case 0:
		/* Version 0 from the remote: negotiation failed, stay quiet */
		break;
	case GLINK_VERSION_1:
		/* Keep only the features both sides support */
		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version_ack(glink);
		break;
	}
}

/**
 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
 *
 * @glink: pointer to transport interface
 * @version: remote version response
 * @features: remote features response
 *
 * This function is called in response to a local-initiated version/feature
 * negotiation sequence and is the counter-offer from the remote side based
 * upon the initial version and feature set requested.
 */
static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
					   u32 version,
					   u32 features)
{
	switch (version) {
	case 0:
		/* Version negotiation failed */
		break;
	case GLINK_VERSION_1:
		/* Feature sets already agree: negotiation complete */
		if (features == glink->features)
			break;

		/* Otherwise narrow our set and re-offer */
		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version(glink);
		break;
	}
}

/**
 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
 *				      wire format and transmit
 * @glink: The transport to transmit on.
 * @channel: The glink channel
 * @granted: The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
					  struct glink_channel *channel,
					  bool granted)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
	msg.param1 = cpu_to_le16(channel->lcid);
	msg.param2 = cpu_to_le32(granted);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);

	return 0;
}

/**
 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
 *				 transmit
 * @glink: The transport to transmit on.
 * @channel: The local channel
 * @intent: The intent to pass on to remote.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_advertise_intent(struct qcom_glink *glink,
				       struct glink_channel *channel,
				       struct glink_core_rx_intent *intent)
{
	struct command {
		__le16 id;
		__le16 lcid;
		__le32 count;
		__le32 size;
		__le32 liid;
	} __packed;
	struct command cmd;

	cmd.id = cpu_to_le16(GLINK_CMD_INTENT);
	cmd.lcid = cpu_to_le16(channel->lcid);
	cmd.count = cpu_to_le32(1);	/* one intent per advertisement */
	cmd.size = cpu_to_le32(intent->size);
	cmd.liid = cpu_to_le32(intent->id);

	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);

	return 0;
}

/*
 * Allocate a local rx intent of @size bytes and register it in @liids.
 * Returns NULL on allocation or idr failure; the caller owns the intent.
 */
static struct glink_core_rx_intent *
qcom_glink_alloc_intent(struct qcom_glink *glink,
			struct glink_channel *channel,
			size_t size,
			bool reuseable)
{
	struct glink_core_rx_intent *intent;
	int ret;
	unsigned long flags;

	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
	if (!intent)
		return NULL;

	intent->data = kzalloc(size, GFP_KERNEL);
	if (!intent->data)
		goto free_intent;

	spin_lock_irqsave(&channel->intent_lock, flags);
	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		goto free_data;
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	intent->id = ret;
	intent->size = size;
	intent->reuse = reuseable;

	return intent;

free_data:
	kfree(intent->data);
free_intent:
	kfree(intent);
	return NULL;
}

/*
 * Remote reports it is done with one of our previously transmitted buffers:
 * release the matching remote intent (or mark a reusable one free again and
 * wake any sender waiting for an intent).
 */
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
				      u32 cid, uint32_t iid,
				      bool reuse)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "invalid channel id received\n");
		return;
	}

	spin_lock_irqsave(&channel->intent_lock, flags);
	intent = idr_find(&channel->riids, iid);

	if (!intent) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		dev_err(glink->dev, "invalid intent id received\n");
		return;
	}

	intent->in_use = false;

	if (!reuse) {
		idr_remove(&channel->riids, intent->id);
		kfree(intent);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	if (reuse) {
		/* A reusable intent just became available again */
		WRITE_ONCE(channel->intent_received, true);
		wake_up_all(&channel->intent_req_wq);
	}
}

/**
 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
 *					    from remote side
 * @glink:      Pointer to the transport interface
 * @cid:	Remote channel ID
 * @size:	size of the intent
 *
 * The function searches for the local channel to which the request for
 * rx_intent has arrived and allocates and notifies the remote back
 */
static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
					 u32 cid, size_t size)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		pr_err("%s channel not found for cid %d\n", __func__, cid);
		return;
	}

	/* Ack with granted = true only if the intent could be allocated */
	intent = qcom_glink_alloc_intent(glink, channel, size, false);
	if (intent)
		qcom_glink_advertise_intent(glink, channel, intent);

	qcom_glink_send_intent_req_ack(glink, channel, !!intent);
}

/*
 * Copy a control message (header plus @extra payload bytes) out of the rx
 * FIFO onto @rx_queue for process-context handling by @rx_work, then
 * consume it from the FIFO.
 */
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;

	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);

	spin_lock(&glink->rx_lock);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock(&glink->rx_lock);

	schedule_work(&glink->rx_work);
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}

/*
 * Handle an incoming TX_DATA/TX_DATA_CONT chunk: locate the target channel
 * and rx intent, append the chunk, and deliver the complete message to the
 * endpoint callback once no fragments remain.
 */
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;
	unsigned int liid;
	int ret = 0;
	unsigned long flags;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peek(glink, &hdr, 0, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	/* Wait for the full chunk before consuming anything */
	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	rcid = le16_to_cpu(hdr.msg.param1);
	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		goto advance_rx;
	}

	if (glink->intentless) {
		/* Might have an ongoing, fragmented, message to append */
		if (!channel->buf) {
			/*
			 * NOTE(review): returning -ENOMEM here leaves the
			 * message in the FIFO (no advance); confirm the
			 * caller retries rather than looping on it.
			 */
			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
			if (!intent)
				return -ENOMEM;

			intent->data = kmalloc(chunk_size + left_size,
					       GFP_ATOMIC);
			if (!intent->data) {
				kfree(intent);
				return -ENOMEM;
			}

			/* Marker id: intentless buffers have no real liid */
			intent->id = 0xbabababa;
			intent->size = chunk_size + left_size;
			intent->offset = 0;

			channel->buf = intent;
		} else {
			intent = channel->buf;
		}
	} else {
		liid = le32_to_cpu(hdr.msg.param2);

		spin_lock_irqsave(&channel->intent_lock, flags);
		intent = idr_find(&channel->liids, liid);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (!intent) {
			dev_err(glink->dev,
				"no intent found for channel %s intent %d",
				channel->name, liid);
			ret = -ENOENT;
			goto advance_rx;
		}
	}

	if (intent->size - intent->offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in intent\n");

		/* The packet header lied, drop payload */
		goto advance_rx;
	}

	qcom_glink_rx_peek(glink, intent->data + intent->offset,
			   sizeof(hdr), chunk_size);
	intent->offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					intent->data,
					intent->offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		intent->offset = 0;
		channel->buf = NULL;

		qcom_glink_rx_done(glink, channel, intent);
	}

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));

	return ret;
}

/*
 * Handle a GLINK_CMD_INTENT advertisement: register each (size, iid) pair
 * the remote offers in @riids and wake senders waiting for an intent.
 */
static void qcom_glink_handle_intent(struct qcom_glink *glink,
				     unsigned int cid,
				     unsigned int count,
				     size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct intent_pair {
		__le32 size;
		__le32 iid;
	};

	struct {
		struct glink_msg msg;
		struct intent_pair intents[];
	} __packed * msg;

	const size_t msglen = struct_size(msg, intents, count);
	int ret;
	int i;
	unsigned long flags;

	if (avail < msglen) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "intents for non-existing channel\n");
		/* Consume the message anyway to avoid wedging the FIFO */
		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
		return;
	}

	/*
	 * NOTE(review): on kmalloc failure we return without advancing the
	 * rx FIFO — verify the caller copes with re-seeing this command.
	 */
	msg = kmalloc(msglen, GFP_ATOMIC);
	if (!msg)
		return;

	qcom_glink_rx_peek(glink, msg, 0, msglen);

	for (i = 0; i < count; ++i) {
		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
		if (!intent)
			break;

		intent->id = le32_to_cpu(msg->intents[i].iid);
		intent->size = le32_to_cpu(msg->intents[i].size);

		spin_lock_irqsave(&channel->intent_lock, flags);
		ret = idr_alloc(&channel->riids, intent,
				intent->id, intent->id + 1, GFP_ATOMIC);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (ret < 0)
			dev_err(glink->dev, "failed to store remote intent\n");
	}

	WRITE_ONCE(channel->intent_received, true);
	wake_up_all(&channel->intent_req_wq);

	kfree(msg);
	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
}

/* Remote acked our OPEN request: complete @open_ack for the local channel */
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	spin_lock(&glink->idr_lock);
	channel = idr_find(&glink->lcids, lcid);
	spin_unlock(&glink->idr_lock);
	if (!channel) {
		dev_err(glink->dev, "Invalid open ack packet\n");
		return -EINVAL;
	}

	complete_all(&channel->open_ack);

	return 0;
}

/**
 * qcom_glink_set_flow_control() - convert a signal cmd to wire format and transmit
 * @ept:	Rpmsg endpoint for channel.
 * @pause:	Pause transmission
 * @dst:	destination address of the endpoint
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	struct glink_msg msg;
	u32 sigs = 0;

	/* Raising DTR/RTS asks the remote to pause transmission */
	if (pause)
		sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG;

	msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
	msg.param1 = cpu_to_le16(channel->lcid);
	msg.param2 = cpu_to_le32(sigs);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/* Remote changed its DSR/CTS signals: forward as a flow-control callback */
static void qcom_glink_handle_signals(struct qcom_glink *glink,
				      unsigned int rcid, unsigned int sigs)
{
	struct glink_channel *channel;
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_err(glink->dev, "signal for non-existing channel\n");
		return;
	}

	enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG;

	if (channel->ept.flow_cb)
		channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable);
}

/*
 * Main rx dispatcher, called from the transport's interrupt/notification
 * path. Drains the rx FIFO one message at a time: control messages needing
 * process context are deferred to @rx_work, everything else is handled
 * inline and the FIFO advanced past the 8-byte-aligned message.
 */
void qcom_glink_native_rx(struct qcom_glink *glink)
{
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret = 0;

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peek(glink, &msg, 0, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case GLINK_CMD_VERSION:
		case GLINK_CMD_VERSION_ACK:
		case GLINK_CMD_CLOSE:
		case GLINK_CMD_CLOSE_ACK:
		case GLINK_CMD_RX_INTENT_REQ:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case GLINK_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case GLINK_CMD_OPEN:
			/* param2 is the length of the trailing channel name */
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case GLINK_CMD_TX_DATA:
		case GLINK_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case GLINK_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			qcom_glink_tx_kick(glink);
			break;
		case GLINK_CMD_INTENT:
			qcom_glink_handle_intent(glink, param1, param2, avail);
			break;
		case GLINK_CMD_RX_DONE:
			qcom_glink_handle_rx_done(glink, param1, param2, false);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case GLINK_CMD_RX_DONE_W_REUSE:
			qcom_glink_handle_rx_done(glink, param1, param2, true);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case GLINK_CMD_RX_INTENT_REQ_ACK:
			qcom_glink_handle_intent_req_ack(glink, param1, param2);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case GLINK_CMD_SIGNALS:
			qcom_glink_handle_signals(glink, param1, param2);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		/* Stop draining on any error (e.g. partial message, -EAGAIN) */
		if (ret)
			break;
	}
}
EXPORT_SYMBOL(qcom_glink_native_rx);
/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;
	unsigned long flags;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	/* Registers the channel in lcids and sends the open request */
	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	/* Wait for the remote to ack our open request ... */
	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	/* ... and to issue its own open request for the channel */
	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	qcom_glink_send_open_ack(glink, channel);

	return channel;

err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

release_channel:
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	/*
	 * NOTE(review): -ETIMEDOUT is returned even when we got here via a
	 * qcom_glink_send_open_req() failure - consider propagating ret.
	 */
	return ERR_PTR(-ETIMEDOUT);
}

/* Remote initiated rpmsg_create_ept */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	/* Ack the remote's open request, then open our side of the channel */
	qcom_glink_send_open_ack(glink, channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	/*
	 * Send a close request to "undo" our open-ack.
	 * The close-ack will
	 * release the qcom_glink_send_open_req() reference and the last
	 * reference will be released after receiving remote_close or a
	 * transport unregister by calling qcom_glink_native_remove().
	 */
	qcom_glink_send_close_req(glink, channel);

	return ret;
}

/**
 * qcom_glink_create_ept() - create an rpmsg endpoint on a glink channel
 * @rpdev:	rpmsg device the endpoint belongs to
 * @cb:		rx callback for the endpoint
 * @priv:	private data passed back to @cb
 * @chinfo:	channel info; only the name is used for the lookup
 *
 * Reuses a channel already known by its remote id (the remote initiated the
 * open) or performs a locally initiated open, then binds the endpoint to it.
 *
 * Return: the endpoint, or NULL on failure (rpmsg core convention).
 */
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;
	unsigned long flags;

	/* idr_for_each_entry() leaves channel NULL when nothing matched */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}

/**
 * qcom_glink_announce_create() - advertise rx intents for an opened channel
 * @rpdev:	rpmsg device whose channel completed the open handshake
 *
 * Pre-queues receive intents so the remote can transmit. Sizes and counts
 * come from the optional "qcom,intents" DT property (pairs of <size count>),
 * defaulting to 5 intents of SZ_1K. No-op for intentless transports or when
 * the open handshake has not completed.
 *
 * Return: 0 (intent allocation failures are not treated as fatal).
 */
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
	struct glink_channel *channel = to_glink_channel(rpdev->ept);
	struct device_node *np = rpdev->dev.of_node;
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent;
	const struct property *prop = NULL;
	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
	int num_intents;
	int num_groups = 1;
	__be32 *val = defaults;
	int size;

	if (glink->intentless || !completion_done(&channel->open_ack))
		return 0;

	prop = of_find_property(np, "qcom,intents",
				NULL);
	if (prop) {
		val = prop->value;
		/* The property is a flat list of <size count> cell pairs */
		num_groups = prop->length / sizeof(u32) / 2;
	}

	/* Channel is now open, advertise base set of intents */
	while (num_groups--) {
		size = be32_to_cpup(val++);
		num_intents = be32_to_cpup(val++);
		while (num_intents--) {
			intent = qcom_glink_alloc_intent(glink, channel, size,
							 true);
			if (!intent)
				break;

			qcom_glink_advertise_intent(glink, channel, intent);
		}
	}
	return 0;
}

/**
 * qcom_glink_destroy_ept() - rpmsg endpoint teardown
 * @ept:	endpoint to destroy
 *
 * Clears the rx callback under recv_lock so no further messages are
 * delivered, decouples the rpdev and asks the remote to close the channel.
 */
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}

/**
 * qcom_glink_request_intent() - ask the remote to queue an rx intent
 * @glink:	glink transport
 * @channel:	channel the intent is requested for
 * @size:	requested intent size in bytes
 *
 * Sends GLINK_CMD_RX_INTENT_REQ and waits (up to 10s) for a denial, for a
 * grant followed by arrival of the intent itself, or for a transport-wide
 * tx abort. Serialized per channel by intent_req_lock.
 *
 * Return: 0 if an intent was granted, -EAGAIN if denied, -ETIMEDOUT or
 * -ECANCELED otherwise.
 */
static int qcom_glink_request_intent(struct qcom_glink *glink,
				     struct glink_channel *channel,
				     size_t size)
{
	struct {
		u16 id;
		u16 cid;
		u32 size;
	} __packed cmd;

	int ret;

	mutex_lock(&channel->intent_req_lock);

	/* -1 = no answer yet; the rx path updates these before waking us */
	WRITE_ONCE(channel->intent_req_result, -1);
	WRITE_ONCE(channel->intent_received, false);

	cmd.id = GLINK_CMD_RX_INTENT_REQ;
	cmd.cid = channel->lcid;
	cmd.size = size;

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
	if (ret)
		goto unlock;

	ret = wait_event_timeout(channel->intent_req_wq,
				 READ_ONCE(channel->intent_req_result) == 0 ||
				 (READ_ONCE(channel->intent_req_result) > 0 &&
				  READ_ONCE(channel->intent_received)) ||
				 glink->abort_tx,
				 10 * HZ);
	if (!ret) {
		dev_err(glink->dev, "intent request timed out\n");
		ret = -ETIMEDOUT;
	} else if (glink->abort_tx) {
		ret = -ECANCELED;
	}
	else {
		/* result > 0 means granted, 0 means the remote denied it */
		ret = READ_ONCE(channel->intent_req_result) ? 0 : -EAGAIN;
	}

unlock:
	mutex_unlock(&channel->intent_req_lock);
	return ret;
}

/**
 * __qcom_glink_send() - transmit a message on a channel
 * @channel:	channel to transmit on
 * @data:	payload buffer
 * @len:	payload length in bytes
 * @wait:	true to block for an rx intent / fifo space, false to fail fast
 *
 * For intent-based transports, picks the smallest unused remote intent that
 * fits @len, requesting a new one from the remote when none is available
 * and @wait allows. The payload goes out as a TX_DATA command followed by
 * TX_DATA_CONT chunks, capped at SZ_8K per chunk when blocking is allowed.
 *
 * Return: 0 on success or a negative errno (-EBUSY when !@wait and no
 * intent is available).
 */
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent = NULL;
	struct glink_core_rx_intent *tmp;
	int iid = 0;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;
	int ret;
	unsigned long flags;
	int chunk_size = len;
	size_t offset = 0;

	if (!glink->intentless) {
		while (!intent) {
			/* Best-fit search: smallest unused intent >= len */
			spin_lock_irqsave(&channel->intent_lock, flags);
			idr_for_each_entry(&channel->riids, tmp, iid) {
				if (tmp->size >= len && !tmp->in_use) {
					if (!intent)
						intent = tmp;
					else if (intent->size > tmp->size)
						intent = tmp;
					if (intent->size == len)
						break;
				}
			}
			if (intent)
				intent->in_use = true;
			spin_unlock_irqrestore(&channel->intent_lock, flags);

			/* We found an available intent */
			if (intent)
				break;

			if (!wait)
				return -EBUSY;

			ret = qcom_glink_request_intent(glink, channel, len);
			if (ret < 0)
				return ret;
		}

		iid = intent->id;
	}

	while (offset < len) {
		chunk_size = len - offset;
		/* Blocking sends are split into chunks of at most SZ_8K */
		if (chunk_size > SZ_8K && wait)
			chunk_size = SZ_8K;

		req.msg.cmd = cpu_to_le16(offset == 0 ?
					  GLINK_CMD_TX_DATA : GLINK_CMD_TX_DATA_CONT);
		req.msg.param1 = cpu_to_le16(channel->lcid);
		req.msg.param2 = cpu_to_le32(iid);
		req.chunk_size = cpu_to_le32(chunk_size);
		req.left_size = cpu_to_le32(len - offset - chunk_size);

		ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
		if (ret) {
			/* Mark intent available if we failed */
			if (intent)
				intent->in_use = false;
			return ret;
		}

		offset += chunk_size;
	}

	return 0;
}

/* rpmsg_endpoint_ops::send - blocking send */
static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

/* rpmsg_endpoint_ops::trysend - non-blocking send */
static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/* rpmsg_endpoint_ops::sendto - @dst is unused by glink, blocking send */
static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

/* rpmsg_endpoint_ops::trysendto - @dst is unused by glink, non-blocking send */
static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	/* Match on each child's "qcom,glink-channels" string property */
	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

/* rpmsg device ops shared by all glink-backed rpmsg devices */
static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
	.announce_create = qcom_glink_announce_create,
};

/* Endpoint ops wired into every glink channel's rpmsg endpoint */
static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.sendto = qcom_glink_sendto,
	.trysend = qcom_glink_trysend,
	.trysendto = qcom_glink_trysendto,
	.set_flow_control = qcom_glink_set_flow_control,
};

/* Device release for rpmsg devices created in qcom_glink_rx_open() */
static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);

	kfree(rpdev->driver_override);
	kfree(rpdev);
}

/**
 * qcom_glink_rx_open() - handle a remote GLINK_CMD_OPEN
 * @glink:	glink transport
 * @rcid:	remote channel id for the channel being opened
 * @name:	channel name sent by the remote
 *
 * Pairs the remote open with a local channel of the same name; when no
 * local channel exists the open was initiated by the remote, so a channel
 * (and later an rpmsg device) is created here. Registers the channel under
 * @rcid in rcids.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret =
idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		goto free_channel;
	}
	channel->rcid = ret;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Unblock a local opener waiting in qcom_glink_create_local() */
	complete_all(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto rcid_remove;

		channel->rpdev = rpdev;
	}

	return 0;

rcid_remove:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

/**
 * qcom_glink_rx_close() - handle a remote GLINK_CMD_CLOSE
 * @glink:	glink transport
 * @rcid:	remote channel id being closed
 *
 * Unregisters the channel's rpmsg device (if any), acks the close and drops
 * the rcids entry together with its channel reference.
 */
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	if (channel->rpdev) {
		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}
	channel->rpdev = NULL;

	qcom_glink_send_close_ack(glink, channel->rcid);

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Drop the reference associated with the rcids entry */
	kref_put(&channel->refcount, qcom_glink_channel_release);
}

/**
 * qcom_glink_rx_close_ack() - handle a remote GLINK_CMD_CLOSE_ACK
 * @glink:	glink transport
 * @lcid:	local channel id whose close request was acked
 *
 * Completes a locally initiated close: removes the lcids entry, unregisters
 * any rpmsg device bound to the channel and drops the channel reference.
 */
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n")) {
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		return;
	}

	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Decouple the potential rpdev from the channel */
	if (channel->rpdev) {
		strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}
	channel->rpdev = NULL;

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

/**
 * qcom_glink_work() - process deferred control commands
 * @work:	rx_work member of the owning qcom_glink
 *
 * Drains rx_queue (filled by qcom_glink_rx_defer()) and dispatches each
 * deferred command in process context.
 */
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		spin_lock_irqsave(&glink->rx_lock, flags);
		if
		    (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case GLINK_CMD_VERSION:
			qcom_glink_receive_version(glink, param1, param2);
			break;
		case GLINK_CMD_VERSION_ACK:
			qcom_glink_receive_version_ack(glink, param1, param2);
			break;
		case GLINK_CMD_OPEN:
			/* The deferred payload (msg->data) is the channel name */
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case GLINK_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case GLINK_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		case GLINK_CMD_RX_INTENT_REQ:
			qcom_glink_handle_intent_req(glink, param1, param2);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}

/* Flush rx_work and free any deferred commands left on rx_queue */
static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
{
	struct glink_defer_cmd *dcmd;
	struct glink_defer_cmd *tmp;

	/* cancel any pending deferred rx_work */
	cancel_work_sync(&glink->rx_work);

	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
		kfree(dcmd);
}

/* sysfs "rpmsg_name" attribute: the DT "label" property, or the node name */
static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret = 0;
	const char *name;

	ret = of_property_read_string(dev->of_node, "label", &name);
	if (ret < 0)
		name = dev->of_node->name;

	return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_glink_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_glink);

/* Device release for the "rpmsg_chrdev" control device */
static void
qcom_glink_device_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	kfree(rpdev->driver_override);
	kfree(rpdev);
}

/**
 * qcom_glink_create_chrdev() - create the rpmsg control device
 * @glink:	glink transport to expose
 *
 * Allocates a "rpmsg_chrdev" channel and registers it with the rpmsg
 * control-device framework. The rpdev and the channel reference are freed
 * by qcom_glink_device_release() when the device is released.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
	struct rpmsg_device *rpdev;
	struct glink_channel *channel;

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
	if (IS_ERR(channel)) {
		kfree(rpdev);
		return PTR_ERR(channel);
	}
	channel->rpdev = rpdev;

	rpdev->ept = &channel->ept;
	rpdev->ops = &glink_device_ops;
	rpdev->dev.parent = glink->dev;
	rpdev->dev.release = qcom_glink_device_release;

	return rpmsg_ctrldev_register_device(rpdev);
}

/**
 * qcom_glink_native_probe() - set up a glink transport instance
 * @dev:	device owning the transport
 * @features:	remote feature flags
 * @rx:		receive fifo pipe
 * @tx:		transmit fifo pipe
 * @intentless:	true if the transport does not use rx intents
 *
 * Return: the new glink context, or an ERR_PTR on failure.
 */
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   unsigned long features,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx,
					   bool intentless)
{
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	glink->features = features;
	glink->intentless = intentless;

	spin_lock_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);
	init_waitqueue_head(&glink->tx_avail_notify);

	spin_lock_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	glink->dev->groups = qcom_glink_groups;

	/* Attribute group registration failure is logged but not fatal */
	ret = device_add_groups(dev, qcom_glink_groups);
	if (ret)
		dev_err(dev, "failed to add groups\n");

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	/* Failure to expose the control device is logged but not fatal */
	ret = qcom_glink_create_chrdev(glink);
	if (ret)
		dev_err(glink->dev, "failed to register chrdev\n");

	return glink;
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);

/* device_for_each_child() callback: unregister one child rpmsg device */
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/**
 * qcom_glink_native_remove() - tear down a glink transport instance
 * @glink:	glink context from qcom_glink_native_probe()
 *
 * Aborts in-flight and future transmissions, unregisters all child rpmsg
 * devices and releases the channel references still held through the
 * lcids/rcids tables.
 */
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	unsigned long flags;
	int cid;
	int ret;

	qcom_glink_cancel_rx_work(glink);

	/* Fail all attempts at sending messages */
	spin_lock_irqsave(&glink->tx_lock, flags);
	glink->abort_tx = true;
	wake_up_all(&glink->tx_avail_notify);
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	/* Abort any senders waiting for intent requests */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, cid)
		qcom_glink_intent_req_abort(channel);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	/* Release any defunct channels still in rcids, waiting for close-req */
	idr_for_each_entry(&glink->rcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);

MODULE_DESCRIPTION("Qualcomm GLINK driver");
MODULE_LICENSE("GPL v2");