// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
#define QRTR_EPH_PORT_RANGE \
		XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)

#define QRTR_PORT_CTRL_LEGACY 0xffff

/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
};

#define QRTR_FLAGS_CONFIRM_RX	BIT(0)

struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;

	u8 type;
	u8 confirm_rx;
};

#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
					sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = 1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
static DEFINE_SPINLOCK(qrtr_nodes_lock);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qrtr_ports);
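/*
 * Example of the allocation this range drives (illustrative sketch; the
 * real call sits in qrtr_port_assign() below): binding to port 0 asks the
 * xarray allocator for any free index inside the auto-bind window:
 *
 *	u32 port;
 *	rc = xa_alloc(&qrtr_ports, &port, ipc, QRTR_EPH_PORT_RANGE,
 *		      GFP_KERNEL);
 *	on success: QRTR_MIN_EPH_SOCKET <= port <= QRTR_MAX_EPH_SOCKET
 */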
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
 * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
 * @rx_queue: receive queue
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct radix_tree_root qrtr_tx_flow;
	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */

	struct sk_buff_head rx_queue;
	struct list_head item;
};

/**
 * struct qrtr_tx_flow - tx flow control
 * @resume_tx: waiters for a resume tx from the remote
 * @pending: number of waiting senders
 * @tx_failed: indicates that a message with confirm_rx flag was lost
 */
struct qrtr_tx_flow {
	struct wait_queue_head resume_tx;
	int pending;
	int tx_failed;
};

#define QRTR_TX_FLOW_HIGH	10
#define QRTR_TX_FLOW_LOW	5

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
	struct radix_tree_iter iter;
	struct qrtr_tx_flow *flow;
	unsigned long flags;
	void __rcu **slot;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	/* If the node is a bridge for other nodes, there are possibly
	 * multiple entries pointing to our released node, delete them all.
	 */
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot == node)
			radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);

	/* Free tx flow counters */
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
		kfree(flow);
	}
	kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}
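/*
 * Illustrative sketch of the reference pattern (assumes a caller that
 * obtained the node via qrtr_node_lookup(), defined below): every lookup
 * takes a reference and every user drops it again, so the final put tears
 * the node down under qrtr_node_lock via kref_put_mutex():
 *
 *	struct qrtr_node *node = qrtr_node_lookup(nid);	(ref++)
 *	if (node) {
 *		... use node ...
 *		qrtr_node_release(node);	(ref--, possibly frees)
 *	}
 */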
/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/**
 * qrtr_tx_resume() - reset flow control counter
 * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
 * @skb: resume_tx packet
 */
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
	struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
	u64 remote_node = le32_to_cpu(pkt->client.node);
	u32 remote_port = le32_to_cpu(pkt->client.port);
	struct qrtr_tx_flow *flow;
	unsigned long key;

	key = remote_node << 32 | remote_port;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		spin_lock(&flow->resume_tx.lock);
		flow->pending = 0;
		spin_unlock(&flow->resume_tx.lock);
		wake_up_interruptible_all(&flow->resume_tx);
	}

	consume_skb(skb);
}

/**
 * qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be sent to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 * @type: type of message
 *
 * The flow control scheme is based around the low and high "watermarks". When
 * the low watermark is passed the confirm_rx flag is set on the outgoing
 * message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
 *
 * Return: 1 if confirm_rx should be set, 0 otherwise, or negative errno on
 * failure
 */
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
			int type)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
				kfree(flow);
				flow = NULL;
			}
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;

	spin_lock_irq(&flow->resume_tx.lock);
	ret = wait_event_interruptible_locked_irq(flow->resume_tx,
						  flow->pending < QRTR_TX_FLOW_HIGH ||
						  flow->tx_failed ||
						  !node->ep);
	if (ret < 0) {
		confirm_rx = ret;
	} else if (!node->ep) {
		confirm_rx = -EPIPE;
	} else if (flow->tx_failed) {
		flow->tx_failed = 0;
		confirm_rx = 1;
	} else {
		flow->pending++;
		confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
	}
	spin_unlock_irq(&flow->resume_tx.lock);

	return confirm_rx;
}
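/*
 * Worked example of the watermark scheme above (a sketch of one flow, with
 * QRTR_TX_FLOW_LOW == 5 and QRTR_TX_FLOW_HIGH == 10): a sender transmitting
 * DATA packets to a single (node, port) destination sees
 *
 *	packet 1..4:  pending = 1..4,  confirm_rx = 0
 *	packet 5:     pending = 5,     confirm_rx = 1 (ack requested)
 *	packet 6..9:  pending = 6..9,  confirm_rx = 0
 *	packet 10..:  the sender sleeps in qrtr_tx_wait() until a
 *	              QRTR_TYPE_RESUME_TX arrives and qrtr_tx_resume()
 *	              resets pending to 0
 */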
/**
 * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
 * @node: qrtr_node that the packet is to be sent to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 *
 * Signal that the transmission of a message with the confirm_rx flag failed.
 * The flow's "pending" counter will keep incrementing towards
 * QRTR_TX_FLOW_HIGH, at which point transmission would stall forever waiting
 * for the resume-tx message associated with the dropped confirm_rx message.
 * Work around this by marking the flow as having a failed transmission,
 * causing the next transmission attempt to be sent with the confirm_rx flag.
 */
static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
				int dest_port)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		spin_lock_irq(&flow->resume_tx.lock);
		flow->tx_failed = 1;
		spin_unlock_irq(&flow->resume_tx.lock);
	}
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			     int type, struct sockaddr_qrtr *from,
			     struct sockaddr_qrtr *to)
{
	struct qrtr_hdr_v1 *hdr;
	size_t len = skb->len;
	int rc, confirm_rx;

	confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
	if (confirm_rx < 0) {
		kfree_skb(skb);
		return confirm_rx;
	}

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(from->sq_node);
	hdr->src_port_id = cpu_to_le32(from->sq_port);
	if (to->sq_port == QRTR_PORT_CTRL) {
		hdr->dst_node_id = cpu_to_le32(node->nid);
		hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	} else {
		hdr->dst_node_id = cpu_to_le32(to->sq_node);
		hdr->dst_port_id = cpu_to_le32(to->sq_port);
	}

	hdr->size = cpu_to_le32(len);
	hdr->confirm_rx = !!confirm_rx;

	rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));

	if (!rc) {
		mutex_lock(&node->ep_lock);
		rc = -ENODEV;
		if (node->ep)
			rc = node->ep->xmit(node->ep, skb);
		else
			kfree_skb(skb);
		mutex_unlock(&node->ep_lock);
	}
	/* Need to ensure that a subsequent message carries the otherwise lost
	 * confirm_rx flag if we dropped this one
	 */
	if (rc && confirm_rx)
		qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);

	return rc;
}
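/*
 * Worked example of the framing above (sketch): for a 6-byte payload,
 * len == 6, so skb_put_padto() grows the skb to ALIGN(6, 4) + 32 == 40
 * bytes on the wire: a 32-byte qrtr_hdr_v1 followed by the payload padded
 * to a 4-byte boundary. hdr->size still carries the unpadded length (6).
 */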
/* Lookup node by id.
 *
 * Callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;
	unsigned long flags;

	mutex_lock(&qrtr_node_lock);
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
	mutex_unlock(&qrtr_node_lock);

	return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	unsigned long flags;

	if (nid == QRTR_EP_NID_AUTO)
		return;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	radix_tree_insert(&qrtr_nodes, nid, node);
	if (node->nid == QRTR_EP_NID_AUTO)
		node->nid = nid;
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr_v1 *v1;
	const struct qrtr_hdr_v2 *v2;
	struct qrtr_sock *ipc;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	size_t size;
	unsigned int ver;
	size_t hdrlen;

	if (len == 0 || len & 3)
		return -EINVAL;

	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
	if (!skb)
		return -ENOMEM;

	cb = (struct qrtr_cb *)skb->cb;

	/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;

	switch (ver) {
	case QRTR_PROTO_VER_1:
		if (len < sizeof(*v1))
			goto err;
		v1 = data;
		hdrlen = sizeof(*v1);

		cb->type = le32_to_cpu(v1->type);
		cb->src_node = le32_to_cpu(v1->src_node_id);
		cb->src_port = le32_to_cpu(v1->src_port_id);
		cb->confirm_rx = !!v1->confirm_rx;
		cb->dst_node = le32_to_cpu(v1->dst_node_id);
		cb->dst_port = le32_to_cpu(v1->dst_port_id);

		size = le32_to_cpu(v1->size);
		break;
	case QRTR_PROTO_VER_2:
		if (len < sizeof(*v2))
			goto err;
		v2 = data;
		hdrlen = sizeof(*v2) + v2->optlen;

		cb->type = v2->type;
		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		cb->src_node = le16_to_cpu(v2->src_node_id);
		cb->src_port = le16_to_cpu(v2->src_port_id);
		cb->dst_node = le16_to_cpu(v2->dst_node_id);
		cb->dst_port = le16_to_cpu(v2->dst_port_id);

		if (cb->src_port == (u16)QRTR_PORT_CTRL)
			cb->src_port = QRTR_PORT_CTRL;
		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
			cb->dst_port = QRTR_PORT_CTRL;

		size = le32_to_cpu(v2->size);
		break;
	default:
		pr_err("qrtr: Invalid version %d\n", ver);
		goto err;
	}

	if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
		cb->dst_port = QRTR_PORT_CTRL;

	if (!size || len != ALIGN(size, 4) + hdrlen)
		goto err;

	if ((cb->type == QRTR_TYPE_NEW_SERVER ||
	     cb->type == QRTR_TYPE_RESUME_TX) &&
	    size < sizeof(struct qrtr_ctrl_pkt))
		goto err;

	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
	    cb->type != QRTR_TYPE_RESUME_TX)
		goto err;

	skb_put_data(skb, data + hdrlen, size);

	qrtr_node_assign(node, cb->src_node);

	if (cb->type == QRTR_TYPE_NEW_SERVER) {
		/* Remote node endpoint can bridge other distant nodes */
		const struct qrtr_ctrl_pkt *pkt;

		pkt = data + hdrlen;
		qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
	}

	if (cb->type == QRTR_TYPE_RESUME_TX) {
		qrtr_tx_resume(node, skb);
	} else {
		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc)
			goto err;

		if (sock_queue_rcv_skb(&ipc->sk, skb)) {
			qrtr_port_put(ipc);
			goto err;
		}

		qrtr_port_put(ipc);
	}

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
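/*
 * Illustrative sketch (hypothetical endpoint driver code; node and port
 * numbers are made up, and delivery assumes a socket bound to the
 * destination port): a minimal valid version-1 frame for
 * qrtr_endpoint_post() is a qrtr_hdr_v1 followed by the payload padded to
 * a multiple of 4, so that len == ALIGN(size, 4) + sizeof(hdr):
 *
 *	struct {
 *		struct qrtr_hdr_v1 hdr;
 *		u8 payload[8];
 *	} __packed frame = {
 *		.hdr = {
 *			.version = cpu_to_le32(QRTR_PROTO_VER_1),
 *			.type = cpu_to_le32(QRTR_TYPE_DATA),
 *			.src_node_id = cpu_to_le32(5),
 *			.src_port_id = cpu_to_le32(0x4000),
 *			.size = cpu_to_le32(8),
 *			.dst_node_id = cpu_to_le32(1),
 *			.dst_port_id = cpu_to_le32(0x4001),
 *		},
 *	};
 *
 *	qrtr_endpoint_post(ep, &frame, sizeof(frame));	(40 bytes)
 */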
/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 * @flags: the type of memory to allocate
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 *
 * Return: newly allocated sk_buff, or NULL on failure
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
					      gfp_t flags)
{
	const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
	struct sk_buff *skb;

	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
	if (!skb)
		return NULL;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
	*pkt = skb_put_zero(skb, pkt_len);

	return skb;
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 *
 * The specified endpoint must have the xmit function pointer set on call.
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
	mutex_init(&node->qrtr_tx_lock);

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);

/**
 * qrtr_endpoint_unregister() - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct radix_tree_iter iter;
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_tx_flow *flow;
	struct sk_buff *skb;
	unsigned long flags;
	void __rcu **slot;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot != node)
			continue;
		src.sq_node = iter.index;
		skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC);
		if (skb) {
			pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
			qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
		}
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	/* Wake up any transmitters waiting for resume-tx from the node */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
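/*
 * Illustrative sketch of the endpoint life cycle (hypothetical transport
 * driver; my_xmit, my_ep, buf and len are not part of this file): the
 * driver supplies .xmit, registers, feeds received frames to
 * qrtr_endpoint_post(), and unregisters on teardown:
 *
 *	static int my_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
 *	{
 *		... hand skb to the underlying transport ...
 *		consume_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct qrtr_endpoint my_ep = { .xmit = my_xmit };
 *
 *	rc = qrtr_endpoint_register(&my_ep, QRTR_EP_NID_AUTO);
 *	...
 *	qrtr_endpoint_post(&my_ep, buf, len);	(on receive)
 *	...
 *	qrtr_endpoint_unregister(&my_ep);
 */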
/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	rcu_read_lock();
	ipc = xa_load(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	rcu_read_unlock();

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	xa_erase(&qrtr_ports, port);

	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to finish incrementing the refcount
	 */
	synchronize_rcu();
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign an ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >=QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	if (!*port) {
		rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
			      GFP_KERNEL);
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
	} else {
		rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}

/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	unsigned long index;

	rcu_read_lock();
	xa_for_each_start(&qrtr_ports, index, ipc, 1) {
		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	rcu_read_unlock();
}
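/*
 * Userspace view of the port rules above (illustrative sketch; node 1 is
 * the local node id default in this file):
 *
 *	struct sockaddr_qrtr sq = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node = 1,	must match the local node id
 *		.sq_port = 0,	0: the kernel picks an ephemeral port
 *	};
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *	bind(fd, (struct sockaddr *)&sq, sizeof(sq));
 *
 * Requesting sq_port below QRTR_MIN_EPH_SOCKET would instead require
 * CAP_NET_ADMIN.
 */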
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		if (ipc)
			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}

	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}
/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = pskb_copy(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(NULL, skb, type, from, to);

	return 0;
}

static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;
	}

	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		rc = -ENOMEM;
		goto out_node;
	}

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &qrtr_type, 4);
	}

	type = le32_to_cpu(qrtr_type);
	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
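/*
 * Userspace view of qrtr_sendmsg() (illustrative sketch; node and port
 * numbers are made up): datagrams are either addressed explicitly via
 * sendto() or use the peer recorded by connect():
 *
 *	struct sockaddr_qrtr dst = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node = 5,		remote processor
 *		.sq_port = 0x4000,	remote service port
 *	};
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Unconnected sockets without msg_name get -ENOTCONN, and payloads above
 * 65535 bytes get -EMSGSIZE, as implemented above.
 */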
static int qrtr_send_resume_tx(struct qrtr_cb *cb)
{
	struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
	struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_node *node;
	struct sk_buff *skb;
	int ret;

	node = qrtr_node_lookup(remote.sq_node);
	if (!node)
		return -EINVAL;

	skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	pkt->client.node = cpu_to_le32(cb->dst_node);
	pkt->client.port = cpu_to_le32(cb->dst_port);

	ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);

	qrtr_node_release(node);

	return ret;
}

static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}
	cb = (struct qrtr_cb *)skb->cb;

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
	if (cb->confirm_rx)
		qrtr_send_resume_tx(cb);

	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}

static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return sizeof(qaddr);
}
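/*
 * Userspace view of the ioctls handled below (illustrative sketch):
 * TIOCOUTQ reports the remaining send-buffer headroom, TIOCINQ the size of
 * the next queued datagram, and SIOCGIFADDR the socket's own qrtr address:
 *
 *	int avail, pending;
 *	ioctl(fd, TIOCOUTQ, &avail);
 *	ioctl(fd, TIOCINQ, &pending);
 */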
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (get_user_ifreq(&ifr, NULL, argp)) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (put_user_ifreq(&ifr, argp)) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock_orphan(sk);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.gettstamp	= sock_gettstamp,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc)
		goto err_proto;

	rc = qrtr_ns_init();
	if (rc)
		goto err_sock;

	return 0;

err_sock:
	sock_unregister(qrtr_family.family);
err_proto:
	proto_unregister(&qrtr_proto);
	return rc;
}
postcore_initcall(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);