// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - routing
 * implementation.
 *
 * This is currently based on a simple routing table, with no dst cache. The
 * number of routes should stay fairly small, so the lookup cost is small.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/idr.h>
#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <uapi/linux/if_arp.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>

#include <trace/events/mctp.h>

static const unsigned int mctp_message_maxlen = 64 * 1024;
static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);

/* route output callbacks */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *mh;
	struct sock *sk;
	u8 type;

	WARN_ON(!rcu_read_lock_held());

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	if (!skb_headlen(skb))
		return NULL;

	type = (*(u8 *)skb->data) & 0x7f;

	sk_for_each_rcu(sk, &net->mctp.binds) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (!mctp_address_matches(msk->bind_addr, mh->dest))
			continue;

		return msk;
	}

	return NULL;
}

static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
			   mctp_eid_t peer, u8 tag)
{
	if (!mctp_address_matches(key->local_addr, local))
		return false;

	if (key->peer_addr != peer)
		return false;

	if (key->tag != tag)
		return false;

	return true;
}
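/* As an illustration: for an incoming packet with header mh, the tuple
 * tested by mctp_key_match() is (mh->dest, mh->src, tag), where the tag
 * value carries the TO bit alongside the three tag bits:
 *
 *	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
 *
 * so a request (TO set) and its response (TO clear) never match the same
 * key, even when the addresses and tag bits are identical.
 */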
/* returns a key (with key->lock held, and refcounted), or NULL if no such
 * key exists.
 */
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
					   mctp_eid_t peer,
					   unsigned long *irqflags)
	__acquires(&key->lock)
{
	struct mctp_sk_key *key, *ret;
	unsigned long flags;
	struct mctp_hdr *mh;
	u8 tag;

	mh = mctp_hdr(skb);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	ret = NULL;
	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_key_match(key, mh->dest, peer, tag))
			continue;

		spin_lock(&key->lock);
		if (key->valid) {
			refcount_inc(&key->refs);
			ret = key;
			break;
		}
		spin_unlock(&key->lock);
	}

	if (ret) {
		spin_unlock(&net->mctp.keys_lock);
		*irqflags = flags;
	} else {
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	return ret;
}

static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
					  mctp_eid_t local, mctp_eid_t peer,
					  u8 tag, gfp_t gfp)
{
	struct mctp_sk_key *key;

	key = kzalloc(sizeof(*key), gfp);
	if (!key)
		return NULL;

	key->peer_addr = peer;
	key->local_addr = local;
	key->tag = tag;
	key->sk = &msk->sk;
	key->valid = true;
	spin_lock_init(&key->lock);
	refcount_set(&key->refs, 1);

	return key;
}

void mctp_key_unref(struct mctp_sk_key *key)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&key->refs))
		return;

	/* even though no refs exist here, the lock allows us to stay
	 * consistent with the locking requirement of mctp_dev_release_key
	 */
	spin_lock_irqsave(&key->lock, flags);
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	kfree(key);
}

static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *tmp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
				   key->tag)) {
			spin_lock(&tmp->lock);
			if (tmp->valid)
				rc = -EEXIST;
			spin_unlock(&tmp->lock);
			if (rc)
				break;
		}
	}

	if (!rc) {
		refcount_inc(&key->refs);
		key->expiry = jiffies + mctp_key_lifetime;
		timer_reduce(&msk->key_expiry, key->expiry);

		hlist_add_head(&key->hlist, &net->mctp.keys);
		hlist_add_head(&key->sklist, &msk->keys);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
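/* A rough sketch of the references held on a key over its lifetime:
 * mctp_key_alloc() returns with refs == 1 (the caller's reference), a
 * successful mctp_key_add() takes one more for the net/socket hash lists,
 * and each mctp_lookup_key() hit takes a further transient reference,
 * dropped by the caller via mctp_key_unref(). The key is freed when the
 * last reference goes.
 */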
/* Helper for mctp_route_input().
 * We're done with the key; unlock and unref the key.
 * For the usual case of automatic expiry we remove the key from lists.
 * In the case that manual allocation is set on a key we release the lock
 * and local ref, reset reassembly, but don't remove from lists.
 */
static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net,
			       unsigned long flags, unsigned long reason)
	__releases(&key->lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;

	if (!key->manual_alloc) {
		key->reasm_dead = true;
		key->valid = false;
		mctp_dev_release_key(key->dev, key);
	}
	spin_unlock_irqrestore(&key->lock, flags);

	if (!key->manual_alloc) {
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		hlist_del(&key->hlist);
		hlist_del(&key->sklist);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

		/* unref for the lists */
		mctp_key_unref(key);
	}

	/* and one for the local reference */
	mctp_key_unref(key);

	kfree_skb(skb);
}

#ifdef CONFIG_MCTP_FLOWS
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key)
{
	struct mctp_flow *flow;

	flow = skb_ext_add(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	refcount_inc(&key->refs);
	flow->key = key;
}

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
	struct mctp_sk_key *key;
	struct mctp_flow *flow;

	flow = skb_ext_find(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	key = flow->key;

	if (WARN_ON(key->dev && key->dev != dev))
		return;

	mctp_dev_set_key(dev, key);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif

static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	u8 exp_seq, this_seq;

	this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
		& MCTP_HDR_SEQ_MASK;

	if (!key->reasm_head) {
		key->reasm_head = skb;
		key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
		key->last_seq = this_seq;
		return 0;
	}

	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;

	if (this_seq != exp_seq)
		return -EINVAL;

	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
		return -EINVAL;

	skb->next = NULL;
	skb->sk = NULL;
	*key->reasm_tailp = skb;
	key->reasm_tailp = &skb->next;

	key->last_seq = this_seq;

	key->reasm_head->data_len += skb->len;
	key->reasm_head->len += skb->len;
	key->reasm_head->truesize += skb->truesize;

	return 0;
}
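/* For example: the packet sequence number is two bits wide
 * (MCTP_HDR_SEQ_MASK is 0x3), so a five-fragment message starting at
 * seq 2 must arrive as 2, 3, 0, 1, 2; any other value fails the exp_seq
 * check in mctp_frag_queue() and aborts the reassembly with -EINVAL.
 */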
static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct mctp_sk_key *key;
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	unsigned long f;
	u8 tag, flags;
	int rc;

	msk = NULL;
	rc = -EINVAL;

	/* we may be receiving a locally-routed packet; drop source sk
	 * accounting
	 */
	skb_orphan(skb);

	/* ensure we have enough data for a header and a type */
	if (skb->len < sizeof(struct mctp_hdr) + 1)
		goto out;

	/* grab header, advance data ptr */
	mh = mctp_hdr(skb);
	skb_pull(skb, sizeof(struct mctp_hdr));

	if (mh->ver != 1)
		goto out;

	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	rcu_read_lock();

	/* lookup socket / reasm context, exactly matching (src, dest, tag).
	 * we hold a ref on the key, and key->lock held.
	 */
	key = mctp_lookup_key(net, skb, mh->src, &f);

	if (flags & MCTP_HDR_FLAG_SOM) {
		if (key) {
			msk = container_of(key->sk, struct mctp_sock, sk);
		} else {
			/* first response to a broadcast? do a more general
			 * key lookup to find the socket, but don't use this
			 * key for reassembly - we'll create a more specific
			 * one for future packets if required (ie, !EOM).
			 */
			key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
			if (key) {
				msk = container_of(key->sk,
						   struct mctp_sock, sk);
				spin_unlock_irqrestore(&key->lock, f);
				mctp_key_unref(key);
				key = NULL;
			}
		}

		if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
			msk = mctp_lookup_bind(net, skb);

		if (!msk) {
			rc = -ENOENT;
			goto out_unlock;
		}

		/* single-packet message? deliver to socket, clean up any
		 * pending key.
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			sock_queue_rcv_skb(&msk->sk, skb);
			if (key) {
				/* we've hit a pending reassembly; not much we
				 * can do but drop it
				 */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_REPLIED);
				key = NULL;
			}
			rc = 0;
			goto out_unlock;
		}

		/* broadcast response or a bind() - create a key for further
		 * packets for this message
		 */
		if (!key) {
			key = mctp_key_alloc(msk, mh->dest, mh->src,
					     tag, GFP_ATOMIC);
			if (!key) {
				rc = -ENOMEM;
				goto out_unlock;
			}

			/* we can queue without the key lock here, as the
			 * key isn't observable yet
			 */
			mctp_frag_queue(key, skb);

			/* if the key_add fails, we've raced with another
			 * SOM packet with the same src, dest and tag. There's
			 * no way to distinguish future packets, so all we
			 * can do is drop; we'll free the skb on exit from
			 * this function.
			 */
			rc = mctp_key_add(key, msk);
			if (rc) {
				kfree(key);
			} else {
				trace_mctp_key_acquire(key);

				/* we don't need to release key->lock on exit */
				mctp_key_unref(key);
			}
			key = NULL;

		} else {
			if (key->reasm_head || key->reasm_dead) {
				/* duplicate start? drop everything */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_INVALIDATED);
				rc = -EEXIST;
				key = NULL;
			} else {
				rc = mctp_frag_queue(key, skb);
			}
		}

	} else if (key) {
		/* this packet continues a previous message; reassemble
		 * using the message-specific key
		 */

		/* we need to be continuing an existing reassembly... */
		if (!key->reasm_head)
			rc = -EINVAL;
		else
			rc = mctp_frag_queue(key, skb);

		/* end of message? deliver to socket, and we're done with
		 * the reassembly/response key
		 */
		if (!rc && flags & MCTP_HDR_FLAG_EOM) {
			sock_queue_rcv_skb(key->sk, key->reasm_head);
			key->reasm_head = NULL;
			__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
			key = NULL;
		}

	} else {
		/* not a start, no matching key */
		rc = -ENOENT;
	}

out_unlock:
	rcu_read_unlock();
	if (key) {
		spin_unlock_irqrestore(&key->lock, f);
		mctp_key_unref(key);
	}
out:
	if (rc)
		kfree_skb(skb);
	return rc;
}
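/* To summarise the dispatch above, roughly:
 *
 *	SOM+EOM, no key:  single-packet message, deliver to a bound socket
 *	SOM+EOM, key:     deliver, dropping any stale pending reassembly
 *	SOM only:         start a reassembly, creating a key if needed
 *	!SOM, key:        queue the next fragment; deliver on EOM
 *	!SOM, no key:     unmatched continuation, dropped with -ENOENT
 */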
static unsigned int mctp_route_mtu(struct mctp_route *rt)
{
	return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu);
}

static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *hdr = mctp_hdr(skb);
	char daddr_buf[MAX_ADDR_LEN];
	char *daddr = NULL;
	unsigned int mtu;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);

	mtu = READ_ONCE(skb->dev->mtu);
	if (skb->len > mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (cb->ifindex) {
		/* direct route; use the hwaddr we stashed in sendmsg */
		daddr = cb->haddr;
	} else {
		/* If lookup fails let the device handle daddr==NULL */
		if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
			daddr = daddr_buf;
	}

	/* dev_hard_header() returns the header length on success, so only
	 * a negative return is a failure here
	 */
	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     daddr, skb->dev->dev_addr, skb->len);
	if (rc < 0) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	mctp_flow_prepare_output(skb, route->dev);

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}

/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		mctp_dev_put(rt->dev);
		kfree_rcu(rt, rcu);
	}
}

/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
	struct mctp_route *rt;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return NULL;

	INIT_LIST_HEAD(&rt->list);
	refcount_set(&rt->refs, 1);
	rt->output = mctp_route_discard;

	return rt;
}

unsigned int mctp_default_net(struct net *net)
{
	return READ_ONCE(net->mctp.default_net);
}

int mctp_default_net_set(struct net *net, unsigned int index)
{
	if (index == 0)
		return -EINVAL;
	WRITE_ONCE(net->mctp.default_net, index);
	return 0;
}

/* tag management */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	key->expiry = jiffies + mctp_key_lifetime;
	timer_reduce(&msk->key_expiry, key->expiry);

	/* we hold the net->keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
	refcount_inc(&key->refs);
}

/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
 * it for the socket msk
 */
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
					 mctp_eid_t daddr, mctp_eid_t saddr,
					 bool manual, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	u8 tagbits;

	/* for NULL destination EIDs, we may get a response from any peer */
	if (daddr == MCTP_ADDR_NULL)
		daddr = MCTP_ADDR_ANY;

	/* be optimistic, alloc now */
	key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
	if (!key)
		return ERR_PTR(-ENOMEM);

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* We can check the lookup fields (*_addr, tag) without the
		 * lock held; they don't change over the lifetime of the key.
		 */

		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
		      mctp_address_matches(tmp->local_addr, saddr)))
			continue;

		spin_lock(&tmp->lock);
		/* key must still be valid. If we find a match, clear the
		 * potential tag value
		 */
		if (tmp->valid)
			tagbits &= ~(1 << tmp->tag);
		spin_unlock(&tmp->lock);

		if (!tagbits)
			break;
	}

	if (tagbits) {
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		trace_mctp_key_acquire(key);

		key->manual_alloc = manual;
		*tagp = key->tag;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!tagbits) {
		kfree(key);
		return ERR_PTR(-EBUSY);
	}

	return key;
}
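/* By way of example: with three in-flight messages to daddr already
 * holding tags 0, 1 and 3, the walk above leaves tagbits == 0xf4, and
 * __ffs(tagbits) hands out tag 2. Only once all eight bits are cleared
 * does allocation fail with -EBUSY.
 */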
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
						    mctp_eid_t daddr,
						    u8 req_tag, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;

	req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
	key = NULL;

	spin_lock_irqsave(&mns->keys_lock, flags);

	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		if (tmp->tag != req_tag)
			continue;

		if (!mctp_address_matches(tmp->peer_addr, daddr))
			continue;

		if (!tmp->manual_alloc)
			continue;

		spin_lock(&tmp->lock);
		if (tmp->valid) {
			key = tmp;
			refcount_inc(&key->refs);
			spin_unlock(&tmp->lock);
			break;
		}
		spin_unlock(&tmp->lock);
	}
	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!key)
		return ERR_PTR(-ENOENT);

	if (tagp)
		*tagp = key->tag;

	return key;
}

/* routing lookups */
static bool mctp_rt_match_eid(struct mctp_route *rt,
			      unsigned int net, mctp_eid_t eid)
{
	return READ_ONCE(rt->dev->net) == net &&
		rt->min <= eid && rt->max >= eid;
}

/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
				  struct mctp_route *rt2)
{
	ASSERT_RTNL();
	return rt1->dev->net == rt2->dev->net &&
		rt1->min == rt2->min &&
		rt1->max == rt2->max;
}

struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr)
{
	struct mctp_route *tmp, *rt = NULL;

	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
		/* TODO: add metrics */
		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
			if (refcount_inc_not_zero(&tmp->refs)) {
				rt = tmp;
				break;
			}
		}
	}

	return rt;
}
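/* For instance: a route with min == 8 and max == 15 covers EIDs 8
 * through 15 inclusive within its MCTP net, so mctp_route_lookup()
 * returns it for daddr == 10. There is no prefix matching; the first
 * matching entry in the list wins.
 */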
static struct mctp_route *mctp_route_lookup_null(struct net *net,
						 struct net_device *dev)
{
	struct mctp_route *rt;

	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
		    refcount_inc_not_zero(&rt->refs))
			return rt;
	}

	return NULL;
}

static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
				  unsigned int mtu, u8 tag)
{
	const unsigned int hlen = sizeof(struct mctp_hdr);
	struct mctp_hdr *hdr, *hdr2;
	unsigned int pos, size;
	struct sk_buff *skb2;
	int rc;
	u8 seq;

	hdr = mctp_hdr(skb);
	seq = 0;
	rc = 0;

	if (mtu < hlen + 1) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* we've got the header */
	skb_pull(skb, hlen);

	for (pos = 0; pos < skb->len;) {
		/* size of message payload */
		size = min(mtu - hlen, skb->len - pos);

		skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
		if (!skb2) {
			rc = -ENOMEM;
			break;
		}

		/* generic skb copy */
		skb2->protocol = skb->protocol;
		skb2->priority = skb->priority;
		skb2->dev = skb->dev;
		memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* establish packet */
		skb_reserve(skb2, MCTP_HEADER_MAXLEN);
		skb_reset_network_header(skb2);
		skb_put(skb2, hlen + size);
		skb2->transport_header = skb2->network_header + hlen;

		/* copy header fields, calculate SOM/EOM flags & seq */
		hdr2 = mctp_hdr(skb2);
		hdr2->ver = hdr->ver;
		hdr2->dest = hdr->dest;
		hdr2->src = hdr->src;
		hdr2->flags_seq_tag = tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

		if (pos == 0)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;

		if (pos + size == skb->len)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;

		hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;

		/* copy message payload */
		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);

		/* do route */
		rc = rt->output(rt, skb2);
		if (rc)
			break;

		seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
		pos += size;
	}

	consume_skb(skb);
	return rc;
}
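/* Worked example: with an interface MTU of 68 and a 4-byte MCTP header
 * (hlen), each fragment carries up to 64 bytes of payload. A 200-byte
 * message therefore becomes four packets: 64 bytes (SOM, seq 0), 64
 * (seq 1), 64 (seq 2) and 8 (EOM, seq 3).
 */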
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_route tmp_rt = {0};
	struct mctp_sk_key *key;
	struct mctp_hdr *hdr;
	unsigned long flags;
	unsigned int mtu;
	mctp_eid_t saddr;
	bool ext_rt;
	int rc;
	u8 tag;

	rc = -ENODEV;

	if (rt) {
		ext_rt = false;
		if (WARN_ON(!rt->dev))
			goto out_release;

	} else if (cb->ifindex) {
		struct net_device *dev;

		ext_rt = true;
		rt = &tmp_rt;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
		if (!dev) {
			rcu_read_unlock();
			return rc;
		}
		rt->dev = __mctp_dev_get(dev);
		rcu_read_unlock();

		if (!rt->dev)
			goto out_release;

		/* establish temporary route - we set up enough to keep
		 * mctp_route_output happy
		 */
		rt->output = mctp_route_output;
		rt->mtu = 0;

	} else {
		return -EINVAL;
	}

	spin_lock_irqsave(&rt->dev->addrs_lock, flags);
	if (rt->dev->num_addrs == 0) {
		rc = -EHOSTUNREACH;
	} else {
		/* use the outbound interface's first address as our source */
		saddr = rt->dev->addrs[0];
		rc = 0;
	}
	spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);

	if (rc)
		goto out_release;

	if (req_tag & MCTP_TAG_OWNER) {
		if (req_tag & MCTP_TAG_PREALLOC)
			key = mctp_lookup_prealloc_tag(msk, daddr,
						       req_tag, &tag);
		else
			key = mctp_alloc_local_tag(msk, daddr, saddr,
						   false, &tag);

		if (IS_ERR(key)) {
			rc = PTR_ERR(key);
			goto out_release;
		}
		mctp_skb_set_flow(skb, key);
		/* done with the key in this scope */
		mctp_key_unref(key);
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		key = NULL;
		tag = req_tag & MCTP_TAG_MASK;
	}

	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	skb->dev = rt->dev->dev;

	/* cb->net will have been set on initial ingress */
	cb->src = saddr;

	/* set up common header fields */
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = saddr;

	mtu = mctp_route_mtu(rt);

	if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
		hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
			MCTP_HDR_FLAG_EOM | tag;
		rc = rt->output(rt, skb);
	} else {
		rc = mctp_do_fragment_route(rt, skb, mtu, tag);
	}

out_release:
	if (!ext_rt)
		mctp_route_release(rt);

	mctp_dev_put(tmp_rt.dev);

	return rc;
}
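/* As an example of the req_tag handling above: a send with
 * MCTP_TAG_OWNER set allocates a fresh tag and transmits with the TO bit
 * set; MCTP_TAG_OWNER | MCTP_TAG_PREALLOC instead reuses a tag previously
 * reserved via mctp_alloc_local_tag(..., manual = true, ...). Without
 * MCTP_TAG_OWNER, the low tag bits of req_tag are used as-is, as for a
 * response carrying the requester's tag back.
 */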
/* route management */
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			  unsigned int daddr_extent, unsigned int mtu,
			  unsigned char type)
{
	int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb);
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *ert;

	if (!mctp_address_unicast(daddr_start))
		return -EINVAL;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	switch (type) {
	case RTN_LOCAL:
		rtfn = mctp_route_input;
		break;
	case RTN_UNICAST:
		rtfn = mctp_route_output;
		break;
	default:
		return -EINVAL;
	}

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	rt->dev = mdev;
	mctp_dev_hold(rt->dev);
	rt->type = type;
	rt->output = rtfn;

	ASSERT_RTNL();
	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert)) {
			mctp_route_release(rt);
			return -EEXIST;
		}
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}

static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			     unsigned int daddr_extent, unsigned char type)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev &&
		    rt->min == daddr_start && rt->max == daddr_end &&
		    rt->type == type) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}

int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
}

int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}

/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}

/* Incoming packet-handling */

static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
				struct packet_type *pt,
				struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mctp_dev *mdev;
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct mctp_hdr *mh;

	rcu_read_lock();
	mdev = __mctp_dev_get(dev);
	rcu_read_unlock();
	if (!mdev)
		goto err_drop;

	/* basic non-data sanity checks */
	if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
		goto err_drop;

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	/* We have enough for a header; decode and route */
	mh = mctp_hdr(skb);
	if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
		goto err_drop;

	/* source must be valid unicast or null; drop reserved ranges and
	 * broadcast
	 */
	if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
		goto err_drop;

	/* dest address: as above, but allow broadcast */
	if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) ||
	      mctp_address_broadcast(mh->dest)))
		goto err_drop;

	/* MCTP drivers must populate halen/haddr */
	if (dev->type == ARPHRD_MCTP) {
		cb = mctp_cb(skb);
	} else {
		cb = __mctp_cb(skb);
		cb->halen = 0;
	}
	cb->net = READ_ONCE(mdev->net);
	cb->ifindex = dev->ifindex;

	rt = mctp_route_lookup(net, cb->net, mh->dest);

	/* NULL EID, but addressed to our physical address */
	if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
		rt = mctp_route_lookup_null(net, dev);

	if (!rt)
		goto err_drop;

	rt->output(rt, skb);
	mctp_route_release(rt);
	mctp_dev_put(mdev);

	return NET_RX_SUCCESS;

err_drop:
	kfree_skb(skb);
	mctp_dev_put(mdev);
	return NET_RX_DROP;
}

static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};
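/* As an illustration of the address checks above: a packet with
 * src == 0xff (the broadcast EID) or src in the reserved 1-7 range is
 * dropped, while dest may additionally be 0xff; a NULL (0) dest is only
 * routed when the frame was physically addressed to us (PACKET_HOST).
 */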
/* netlink interface */

static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U8 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_OIF]		= { .type = NLA_U32 },
};

/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
 * tb must hold RTA_MAX+1 elements.
 */
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack,
			      struct nlattr **tb, struct rtmsg **rtm,
			      struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	unsigned int ifindex;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (!tb[RTA_OIF]) {
		NL_SET_ERR_MSG(extack, "ifindex missing");
		return -EINVAL;
	}
	ifindex = nla_get_u32(tb[RTA_OIF]);

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "bad ifindex");
		return -ENODEV;
	}
	*mdev = mctp_dev_get_rtnl(dev);
	if (!*mdev)
		return -ENODEV;

	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
	[RTAX_MTU]		= { .type = NLA_U32 },
};

static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	struct nlattr *tbx[RTAX_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	unsigned int mtu;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	if (rtm->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	mtu = 0;
	if (tb[RTA_METRICS]) {
		rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
				      rta_metrics_policy, NULL);
		if (rc < 0)
			return rc;
		if (tbx[RTAX_MTU])
			mtu = nla_get_u32(tbx[RTAX_MTU]);
	}

	rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
			    rtm->rtm_type);
	return rc;
}

static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
	return rc;
}
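/* Usage sketch: since rtm_dst_len counts additional EIDs rather than
 * prefix bits, an RTM_NEWROUTE with RTA_DST == 8 and rtm_dst_len == 3
 * installs a route covering EIDs 8 through 11 on the given interface.
 */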
static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *hdr;
	void *metrics;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->rtm_family = AF_MCTP;

	/* we use the _len fields as a number of EIDs, rather than
	 * a number of bits in the address
	 */
	hdr->rtm_dst_len = rt->max - rt->min;
	hdr->rtm_src_len = 0;
	hdr->rtm_tos = 0;
	hdr->rtm_table = RT_TABLE_DEFAULT;
	hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
	hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
	hdr->rtm_type = rt->type;

	if (nla_put_u8(skb, RTA_DST, rt->min))
		goto cancel;

	metrics = nla_nest_start_noflag(skb, RTA_METRICS);
	if (!metrics)
		goto cancel;

	if (rt->mtu) {
		if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
			goto cancel;
	}

	nla_nest_end(skb, metrics);

	if (rt->dev) {
		if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
			goto cancel;
	}

	/* TODO: conditional neighbour physaddr? */

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * cb->strict_check
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	INIT_HLIST_HEAD(&ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
	return 0;
}

static void __net_exit mctp_routes_net_exit(struct net *net)
{
	struct mctp_route *rt;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
		mctp_route_release(rt);
	rcu_read_unlock();
}

static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};

int __init mctp_routes_init(void)
{
	dev_add_pack(&mctp_packet_type);

	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
			     NULL, mctp_dump_rtinfo, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
			     mctp_newroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
			     mctp_delroute, NULL, 0);

	return register_pernet_subsys(&mctp_net_ops);
}

void __exit mctp_routes_exit(void)
{
	unregister_pernet_subsys(&mctp_net_ops);
	rtnl_unregister(PF_MCTP, RTM_DELROUTE);
	rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
	rtnl_unregister(PF_MCTP, RTM_GETROUTE);
	dev_remove_pack(&mctp_packet_type);
}

#if IS_ENABLED(CONFIG_MCTP_TEST)
#include "test/route-test.c"
#endif