// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - routing
 * implementation.
 *
 * This is currently based on a simple routing table, with no dst cache. The
 * number of routes should stay fairly small, so the lookup cost is small.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/idr.h>
#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <uapi/linux/if_arp.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>

#include <trace/events/mctp.h>

static const unsigned int mctp_message_maxlen = 64 * 1024;
static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);

/* route output callbacks */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *mh;
	struct sock *sk;
	u8 type;

	WARN_ON(!rcu_read_lock_held());

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	if (!skb_headlen(skb))
		return NULL;

	type = (*(u8 *)skb->data) & 0x7f;

	sk_for_each_rcu(sk, &net->mctp.binds) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (!mctp_address_matches(msk->bind_addr, mh->dest))
			continue;

		return msk;
	}

	return NULL;
}

static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
			   mctp_eid_t peer, u8 tag)
{
	if (!mctp_address_matches(key->local_addr, local))
		return false;

	if (key->peer_addr != peer)
		return false;

	if (key->tag != tag)
		return false;

	return true;
}
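
/* A sk_key identifies one in-flight request/response exchange on a socket:
 * (local EID, peer EID, tag), where the stored tag value includes the TO
 * bit. For an inbound packet the tuple is matched with local = header dest
 * and peer = header src (see mctp_lookup_key() below); the local address
 * may be MCTP_ADDR_ANY, which mctp_address_matches() treats as a wildcard.
 */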

/* returns a key (with key->lock held, and refcounted), or NULL if no such
 * key exists.
 */
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
					   mctp_eid_t peer,
					   unsigned long *irqflags)
	__acquires(&key->lock)
{
	struct mctp_sk_key *key, *ret;
	unsigned long flags;
	struct mctp_hdr *mh;
	u8 tag;

	mh = mctp_hdr(skb);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	ret = NULL;
	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_key_match(key, mh->dest, peer, tag))
			continue;

		spin_lock(&key->lock);
		if (key->valid) {
			refcount_inc(&key->refs);
			ret = key;
			break;
		}
		spin_unlock(&key->lock);
	}

	if (ret) {
		spin_unlock(&net->mctp.keys_lock);
		*irqflags = flags;
	} else {
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	}

	return ret;
}

static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
					  mctp_eid_t local, mctp_eid_t peer,
					  u8 tag, gfp_t gfp)
{
	struct mctp_sk_key *key;

	key = kzalloc(sizeof(*key), gfp);
	if (!key)
		return NULL;

	key->peer_addr = peer;
	key->local_addr = local;
	key->tag = tag;
	key->sk = &msk->sk;
	key->valid = true;
	spin_lock_init(&key->lock);
	refcount_set(&key->refs, 1);

	return key;
}

void mctp_key_unref(struct mctp_sk_key *key)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&key->refs))
		return;

	/* even though no refs exist here, the lock allows us to stay
	 * consistent with the locking requirement of mctp_dev_release_key
	 */
	spin_lock_irqsave(&key->lock, flags);
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	kfree(key);
}

static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *tmp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
				   key->tag)) {
			spin_lock(&tmp->lock);
			if (tmp->valid)
				rc = -EEXIST;
			spin_unlock(&tmp->lock);
			if (rc)
				break;
		}
	}

	if (!rc) {
		refcount_inc(&key->refs);
		key->expiry = jiffies + mctp_key_lifetime;
		timer_reduce(&msk->key_expiry, key->expiry);

		hlist_add_head(&key->hlist, &net->mctp.keys);
		hlist_add_head(&key->sklist, &msk->keys);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
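
/* Keys added above are dropped either when the exchange completes (see
 * __mctp_key_done_in() below), or on timeout: expiry is set to
 * mctp_key_lifetime (six seconds' worth of jiffies) from the point of
 * insertion, and the per-socket key_expiry timer is pulled forward
 * accordingly.
 */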

/* Helper for mctp_route_input().
 * We're done with the key; unlock and unref the key.
 * For the usual case of automatic expiry we remove the key from lists.
 * In the case that manual allocation is set on a key we release the lock
 * and local ref, reset reassembly, but don't remove from lists.
 */
static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net,
			       unsigned long flags, unsigned long reason)
	__releases(&key->lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;

	if (!key->manual_alloc) {
		key->reasm_dead = true;
		key->valid = false;
		mctp_dev_release_key(key->dev, key);
	}
	spin_unlock_irqrestore(&key->lock, flags);

	if (!key->manual_alloc) {
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		hlist_del(&key->hlist);
		hlist_del(&key->sklist);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

		/* unref for the lists */
		mctp_key_unref(key);
	}

	/* and one for the local reference */
	mctp_key_unref(key);

	kfree_skb(skb);
}

#ifdef CONFIG_MCTP_FLOWS
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key)
{
	struct mctp_flow *flow;

	flow = skb_ext_add(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	refcount_inc(&key->refs);
	flow->key = key;
}

static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
	struct mctp_sk_key *key;
	struct mctp_flow *flow;

	flow = skb_ext_find(skb, SKB_EXT_MCTP);
	if (!flow)
		return;

	key = flow->key;

	if (WARN_ON(key->dev && key->dev != dev))
		return;

	mctp_dev_set_key(dev, key);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif

static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	u8 exp_seq, this_seq;

	this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
		& MCTP_HDR_SEQ_MASK;

	if (!key->reasm_head) {
		key->reasm_head = skb;
		key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
		key->last_seq = this_seq;
		return 0;
	}

	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;

	if (this_seq != exp_seq)
		return -EINVAL;

	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
		return -EINVAL;

	skb->next = NULL;
	skb->sk = NULL;
	*key->reasm_tailp = skb;
	key->reasm_tailp = &skb->next;

	key->last_seq = this_seq;

	key->reasm_head->data_len += skb->len;
	key->reasm_head->len += skb->len;
	key->reasm_head->truesize += skb->truesize;

	return 0;
}
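
/* Receive path. The flags_seq_tag byte parsed below packs, from MSB to LSB
 * (masks defined in include/net/mctp.h): SOM, EOM, a 2-bit packet sequence
 * number, the TO (tag-owner) bit and a 3-bit tag. mctp_frag_queue() above
 * chains middle/end fragments onto reasm_head's frag_list and requires the
 * 2-bit sequence to advance by exactly one (mod 4) per packet; for example,
 * a four-fragment message carries seq 0, 1, 2, 3, and a fifth fragment
 * would wrap back to 0.
 */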

static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct mctp_sk_key *key;
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	unsigned long f;
	u8 tag, flags;
	int rc;

	msk = NULL;
	rc = -EINVAL;

	/* we may be receiving a locally-routed packet; drop source sk
	 * accounting
	 */
	skb_orphan(skb);

	/* ensure we have enough data for a header and a type */
	if (skb->len < sizeof(struct mctp_hdr) + 1)
		goto out;

	/* grab header, advance data ptr */
	mh = mctp_hdr(skb);
	skb_pull(skb, sizeof(struct mctp_hdr));

	if (mh->ver != 1)
		goto out;

	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	rcu_read_lock();

	/* lookup socket / reasm context, exactly matching (src,dest,tag).
	 * we hold a ref on the key, and key->lock held.
	 */
	key = mctp_lookup_key(net, skb, mh->src, &f);

	if (flags & MCTP_HDR_FLAG_SOM) {
		if (key) {
			msk = container_of(key->sk, struct mctp_sock, sk);
		} else {
			/* first response to a broadcast? do a more general
			 * key lookup to find the socket, but don't use this
			 * key for reassembly - we'll create a more specific
			 * one for future packets if required (ie, !EOM).
			 */
			key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
			if (key) {
				msk = container_of(key->sk,
						   struct mctp_sock, sk);
				spin_unlock_irqrestore(&key->lock, f);
				mctp_key_unref(key);
				key = NULL;
			}
		}

		if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
			msk = mctp_lookup_bind(net, skb);

		if (!msk) {
			rc = -ENOENT;
			goto out_unlock;
		}

		/* single-packet message? deliver to socket, clean up any
		 * pending key.
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			sock_queue_rcv_skb(&msk->sk, skb);
			if (key) {
				/* we've hit a pending reassembly; not much we
				 * can do but drop it
				 */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_REPLIED);
				key = NULL;
			}
			rc = 0;
			goto out_unlock;
		}

		/* broadcast response or a bind() - create a key for further
		 * packets for this message
		 */
		if (!key) {
			key = mctp_key_alloc(msk, mh->dest, mh->src,
					     tag, GFP_ATOMIC);
			if (!key) {
				rc = -ENOMEM;
				goto out_unlock;
			}

			/* we can queue without the key lock here, as the
			 * key isn't observable yet
			 */
			mctp_frag_queue(key, skb);

			/* if the key_add fails, we've raced with another
			 * SOM packet with the same src, dest and tag. There's
			 * no way to distinguish future packets, so all we
			 * can do is drop; we'll free the skb on exit from
			 * this function.
			 */
			rc = mctp_key_add(key, msk);
			if (rc) {
				kfree(key);
			} else {
				trace_mctp_key_acquire(key);

				/* we don't need to release key->lock on exit */
				mctp_key_unref(key);
			}
			key = NULL;

		} else {
			if (key->reasm_head || key->reasm_dead) {
				/* duplicate start? drop everything */
				__mctp_key_done_in(key, net, f,
						   MCTP_TRACE_KEY_INVALIDATED);
				rc = -EEXIST;
				key = NULL;
			} else {
				rc = mctp_frag_queue(key, skb);
			}
		}

	} else if (key) {
		/* this packet continues a previous message; reassemble
		 * using the message-specific key
		 */

		/* we need to be continuing an existing reassembly... */
		if (!key->reasm_head)
			rc = -EINVAL;
		else
			rc = mctp_frag_queue(key, skb);

		/* end of message? deliver to socket, and we're done with
		 * the reassembly/response key
		 */
		if (!rc && flags & MCTP_HDR_FLAG_EOM) {
			msk = container_of(key->sk, struct mctp_sock, sk);
			sock_queue_rcv_skb(key->sk, key->reasm_head);
			key->reasm_head = NULL;
			__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
			key = NULL;
		}

	} else {
		/* not a start, no matching key */
		rc = -ENOENT;
	}

out_unlock:
	rcu_read_unlock();
	if (key) {
		spin_unlock_irqrestore(&key->lock, f);
		mctp_key_unref(key);
	}
out:
	if (rc)
		kfree_skb(skb);
	return rc;
}

static unsigned int mctp_route_mtu(struct mctp_route *rt)
{
	return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu);
}

static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *hdr = mctp_hdr(skb);
	char daddr_buf[MAX_ADDR_LEN];
	char *daddr = NULL;
	unsigned int mtu;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);

	mtu = READ_ONCE(skb->dev->mtu);
	if (skb->len > mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (cb->ifindex) {
		/* direct route; use the hwaddr we stashed in sendmsg */
		daddr = cb->haddr;
	} else {
		/* If lookup fails let the device handle daddr==NULL */
		if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
			daddr = daddr_buf;
	}

	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     daddr, skb->dev->dev_addr, skb->len);
	if (rc) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	mctp_flow_prepare_output(skb, route->dev);

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}

/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		mctp_dev_put(rt->dev);
		kfree_rcu(rt, rcu);
	}
}

/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
	struct mctp_route *rt;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return NULL;

	INIT_LIST_HEAD(&rt->list);
	refcount_set(&rt->refs, 1);
	rt->output = mctp_route_discard;

	return rt;
}

unsigned int mctp_default_net(struct net *net)
{
	return READ_ONCE(net->mctp.default_net);
}

int mctp_default_net_set(struct net *net, unsigned int index)
{
	if (index == 0)
		return -EINVAL;
	WRITE_ONCE(net->mctp.default_net, index);
	return 0;
}

/* tag management */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	key->expiry = jiffies + mctp_key_lifetime;
	timer_reduce(&msk->key_expiry, key->expiry);

	/* we hold the net's keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
	refcount_inc(&key->refs);
}
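
/* Tag allocation below is a simple bitmask search: tagbits starts with all
 * eight tag values (0-7) available, each valid key that we own for the same
 * (saddr, daddr) pair clears its bit, and __ffs() picks the lowest tag left.
 * For example, with tags 0 and 1 already in use, tagbits ends up as 0xfc and
 * the new key gets tag 2; if all eight are busy the caller sees -EBUSY.
 */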

/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
 * it for the socket msk
 */
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
					 mctp_eid_t daddr, mctp_eid_t saddr,
					 bool manual, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	u8 tagbits;

	/* for NULL destination EIDs, we may get a response from any peer */
	if (daddr == MCTP_ADDR_NULL)
		daddr = MCTP_ADDR_ANY;

	/* be optimistic, alloc now */
	key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
	if (!key)
		return ERR_PTR(-ENOMEM);

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* We can check the lookup fields (*_addr, tag) without the
		 * lock held, they don't change over the lifetime of the key.
		 */

		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
		      mctp_address_matches(tmp->local_addr, saddr)))
			continue;

		spin_lock(&tmp->lock);
		/* key must still be valid. If we find a match, clear the
		 * potential tag value
		 */
		if (tmp->valid)
			tagbits &= ~(1 << tmp->tag);
		spin_unlock(&tmp->lock);

		if (!tagbits)
			break;
	}

	if (tagbits) {
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		trace_mctp_key_acquire(key);

		key->manual_alloc = manual;
		*tagp = key->tag;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!tagbits) {
		kfree(key);
		return ERR_PTR(-EBUSY);
	}

	return key;
}

static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
						    mctp_eid_t daddr,
						    u8 req_tag, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;

	req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER);
	key = NULL;

	spin_lock_irqsave(&mns->keys_lock, flags);

	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		if (tmp->tag != req_tag)
			continue;

		if (!mctp_address_matches(tmp->peer_addr, daddr))
			continue;

		if (!tmp->manual_alloc)
			continue;

		spin_lock(&tmp->lock);
		if (tmp->valid) {
			key = tmp;
			refcount_inc(&key->refs);
			spin_unlock(&tmp->lock);
			break;
		}
		spin_unlock(&tmp->lock);
	}
	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!key)
		return ERR_PTR(-ENOENT);

	if (tagp)
		*tagp = key->tag;

	return key;
}

/* routing lookups */
static bool mctp_rt_match_eid(struct mctp_route *rt,
			      unsigned int net, mctp_eid_t eid)
{
	return READ_ONCE(rt->dev->net) == net &&
		rt->min <= eid && rt->max >= eid;
}

/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
				  struct mctp_route *rt2)
{
	ASSERT_RTNL();
	return rt1->dev->net == rt2->dev->net &&
		rt1->min == rt2->min &&
		rt1->max == rt2->max;
}

struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr)
{
	struct mctp_route *tmp, *rt = NULL;

	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
		/* TODO: add metrics */
		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
			if (refcount_inc_not_zero(&tmp->refs)) {
				rt = tmp;
				break;
			}
		}
	}

	return rt;
}
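
/* mctp_route_lookup() above returns the first route whose [min, max] EID
 * range covers daddr within the given net, taking a reference under RCU.
 * The variant below matches a RTN_LOCAL route by device only, for packets
 * sent to the NULL EID but physically addressed to us.
 */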

static struct mctp_route *mctp_route_lookup_null(struct net *net,
						 struct net_device *dev)
{
	struct mctp_route *rt;

	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
		    refcount_inc_not_zero(&rt->refs))
			return rt;
	}

	return NULL;
}

static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
				  unsigned int mtu, u8 tag)
{
	const unsigned int hlen = sizeof(struct mctp_hdr);
	struct mctp_hdr *hdr, *hdr2;
	unsigned int pos, size;
	struct sk_buff *skb2;
	int rc;
	u8 seq;

	hdr = mctp_hdr(skb);
	seq = 0;
	rc = 0;

	if (mtu < hlen + 1) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* we've got the header */
	skb_pull(skb, hlen);

	for (pos = 0; pos < skb->len;) {
		/* size of message payload */
		size = min(mtu - hlen, skb->len - pos);

		skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
		if (!skb2) {
			rc = -ENOMEM;
			break;
		}

		/* generic skb copy */
		skb2->protocol = skb->protocol;
		skb2->priority = skb->priority;
		skb2->dev = skb->dev;
		memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* establish packet */
		skb_reserve(skb2, MCTP_HEADER_MAXLEN);
		skb_reset_network_header(skb2);
		skb_put(skb2, hlen + size);
		skb2->transport_header = skb2->network_header + hlen;

		/* copy header fields, calculate SOM/EOM flags & seq */
		hdr2 = mctp_hdr(skb2);
		hdr2->ver = hdr->ver;
		hdr2->dest = hdr->dest;
		hdr2->src = hdr->src;
		hdr2->flags_seq_tag = tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

		if (pos == 0)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;

		if (pos + size == skb->len)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;

		hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;

		/* copy message payload */
		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);

		/* do route */
		rc = rt->output(rt, skb2);
		if (rc)
			break;

		seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
		pos += size;
	}

	consume_skb(skb);
	return rc;
}
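
/* Fragmentation sizing in mctp_do_fragment_route() above: each fragment
 * carries at most mtu - sizeof(struct mctp_hdr) bytes of payload, i.e.
 * mtu - 4. As a rough example, a 100-byte message over an MTU of 68 is
 * split into a 64-byte fragment (SOM) and a 36-byte fragment (EOM), with
 * the 2-bit sequence number incrementing per fragment.
 */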

int mctp_local_output(struct sock *sk, struct mctp_route *rt,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_route tmp_rt;
	struct mctp_sk_key *key;
	struct net_device *dev;
	struct mctp_hdr *hdr;
	unsigned long flags;
	unsigned int mtu;
	mctp_eid_t saddr;
	bool ext_rt;
	int rc;
	u8 tag;

	rc = -ENODEV;

	if (rt) {
		ext_rt = false;
		dev = NULL;

		if (WARN_ON(!rt->dev))
			goto out_release;

	} else if (cb->ifindex) {
		ext_rt = true;
		rt = &tmp_rt;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
		if (!dev) {
			rcu_read_unlock();
			return rc;
		}

		rt->dev = __mctp_dev_get(dev);
		rcu_read_unlock();

		if (!rt->dev)
			goto out_release;

		/* establish temporary route - we set up enough to keep
		 * mctp_route_output happy
		 */
		rt->output = mctp_route_output;
		rt->mtu = 0;

	} else {
		return -EINVAL;
	}

	spin_lock_irqsave(&rt->dev->addrs_lock, flags);
	if (rt->dev->num_addrs == 0) {
		rc = -EHOSTUNREACH;
	} else {
		/* use the outbound interface's first address as our source */
		saddr = rt->dev->addrs[0];
		rc = 0;
	}
	spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);

	if (rc)
		goto out_release;

	if (req_tag & MCTP_TAG_OWNER) {
		if (req_tag & MCTP_TAG_PREALLOC)
			key = mctp_lookup_prealloc_tag(msk, daddr,
						       req_tag, &tag);
		else
			key = mctp_alloc_local_tag(msk, daddr, saddr,
						   false, &tag);

		if (IS_ERR(key)) {
			rc = PTR_ERR(key);
			goto out_release;
		}
		mctp_skb_set_flow(skb, key);
		/* done with the key in this scope */
		mctp_key_unref(key);
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		key = NULL;
		tag = req_tag & MCTP_TAG_MASK;
	}

	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	skb->dev = rt->dev->dev;

	/* cb->net will have been set on initial ingress */
	cb->src = saddr;

	/* set up common header fields */
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = saddr;

	mtu = mctp_route_mtu(rt);

	if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
		hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
			MCTP_HDR_FLAG_EOM | tag;
		rc = rt->output(rt, skb);
	} else {
		rc = mctp_do_fragment_route(rt, skb, mtu, tag);
	}

out_release:
	if (!ext_rt)
		mctp_route_release(rt);

	dev_put(dev);

	return rc;
}

/* route management */
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			  unsigned int daddr_extent, unsigned int mtu,
			  unsigned char type)
{
	int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb);
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *ert;

	if (!mctp_address_unicast(daddr_start))
		return -EINVAL;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	switch (type) {
	case RTN_LOCAL:
		rtfn = mctp_route_input;
		break;
	case RTN_UNICAST:
		rtfn = mctp_route_output;
		break;
	default:
		return -EINVAL;
	}

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	rt->dev = mdev;
	mctp_dev_hold(rt->dev);
	rt->type = type;
	rt->output = rtfn;

	ASSERT_RTNL();
	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert)) {
			mctp_route_release(rt);
			return -EEXIST;
		}
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}

static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			     unsigned int daddr_extent, unsigned char type)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev &&
		    rt->min == daddr_start && rt->max == daddr_end &&
		    rt->type == type) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}
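
/* Routes cover the inclusive EID range [daddr_start, daddr_start +
 * daddr_extent]; an extent of 0 is a single-EID route, which is all the
 * local-address helpers below ever create.
 */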

int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
}

int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}

/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}

/* Incoming packet-handling */

static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
				struct packet_type *pt,
				struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mctp_dev *mdev;
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct mctp_hdr *mh;

	rcu_read_lock();
	mdev = __mctp_dev_get(dev);
	rcu_read_unlock();
	if (!mdev) {
		/* basic non-data sanity checks */
		goto err_drop;
	}

	if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
		goto err_drop;

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	/* We have enough for a header; decode and route */
	mh = mctp_hdr(skb);
	if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
		goto err_drop;

	/* source must be valid unicast or null; drop reserved ranges and
	 * broadcast
	 */
	if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
		goto err_drop;

	/* dest address: as above, but allow broadcast */
	if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) ||
	      mctp_address_broadcast(mh->dest)))
		goto err_drop;

	/* MCTP drivers must populate halen/haddr */
	if (dev->type == ARPHRD_MCTP) {
		cb = mctp_cb(skb);
	} else {
		cb = __mctp_cb(skb);
		cb->halen = 0;
	}
	cb->net = READ_ONCE(mdev->net);
	cb->ifindex = dev->ifindex;

	rt = mctp_route_lookup(net, cb->net, mh->dest);

	/* NULL EID, but addressed to our physical address */
	if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
		rt = mctp_route_lookup_null(net, dev);

	if (!rt)
		goto err_drop;

	rt->output(rt, skb);
	mctp_route_release(rt);

	return NET_RX_SUCCESS;

err_drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};

/* netlink interface */

static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST] = { .type = NLA_U8 },
	[RTA_METRICS] = { .type = NLA_NESTED },
	[RTA_OIF] = { .type = NLA_U32 },
};
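
/* RTM_NEWROUTE/RTM_DELROUTE requests carry the destination EID as a single
 * u8 in RTA_DST, the outgoing interface in RTA_OIF, and optionally an MTU
 * as RTAX_MTU nested inside RTA_METRICS; rtm_dst_len is interpreted as an
 * EID range extent rather than a prefix length.
 */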

/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
 * tb must hold RTA_MAX+1 elements.
 */
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack,
			      struct nlattr **tb, struct rtmsg **rtm,
			      struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	unsigned int ifindex;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (!tb[RTA_OIF]) {
		NL_SET_ERR_MSG(extack, "ifindex missing");
		return -EINVAL;
	}
	ifindex = nla_get_u32(tb[RTA_OIF]);

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "bad ifindex");
		return -ENODEV;
	}
	*mdev = mctp_dev_get_rtnl(dev);
	if (!*mdev)
		return -ENODEV;

	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
	[RTAX_MTU] = { .type = NLA_U32 },
};

static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	struct nlattr *tbx[RTAX_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	unsigned int mtu;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	if (rtm->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	mtu = 0;
	if (tb[RTA_METRICS]) {
		rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
				      rta_metrics_policy, NULL);
		if (rc < 0)
			return rc;
		if (tbx[RTAX_MTU])
			mtu = nla_get_u32(tbx[RTAX_MTU]);
	}

	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
			    rtm->rtm_type);
	return rc;
}

static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
	return rc;
}

static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *hdr;
	void *metrics;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->rtm_family = AF_MCTP;

	/* we use the _len fields as an EID range extent (max - min), rather
	 * than a number of bits in the address
	 */
	hdr->rtm_dst_len = rt->max - rt->min;
	hdr->rtm_src_len = 0;
	hdr->rtm_tos = 0;
	hdr->rtm_table = RT_TABLE_DEFAULT;
	hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
	hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
	hdr->rtm_type = rt->type;

	if (nla_put_u8(skb, RTA_DST, rt->min))
		goto cancel;

	metrics = nla_nest_start_noflag(skb, RTA_METRICS);
	if (!metrics)
		goto cancel;

	if (rt->mtu) {
		if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
			goto cancel;
	}

	nla_nest_end(skb, metrics);

	if (rt->dev) {
		if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
			goto cancel;
	}

	/* TODO: conditional neighbour physaddr? */

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * cb->strict_check
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	INIT_HLIST_HEAD(&ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
	return 0;
}

static void __net_exit mctp_routes_net_exit(struct net *net)
{
	struct mctp_route *rt;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
		mctp_route_release(rt);
	rcu_read_unlock();
}

static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};

int __init mctp_routes_init(void)
{
	dev_add_pack(&mctp_packet_type);

	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
			     NULL, mctp_dump_rtinfo, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
			     mctp_newroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
			     mctp_delroute, NULL, 0);

	return register_pernet_subsys(&mctp_net_ops);
}

void __exit mctp_routes_exit(void)
{
	unregister_pernet_subsys(&mctp_net_ops);
	rtnl_unregister(PF_MCTP, RTM_DELROUTE);
	rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
	rtnl_unregister(PF_MCTP, RTM_GETROUTE);
	dev_remove_pack(&mctp_packet_type);
}

#if IS_ENABLED(CONFIG_MCTP_TEST)
#include "test/route-test.c"
#endif