/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active:
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active:
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active:
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station:
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}


static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
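		/* take a consistent snapshot of this CPU's counters; the
		 * u64_stats syncp makes us retry if a writer raced with us.
		 */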
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

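	/* cb->seq was set to macsec_generation by macsec_dump_txsc(); if the
	 * configuration changes while the dump is in progress, the message is
	 * flagged as inconsistent so userspace can restart the dump.
	 */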
	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
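				/* unwind every nesting level opened so far so
				 * the partially-built message stays well-formed.
				 */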
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
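	/* all configuration commands above require CAP_NET_ADMIN
	 * (GENL_ADMIN_PERM); only the MACSEC_CMD_GET_TXSC dump is available
	 * to unprivileged users.
	 */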
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};

static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

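	/* mirror the virtual device's allmulti/promiscuous state onto the
	 * underlying device so the expected frames actually reach us.
	 */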
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}
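
/* report the underlying netdevice as this virtual device's link, so
 * rtnetlink can expose the macsec/lower-device relationship via IFLA_LINK.
 */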
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
	.ndo_get_lock_subclass = macsec_get_nest_level,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

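	/* the replay window set above only has an effect once replay
	 * protection is enabled below.
	 */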
	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect =
			!!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

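	/* the rest of the SecY starts out with the driver defaults (frame
	 * protection on, strict validation, replay protection off); they can
	 * be changed later through the rtnetlink/genetlink interfaces.
	 */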
	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

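	/* only the GCM-AES-128 cipher suite is supported, under either of its
	 * two identifiers (the default ID and the alternate encoding).
	 */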
	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
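
/* The rtnl_link_ops below back "ip link ... type macsec".  As a rough,
 * illustrative sketch only (interface names, SCIs and key values are
 * placeholders, and the exact iproute2 syntax may vary by version), a device
 * is typically brought up along these lines:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-hex-key>
 *   ip macsec add macsec0 rx sci <peer-sci>
 *   ip macsec add macsec0 rx sci <peer-sci> sa 0 pn 1 on key 02 <16-byte-hex-key>
 *
 * The "ip link" step goes through macsec_newlink(); the "ip macsec" steps use
 * the genetlink commands in macsec_genl_ops above.
 */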
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");