/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active:
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active:
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active:
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station:
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	macsec_rxsc_put(sa->sc);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (tx_sc->send_sci ||
	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
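	/* The packet number is a 32-bit counter; once it wraps the SA can no
	 * longer protect frames, so take it out of service and, if this SecY
	 * protects frames, mark the SecY as not operational.
	 */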
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	unsigned char iv[GCM_AES_IV_LEN];
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
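	/* skb_push() made room for the SecTAG in front of the payload; copy
	 * the destination and source MAC addresses up to the new head of the
	 * frame before filling in the SecTAG behind them.
	 */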
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	macsec_fill_iv(iv, secy->sci, pn);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = netif_rx(skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	dev_put(dev);
	return;
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	unsigned char iv[GCM_AES_IV_LEN];
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->rx_sa = rx_sa;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
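	/* The frame may be too short to hold a SecTAG carrying the optional
	 * SCI; in that case, retry with the shorter SCI-less SecTAG length.
	 */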
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS)
			macsec_rxsa_put(rx_sa);
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	count_rx(dev, skb->len);

	rcu_read_unlock();

	*pskb = skb;
	return RX_HANDLER_ANOTHER;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (!tfm || IS_ERR(tfm))
		return NULL;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0) {
		crypto_free_aead(tfm);
		return NULL;
	}

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0) {
		crypto_free_aead(tfm);
		return NULL;
	}

	return tfm;
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -1;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (!rx_sa->key.tfm) {
		free_percpu(rx_sa->stats);
		return -1;
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -1;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (!tx_sa->key.tfm) {
		free_percpu(tx_sa->stats);
		return -1;
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam = {
	.id		= GENL_ID_GENERATE,
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
};

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
				 secy->key_len, secy->icv_len)) {
		kfree(rx_sa);
		rtnl_unlock();
		return -ENOMEM;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
				 secy->key_len, secy->icv_len)) {
		kfree(tx_sa);
		rtnl_unlock();
		return -ENOMEM;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

/* Dump one SecY: its configuration, statistics, TX SAs, and all RX SCs/SAs. */
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

/* Transmit path: protect/encrypt the frame when required, then hand it to
 * the underlying device.
 */
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= real_dev->features & MACSEC_FEATURES;
	features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;

	eth_zero_addr(dev->broadcast);
}

/* Apply the changeable rtnetlink attributes to the SecY and its TX SC. */
static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_generation++;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}

	macsec_del_dev(macsec);
}

/* Attach this macsec device to the underlying device's rx_handler data,
 * registering macsec_handle_frame on first use.
 */
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unregister;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unregister;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	dev_hold(real_dev);

	return 0;

del_dev:
	macsec_del_dev(macsec);
unregister:
	unregister_netdevice(dev);
	return err;
}

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_MAX_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return 0 +
		nla_total_size_64bit(8) + /* SCI */
		nla_total_size(1) + /* ICV_LEN */
		nla_total_size_64bit(8) + /* CIPHER_SUITE */
		nla_total_size(4) + /* WINDOW */
		nla_total_size(1) + /* ENCODING_SA */
		nla_total_size(1) + /* ENCRYPT */
		nla_total_size(1) + /* PROTECT */
		nla_total_size(1) + /* INC_SCI */
		nla_total_size(1) + /* ES */
		nla_total_size(1) + /* SCB */
		nla_total_size(1) + /* REPLAY_PROTECT */
		nla_total_size(1) + /* VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_dellink(m->secy.netdev, &head);
		}
		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");