/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: SA is in use
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: SA is in use
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: transmit SC is in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the ES (end station) bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct net_device *dev, 911 struct macsec_rx_sa *rx_sa, 912 sci_t sci, 913 struct macsec_secy *secy) 914 { 915 int ret; 916 struct scatterlist *sg; 917 unsigned char *iv; 918 struct aead_request *req; 919 struct macsec_eth_header *hdr; 920 u16 icv_len = secy->icv_len; 921 922 macsec_skb_cb(skb)->valid = false; 923 skb = skb_share_check(skb, GFP_ATOMIC); 924 if (!skb) 925 return ERR_PTR(-ENOMEM); 926 927 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg); 928 if (!req) { 929 kfree_skb(skb); 930 return ERR_PTR(-ENOMEM); 931 } 932 933 hdr = (struct macsec_eth_header *)skb->data; 934 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); 935 936 sg_init_table(sg, MAX_SKB_FRAGS + 1); 937 skb_to_sgvec(skb, sg, 0, skb->len); 938 939 if (hdr->tci_an & MACSEC_TCI_E) { 940 /* confidentiality: ethernet + macsec header 941 * authenticated, encrypted payload 942 */ 943 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); 944 945 aead_request_set_crypt(req, sg, sg, len, iv); 946 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); 947 skb = skb_unshare(skb, GFP_ATOMIC); 948 if (!skb) { 949 aead_request_free(req); 950 return ERR_PTR(-ENOMEM); 951 } 952 } else { 953 /* integrity only: all headers + data authenticated */ 954 aead_request_set_crypt(req, sg, sg, icv_len, iv); 955 aead_request_set_ad(req, skb->len - icv_len); 956 } 957 958 macsec_skb_cb(skb)->req = req; 959 skb->dev = dev; 960 aead_request_set_callback(req, 0, macsec_decrypt_done, skb); 961 962 dev_hold(dev); 963 ret = crypto_aead_decrypt(req); 964 if (ret == -EINPROGRESS) { 965 return ERR_PTR(ret); 966 } else if (ret != 0) { 967 /* decryption/authentication failed 968 * 10.6 if validateFrames is disabled, deliver anyway 969 */ 970 if (ret != -EBADMSG) { 971 kfree_skb(skb); 972 skb = ERR_PTR(ret); 973 } 974 } else { 975 macsec_skb_cb(skb)->valid = true; 976 } 977 dev_put(dev); 978 979 aead_request_free(req); 980 981 return skb; 982 } 983 984 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) 985 { 986 struct macsec_rx_sc *rx_sc; 987 988 for_each_rxsc(secy, rx_sc) { 989 if (rx_sc->sci == sci) 990 return rx_sc; 991 } 992 993 return NULL; 994 } 995 996 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) 997 { 998 struct macsec_rx_sc *rx_sc; 999 1000 for_each_rxsc_rtnl(secy, rx_sc) { 1001 if (rx_sc->sci == sci) 1002 return rx_sc; 1003 } 1004 1005 return NULL; 1006 } 1007 1008 static void handle_not_macsec(struct sk_buff *skb) 1009 { 1010 struct macsec_rxh_data *rxd; 1011 struct macsec_dev *macsec; 1012 1013 rcu_read_lock(); 1014 rxd = macsec_data_rcu(skb->dev); 1015 1016 /* 10.6 If the management control validateFrames is not 1017 * Strict, frames without a SecTAG are received, counted, and 1018 * delivered to the Controlled Port 1019 */ 1020 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1021 struct sk_buff *nskb; 1022 int ret; 1023 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 1024 1025 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1026 u64_stats_update_begin(&secy_stats->syncp); 1027 secy_stats->stats.InPktsNoTag++; 1028 u64_stats_update_end(&secy_stats->syncp); 1029 continue; 1030 } 1031 1032 /* deliver on this port */ 1033 nskb = skb_clone(skb, GFP_ATOMIC); 1034 if (!nskb) 1035 break; 1036 1037 nskb->dev = macsec->secy.netdev; 1038 1039 ret = netif_rx(nskb); 1040 if (ret == NET_RX_SUCCESS) { 1041 u64_stats_update_begin(&secy_stats->syncp); 1042 secy_stats->stats.InPktsUntagged++; 1043 
u64_stats_update_end(&secy_stats->syncp); 1044 } else { 1045 macsec->secy.netdev->stats.rx_dropped++; 1046 } 1047 } 1048 1049 rcu_read_unlock(); 1050 } 1051 1052 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 1053 { 1054 struct sk_buff *skb = *pskb; 1055 struct net_device *dev = skb->dev; 1056 struct macsec_eth_header *hdr; 1057 struct macsec_secy *secy = NULL; 1058 struct macsec_rx_sc *rx_sc; 1059 struct macsec_rx_sa *rx_sa; 1060 struct macsec_rxh_data *rxd; 1061 struct macsec_dev *macsec; 1062 sci_t sci; 1063 u32 pn; 1064 bool cbit; 1065 struct pcpu_rx_sc_stats *rxsc_stats; 1066 struct pcpu_secy_stats *secy_stats; 1067 bool pulled_sci; 1068 int ret; 1069 1070 if (skb_headroom(skb) < ETH_HLEN) 1071 goto drop_direct; 1072 1073 hdr = macsec_ethhdr(skb); 1074 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) { 1075 handle_not_macsec(skb); 1076 1077 /* and deliver to the uncontrolled port */ 1078 return RX_HANDLER_PASS; 1079 } 1080 1081 skb = skb_unshare(skb, GFP_ATOMIC); 1082 if (!skb) { 1083 *pskb = NULL; 1084 return RX_HANDLER_CONSUMED; 1085 } 1086 1087 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); 1088 if (!pulled_sci) { 1089 if (!pskb_may_pull(skb, macsec_extra_len(false))) 1090 goto drop_direct; 1091 } 1092 1093 hdr = macsec_ethhdr(skb); 1094 1095 /* Frames with a SecTAG that has the TCI E bit set but the C 1096 * bit clear are discarded, as this reserved encoding is used 1097 * to identify frames with a SecTAG that are not to be 1098 * delivered to the Controlled Port. 1099 */ 1100 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) 1101 return RX_HANDLER_PASS; 1102 1103 /* now, pull the extra length */ 1104 if (hdr->tci_an & MACSEC_TCI_SC) { 1105 if (!pulled_sci) 1106 goto drop_direct; 1107 } 1108 1109 /* ethernet header is part of crypto processing */ 1110 skb_push(skb, ETH_HLEN); 1111 1112 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); 1113 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; 1114 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); 1115 1116 rcu_read_lock(); 1117 rxd = macsec_data_rcu(skb->dev); 1118 1119 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1120 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); 1121 sc = sc ? macsec_rxsc_get(sc) : NULL; 1122 1123 if (sc) { 1124 secy = &macsec->secy; 1125 rx_sc = sc; 1126 break; 1127 } 1128 } 1129 1130 if (!secy) 1131 goto nosci; 1132 1133 dev = secy->netdev; 1134 macsec = macsec_priv(dev); 1135 secy_stats = this_cpu_ptr(macsec->stats); 1136 rxsc_stats = this_cpu_ptr(rx_sc->stats); 1137 1138 if (!macsec_validate_skb(skb, secy->icv_len)) { 1139 u64_stats_update_begin(&secy_stats->syncp); 1140 secy_stats->stats.InPktsBadTag++; 1141 u64_stats_update_end(&secy_stats->syncp); 1142 goto drop_nosa; 1143 } 1144 1145 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); 1146 if (!rx_sa) { 1147 /* 10.6.1 if the SA is not in use */ 1148 1149 /* If validateFrames is Strict or the C bit in the 1150 * SecTAG is set, discard 1151 */ 1152 if (hdr->tci_an & MACSEC_TCI_C || 1153 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 1154 u64_stats_update_begin(&rxsc_stats->syncp); 1155 rxsc_stats->stats.InPktsNotUsingSA++; 1156 u64_stats_update_end(&rxsc_stats->syncp); 1157 goto drop_nosa; 1158 } 1159 1160 /* not Strict, the frame (with the SecTAG and ICV 1161 * removed) is delivered to the Controlled Port. 
1162 */ 1163 u64_stats_update_begin(&rxsc_stats->syncp); 1164 rxsc_stats->stats.InPktsUnusedSA++; 1165 u64_stats_update_end(&rxsc_stats->syncp); 1166 goto deliver; 1167 } 1168 1169 /* First, PN check to avoid decrypting obviously wrong packets */ 1170 pn = ntohl(hdr->packet_number); 1171 if (secy->replay_protect) { 1172 bool late; 1173 1174 spin_lock(&rx_sa->lock); 1175 late = rx_sa->next_pn >= secy->replay_window && 1176 pn < (rx_sa->next_pn - secy->replay_window); 1177 spin_unlock(&rx_sa->lock); 1178 1179 if (late) { 1180 u64_stats_update_begin(&rxsc_stats->syncp); 1181 rxsc_stats->stats.InPktsLate++; 1182 u64_stats_update_end(&rxsc_stats->syncp); 1183 goto drop; 1184 } 1185 } 1186 1187 macsec_skb_cb(skb)->rx_sa = rx_sa; 1188 1189 /* Disabled && !changed text => skip validation */ 1190 if (hdr->tci_an & MACSEC_TCI_C || 1191 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1192 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1193 1194 if (IS_ERR(skb)) { 1195 /* the decrypt callback needs the reference */ 1196 if (PTR_ERR(skb) != -EINPROGRESS) { 1197 macsec_rxsa_put(rx_sa); 1198 macsec_rxsc_put(rx_sc); 1199 } 1200 rcu_read_unlock(); 1201 *pskb = NULL; 1202 return RX_HANDLER_CONSUMED; 1203 } 1204 1205 if (!macsec_post_decrypt(skb, secy, pn)) 1206 goto drop; 1207 1208 deliver: 1209 macsec_finalize_skb(skb, secy->icv_len, 1210 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1211 macsec_reset_skb(skb, secy->netdev); 1212 1213 if (rx_sa) 1214 macsec_rxsa_put(rx_sa); 1215 macsec_rxsc_put(rx_sc); 1216 1217 ret = gro_cells_receive(&macsec->gro_cells, skb); 1218 if (ret == NET_RX_SUCCESS) 1219 count_rx(dev, skb->len); 1220 else 1221 macsec->secy.netdev->stats.rx_dropped++; 1222 1223 rcu_read_unlock(); 1224 1225 *pskb = NULL; 1226 return RX_HANDLER_CONSUMED; 1227 1228 drop: 1229 macsec_rxsa_put(rx_sa); 1230 drop_nosa: 1231 macsec_rxsc_put(rx_sc); 1232 rcu_read_unlock(); 1233 drop_direct: 1234 kfree_skb(skb); 1235 *pskb = NULL; 1236 return RX_HANDLER_CONSUMED; 1237 1238 nosci: 1239 /* 10.6.1 if the SC is not found */ 1240 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1241 if (!cbit) 1242 macsec_finalize_skb(skb, DEFAULT_ICV_LEN, 1243 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1244 1245 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1246 struct sk_buff *nskb; 1247 1248 secy_stats = this_cpu_ptr(macsec->stats); 1249 1250 /* If validateFrames is Strict or the C bit in the 1251 * SecTAG is set, discard 1252 */ 1253 if (cbit || 1254 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1255 u64_stats_update_begin(&secy_stats->syncp); 1256 secy_stats->stats.InPktsNoSCI++; 1257 u64_stats_update_end(&secy_stats->syncp); 1258 continue; 1259 } 1260 1261 /* not strict, the frame (with the SecTAG and ICV 1262 * removed) is delivered to the Controlled Port. 
1263 */ 1264 nskb = skb_clone(skb, GFP_ATOMIC); 1265 if (!nskb) 1266 break; 1267 1268 macsec_reset_skb(nskb, macsec->secy.netdev); 1269 1270 ret = netif_rx(nskb); 1271 if (ret == NET_RX_SUCCESS) { 1272 u64_stats_update_begin(&secy_stats->syncp); 1273 secy_stats->stats.InPktsUnknownSCI++; 1274 u64_stats_update_end(&secy_stats->syncp); 1275 } else { 1276 macsec->secy.netdev->stats.rx_dropped++; 1277 } 1278 } 1279 1280 rcu_read_unlock(); 1281 *pskb = skb; 1282 return RX_HANDLER_PASS; 1283 } 1284 1285 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1286 { 1287 struct crypto_aead *tfm; 1288 int ret; 1289 1290 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1291 1292 if (IS_ERR(tfm)) 1293 return tfm; 1294 1295 ret = crypto_aead_setkey(tfm, key, key_len); 1296 if (ret < 0) 1297 goto fail; 1298 1299 ret = crypto_aead_setauthsize(tfm, icv_len); 1300 if (ret < 0) 1301 goto fail; 1302 1303 return tfm; 1304 fail: 1305 crypto_free_aead(tfm); 1306 return ERR_PTR(ret); 1307 } 1308 1309 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1310 int icv_len) 1311 { 1312 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1313 if (!rx_sa->stats) 1314 return -ENOMEM; 1315 1316 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1317 if (IS_ERR(rx_sa->key.tfm)) { 1318 free_percpu(rx_sa->stats); 1319 return PTR_ERR(rx_sa->key.tfm); 1320 } 1321 1322 rx_sa->active = false; 1323 rx_sa->next_pn = 1; 1324 atomic_set(&rx_sa->refcnt, 1); 1325 spin_lock_init(&rx_sa->lock); 1326 1327 return 0; 1328 } 1329 1330 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1331 { 1332 rx_sa->active = false; 1333 1334 macsec_rxsa_put(rx_sa); 1335 } 1336 1337 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1338 { 1339 int i; 1340 1341 for (i = 0; i < MACSEC_NUM_AN; i++) { 1342 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1343 1344 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1345 if (sa) 1346 clear_rx_sa(sa); 1347 } 1348 1349 macsec_rxsc_put(rx_sc); 1350 } 1351 1352 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1353 { 1354 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1355 1356 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1357 rx_sc; 1358 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1359 if (rx_sc->sci == sci) { 1360 if (rx_sc->active) 1361 secy->n_rx_sc--; 1362 rcu_assign_pointer(*rx_scp, rx_sc->next); 1363 return rx_sc; 1364 } 1365 } 1366 1367 return NULL; 1368 } 1369 1370 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) 1371 { 1372 struct macsec_rx_sc *rx_sc; 1373 struct macsec_dev *macsec; 1374 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1375 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1376 struct macsec_secy *secy; 1377 1378 list_for_each_entry(macsec, &rxd->secys, secys) { 1379 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1380 return ERR_PTR(-EEXIST); 1381 } 1382 1383 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1384 if (!rx_sc) 1385 return ERR_PTR(-ENOMEM); 1386 1387 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1388 if (!rx_sc->stats) { 1389 kfree(rx_sc); 1390 return ERR_PTR(-ENOMEM); 1391 } 1392 1393 rx_sc->sci = sci; 1394 rx_sc->active = true; 1395 atomic_set(&rx_sc->refcnt, 1); 1396 1397 secy = &macsec_priv(dev)->secy; 1398 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1399 rcu_assign_pointer(secy->rx_sc, rx_sc); 1400 1401 if (rx_sc->active) 1402 secy->n_rx_sc++; 1403 1404 return rx_sc; 1405 } 1406 1407 static int 
init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, 1408 int icv_len) 1409 { 1410 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); 1411 if (!tx_sa->stats) 1412 return -ENOMEM; 1413 1414 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1415 if (IS_ERR(tx_sa->key.tfm)) { 1416 free_percpu(tx_sa->stats); 1417 return PTR_ERR(tx_sa->key.tfm); 1418 } 1419 1420 tx_sa->active = false; 1421 atomic_set(&tx_sa->refcnt, 1); 1422 spin_lock_init(&tx_sa->lock); 1423 1424 return 0; 1425 } 1426 1427 static void clear_tx_sa(struct macsec_tx_sa *tx_sa) 1428 { 1429 tx_sa->active = false; 1430 1431 macsec_txsa_put(tx_sa); 1432 } 1433 1434 static struct genl_family macsec_fam; 1435 1436 static struct net_device *get_dev_from_nl(struct net *net, 1437 struct nlattr **attrs) 1438 { 1439 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); 1440 struct net_device *dev; 1441 1442 dev = __dev_get_by_index(net, ifindex); 1443 if (!dev) 1444 return ERR_PTR(-ENODEV); 1445 1446 if (!netif_is_macsec(dev)) 1447 return ERR_PTR(-ENODEV); 1448 1449 return dev; 1450 } 1451 1452 static sci_t nla_get_sci(const struct nlattr *nla) 1453 { 1454 return (__force sci_t)nla_get_u64(nla); 1455 } 1456 1457 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, 1458 int padattr) 1459 { 1460 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); 1461 } 1462 1463 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, 1464 struct nlattr **attrs, 1465 struct nlattr **tb_sa, 1466 struct net_device **devp, 1467 struct macsec_secy **secyp, 1468 struct macsec_tx_sc **scp, 1469 u8 *assoc_num) 1470 { 1471 struct net_device *dev; 1472 struct macsec_secy *secy; 1473 struct macsec_tx_sc *tx_sc; 1474 struct macsec_tx_sa *tx_sa; 1475 1476 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1477 return ERR_PTR(-EINVAL); 1478 1479 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1480 1481 dev = get_dev_from_nl(net, attrs); 1482 if (IS_ERR(dev)) 1483 return ERR_CAST(dev); 1484 1485 if (*assoc_num >= MACSEC_NUM_AN) 1486 return ERR_PTR(-EINVAL); 1487 1488 secy = &macsec_priv(dev)->secy; 1489 tx_sc = &secy->tx_sc; 1490 1491 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); 1492 if (!tx_sa) 1493 return ERR_PTR(-ENODEV); 1494 1495 *devp = dev; 1496 *scp = tx_sc; 1497 *secyp = secy; 1498 return tx_sa; 1499 } 1500 1501 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, 1502 struct nlattr **attrs, 1503 struct nlattr **tb_rxsc, 1504 struct net_device **devp, 1505 struct macsec_secy **secyp) 1506 { 1507 struct net_device *dev; 1508 struct macsec_secy *secy; 1509 struct macsec_rx_sc *rx_sc; 1510 sci_t sci; 1511 1512 dev = get_dev_from_nl(net, attrs); 1513 if (IS_ERR(dev)) 1514 return ERR_CAST(dev); 1515 1516 secy = &macsec_priv(dev)->secy; 1517 1518 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1519 return ERR_PTR(-EINVAL); 1520 1521 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1522 rx_sc = find_rx_sc_rtnl(secy, sci); 1523 if (!rx_sc) 1524 return ERR_PTR(-ENODEV); 1525 1526 *secyp = secy; 1527 *devp = dev; 1528 1529 return rx_sc; 1530 } 1531 1532 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, 1533 struct nlattr **attrs, 1534 struct nlattr **tb_rxsc, 1535 struct nlattr **tb_sa, 1536 struct net_device **devp, 1537 struct macsec_secy **secyp, 1538 struct macsec_rx_sc **scp, 1539 u8 *assoc_num) 1540 { 1541 struct macsec_rx_sc *rx_sc; 1542 struct macsec_rx_sa *rx_sa; 1543 1544 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1545 return ERR_PTR(-EINVAL); 1546 1547 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1548 if 
(*assoc_num >= MACSEC_NUM_AN) 1549 return ERR_PTR(-EINVAL); 1550 1551 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); 1552 if (IS_ERR(rx_sc)) 1553 return ERR_CAST(rx_sc); 1554 1555 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); 1556 if (!rx_sa) 1557 return ERR_PTR(-ENODEV); 1558 1559 *scp = rx_sc; 1560 return rx_sa; 1561 } 1562 1563 1564 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { 1565 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, 1566 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, 1567 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, 1568 }; 1569 1570 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { 1571 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, 1572 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, 1573 }; 1574 1575 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { 1576 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1577 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1578 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, 1579 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, 1580 .len = MACSEC_KEYID_LEN, }, 1581 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1582 .len = MACSEC_MAX_KEY_LEN, }, 1583 }; 1584 1585 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) 1586 { 1587 if (!attrs[MACSEC_ATTR_SA_CONFIG]) 1588 return -EINVAL; 1589 1590 if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], 1591 macsec_genl_sa_policy)) 1592 return -EINVAL; 1593 1594 return 0; 1595 } 1596 1597 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) 1598 { 1599 if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) 1600 return -EINVAL; 1601 1602 if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], 1603 macsec_genl_rxsc_policy)) 1604 return -EINVAL; 1605 1606 return 0; 1607 } 1608 1609 static bool validate_add_rxsa(struct nlattr **attrs) 1610 { 1611 if (!attrs[MACSEC_SA_ATTR_AN] || 1612 !attrs[MACSEC_SA_ATTR_KEY] || 1613 !attrs[MACSEC_SA_ATTR_KEYID]) 1614 return false; 1615 1616 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1617 return false; 1618 1619 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1620 return false; 1621 1622 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1623 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1624 return false; 1625 } 1626 1627 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1628 return false; 1629 1630 return true; 1631 } 1632 1633 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) 1634 { 1635 struct net_device *dev; 1636 struct nlattr **attrs = info->attrs; 1637 struct macsec_secy *secy; 1638 struct macsec_rx_sc *rx_sc; 1639 struct macsec_rx_sa *rx_sa; 1640 unsigned char assoc_num; 1641 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1642 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1643 int err; 1644 1645 if (!attrs[MACSEC_ATTR_IFINDEX]) 1646 return -EINVAL; 1647 1648 if (parse_sa_config(attrs, tb_sa)) 1649 return -EINVAL; 1650 1651 if (parse_rxsc_config(attrs, tb_rxsc)) 1652 return -EINVAL; 1653 1654 if (!validate_add_rxsa(tb_sa)) 1655 return -EINVAL; 1656 1657 rtnl_lock(); 1658 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 1659 if (IS_ERR(rx_sc)) { 1660 rtnl_unlock(); 1661 return PTR_ERR(rx_sc); 1662 } 1663 1664 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1665 1666 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1667 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", 1668 
nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1669 rtnl_unlock(); 1670 return -EINVAL; 1671 } 1672 1673 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); 1674 if (rx_sa) { 1675 rtnl_unlock(); 1676 return -EBUSY; 1677 } 1678 1679 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1680 if (!rx_sa) { 1681 rtnl_unlock(); 1682 return -ENOMEM; 1683 } 1684 1685 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1686 secy->key_len, secy->icv_len); 1687 if (err < 0) { 1688 kfree(rx_sa); 1689 rtnl_unlock(); 1690 return err; 1691 } 1692 1693 if (tb_sa[MACSEC_SA_ATTR_PN]) { 1694 spin_lock_bh(&rx_sa->lock); 1695 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1696 spin_unlock_bh(&rx_sa->lock); 1697 } 1698 1699 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1700 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1701 1702 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1703 rx_sa->sc = rx_sc; 1704 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1705 1706 rtnl_unlock(); 1707 1708 return 0; 1709 } 1710 1711 static bool validate_add_rxsc(struct nlattr **attrs) 1712 { 1713 if (!attrs[MACSEC_RXSC_ATTR_SCI]) 1714 return false; 1715 1716 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { 1717 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) 1718 return false; 1719 } 1720 1721 return true; 1722 } 1723 1724 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) 1725 { 1726 struct net_device *dev; 1727 sci_t sci = MACSEC_UNDEF_SCI; 1728 struct nlattr **attrs = info->attrs; 1729 struct macsec_rx_sc *rx_sc; 1730 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1731 1732 if (!attrs[MACSEC_ATTR_IFINDEX]) 1733 return -EINVAL; 1734 1735 if (parse_rxsc_config(attrs, tb_rxsc)) 1736 return -EINVAL; 1737 1738 if (!validate_add_rxsc(tb_rxsc)) 1739 return -EINVAL; 1740 1741 rtnl_lock(); 1742 dev = get_dev_from_nl(genl_info_net(info), attrs); 1743 if (IS_ERR(dev)) { 1744 rtnl_unlock(); 1745 return PTR_ERR(dev); 1746 } 1747 1748 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1749 1750 rx_sc = create_rx_sc(dev, sci); 1751 if (IS_ERR(rx_sc)) { 1752 rtnl_unlock(); 1753 return PTR_ERR(rx_sc); 1754 } 1755 1756 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) 1757 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 1758 1759 rtnl_unlock(); 1760 1761 return 0; 1762 } 1763 1764 static bool validate_add_txsa(struct nlattr **attrs) 1765 { 1766 if (!attrs[MACSEC_SA_ATTR_AN] || 1767 !attrs[MACSEC_SA_ATTR_PN] || 1768 !attrs[MACSEC_SA_ATTR_KEY] || 1769 !attrs[MACSEC_SA_ATTR_KEYID]) 1770 return false; 1771 1772 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1773 return false; 1774 1775 if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1776 return false; 1777 1778 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1779 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1780 return false; 1781 } 1782 1783 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1784 return false; 1785 1786 return true; 1787 } 1788 1789 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) 1790 { 1791 struct net_device *dev; 1792 struct nlattr **attrs = info->attrs; 1793 struct macsec_secy *secy; 1794 struct macsec_tx_sc *tx_sc; 1795 struct macsec_tx_sa *tx_sa; 1796 unsigned char assoc_num; 1797 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1798 int err; 1799 1800 if (!attrs[MACSEC_ATTR_IFINDEX]) 1801 return -EINVAL; 1802 1803 if (parse_sa_config(attrs, tb_sa)) 1804 return -EINVAL; 1805 1806 if (!validate_add_txsa(tb_sa)) 1807 return -EINVAL; 1808 1809 rtnl_lock(); 1810 dev = 
get_dev_from_nl(genl_info_net(info), attrs); 1811 if (IS_ERR(dev)) { 1812 rtnl_unlock(); 1813 return PTR_ERR(dev); 1814 } 1815 1816 secy = &macsec_priv(dev)->secy; 1817 tx_sc = &secy->tx_sc; 1818 1819 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1820 1821 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1822 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", 1823 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1824 rtnl_unlock(); 1825 return -EINVAL; 1826 } 1827 1828 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); 1829 if (tx_sa) { 1830 rtnl_unlock(); 1831 return -EBUSY; 1832 } 1833 1834 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1835 if (!tx_sa) { 1836 rtnl_unlock(); 1837 return -ENOMEM; 1838 } 1839 1840 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1841 secy->key_len, secy->icv_len); 1842 if (err < 0) { 1843 kfree(tx_sa); 1844 rtnl_unlock(); 1845 return err; 1846 } 1847 1848 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1849 1850 spin_lock_bh(&tx_sa->lock); 1851 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1852 spin_unlock_bh(&tx_sa->lock); 1853 1854 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1855 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1856 1857 if (assoc_num == tx_sc->encoding_sa && tx_sa->active) 1858 secy->operational = true; 1859 1860 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); 1861 1862 rtnl_unlock(); 1863 1864 return 0; 1865 } 1866 1867 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) 1868 { 1869 struct nlattr **attrs = info->attrs; 1870 struct net_device *dev; 1871 struct macsec_secy *secy; 1872 struct macsec_rx_sc *rx_sc; 1873 struct macsec_rx_sa *rx_sa; 1874 u8 assoc_num; 1875 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1876 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1877 1878 if (!attrs[MACSEC_ATTR_IFINDEX]) 1879 return -EINVAL; 1880 1881 if (parse_sa_config(attrs, tb_sa)) 1882 return -EINVAL; 1883 1884 if (parse_rxsc_config(attrs, tb_rxsc)) 1885 return -EINVAL; 1886 1887 rtnl_lock(); 1888 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 1889 &dev, &secy, &rx_sc, &assoc_num); 1890 if (IS_ERR(rx_sa)) { 1891 rtnl_unlock(); 1892 return PTR_ERR(rx_sa); 1893 } 1894 1895 if (rx_sa->active) { 1896 rtnl_unlock(); 1897 return -EBUSY; 1898 } 1899 1900 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 1901 clear_rx_sa(rx_sa); 1902 1903 rtnl_unlock(); 1904 1905 return 0; 1906 } 1907 1908 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 1909 { 1910 struct nlattr **attrs = info->attrs; 1911 struct net_device *dev; 1912 struct macsec_secy *secy; 1913 struct macsec_rx_sc *rx_sc; 1914 sci_t sci; 1915 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1916 1917 if (!attrs[MACSEC_ATTR_IFINDEX]) 1918 return -EINVAL; 1919 1920 if (parse_rxsc_config(attrs, tb_rxsc)) 1921 return -EINVAL; 1922 1923 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1924 return -EINVAL; 1925 1926 rtnl_lock(); 1927 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 1928 if (IS_ERR(dev)) { 1929 rtnl_unlock(); 1930 return PTR_ERR(dev); 1931 } 1932 1933 secy = &macsec_priv(dev)->secy; 1934 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1935 1936 rx_sc = del_rx_sc(secy, sci); 1937 if (!rx_sc) { 1938 rtnl_unlock(); 1939 return -ENODEV; 1940 } 1941 1942 free_rx_sc(rx_sc); 1943 rtnl_unlock(); 1944 1945 return 0; 1946 } 1947 1948 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 1949 { 1950 struct nlattr **attrs = info->attrs; 1951 struct 
net_device *dev; 1952 struct macsec_secy *secy; 1953 struct macsec_tx_sc *tx_sc; 1954 struct macsec_tx_sa *tx_sa; 1955 u8 assoc_num; 1956 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1957 1958 if (!attrs[MACSEC_ATTR_IFINDEX]) 1959 return -EINVAL; 1960 1961 if (parse_sa_config(attrs, tb_sa)) 1962 return -EINVAL; 1963 1964 rtnl_lock(); 1965 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 1966 &dev, &secy, &tx_sc, &assoc_num); 1967 if (IS_ERR(tx_sa)) { 1968 rtnl_unlock(); 1969 return PTR_ERR(tx_sa); 1970 } 1971 1972 if (tx_sa->active) { 1973 rtnl_unlock(); 1974 return -EBUSY; 1975 } 1976 1977 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 1978 clear_tx_sa(tx_sa); 1979 1980 rtnl_unlock(); 1981 1982 return 0; 1983 } 1984 1985 static bool validate_upd_sa(struct nlattr **attrs) 1986 { 1987 if (!attrs[MACSEC_SA_ATTR_AN] || 1988 attrs[MACSEC_SA_ATTR_KEY] || 1989 attrs[MACSEC_SA_ATTR_KEYID]) 1990 return false; 1991 1992 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1993 return false; 1994 1995 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1996 return false; 1997 1998 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1999 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2000 return false; 2001 } 2002 2003 return true; 2004 } 2005 2006 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2007 { 2008 struct nlattr **attrs = info->attrs; 2009 struct net_device *dev; 2010 struct macsec_secy *secy; 2011 struct macsec_tx_sc *tx_sc; 2012 struct macsec_tx_sa *tx_sa; 2013 u8 assoc_num; 2014 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2015 2016 if (!attrs[MACSEC_ATTR_IFINDEX]) 2017 return -EINVAL; 2018 2019 if (parse_sa_config(attrs, tb_sa)) 2020 return -EINVAL; 2021 2022 if (!validate_upd_sa(tb_sa)) 2023 return -EINVAL; 2024 2025 rtnl_lock(); 2026 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2027 &dev, &secy, &tx_sc, &assoc_num); 2028 if (IS_ERR(tx_sa)) { 2029 rtnl_unlock(); 2030 return PTR_ERR(tx_sa); 2031 } 2032 2033 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2034 spin_lock_bh(&tx_sa->lock); 2035 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2036 spin_unlock_bh(&tx_sa->lock); 2037 } 2038 2039 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2040 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2041 2042 if (assoc_num == tx_sc->encoding_sa) 2043 secy->operational = tx_sa->active; 2044 2045 rtnl_unlock(); 2046 2047 return 0; 2048 } 2049 2050 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2051 { 2052 struct nlattr **attrs = info->attrs; 2053 struct net_device *dev; 2054 struct macsec_secy *secy; 2055 struct macsec_rx_sc *rx_sc; 2056 struct macsec_rx_sa *rx_sa; 2057 u8 assoc_num; 2058 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2059 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2060 2061 if (!attrs[MACSEC_ATTR_IFINDEX]) 2062 return -EINVAL; 2063 2064 if (parse_rxsc_config(attrs, tb_rxsc)) 2065 return -EINVAL; 2066 2067 if (parse_sa_config(attrs, tb_sa)) 2068 return -EINVAL; 2069 2070 if (!validate_upd_sa(tb_sa)) 2071 return -EINVAL; 2072 2073 rtnl_lock(); 2074 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2075 &dev, &secy, &rx_sc, &assoc_num); 2076 if (IS_ERR(rx_sa)) { 2077 rtnl_unlock(); 2078 return PTR_ERR(rx_sa); 2079 } 2080 2081 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2082 spin_lock_bh(&rx_sa->lock); 2083 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2084 spin_unlock_bh(&rx_sa->lock); 2085 } 2086 2087 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2088 rx_sa->active = 
nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2089 2090 rtnl_unlock(); 2091 return 0; 2092 } 2093 2094 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2095 { 2096 struct nlattr **attrs = info->attrs; 2097 struct net_device *dev; 2098 struct macsec_secy *secy; 2099 struct macsec_rx_sc *rx_sc; 2100 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2101 2102 if (!attrs[MACSEC_ATTR_IFINDEX]) 2103 return -EINVAL; 2104 2105 if (parse_rxsc_config(attrs, tb_rxsc)) 2106 return -EINVAL; 2107 2108 if (!validate_add_rxsc(tb_rxsc)) 2109 return -EINVAL; 2110 2111 rtnl_lock(); 2112 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2113 if (IS_ERR(rx_sc)) { 2114 rtnl_unlock(); 2115 return PTR_ERR(rx_sc); 2116 } 2117 2118 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2119 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2120 2121 if (rx_sc->active != new) 2122 secy->n_rx_sc += new ? 1 : -1; 2123 2124 rx_sc->active = new; 2125 } 2126 2127 rtnl_unlock(); 2128 2129 return 0; 2130 } 2131 2132 static int copy_tx_sa_stats(struct sk_buff *skb, 2133 struct macsec_tx_sa_stats __percpu *pstats) 2134 { 2135 struct macsec_tx_sa_stats sum = {0, }; 2136 int cpu; 2137 2138 for_each_possible_cpu(cpu) { 2139 const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2140 2141 sum.OutPktsProtected += stats->OutPktsProtected; 2142 sum.OutPktsEncrypted += stats->OutPktsEncrypted; 2143 } 2144 2145 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || 2146 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) 2147 return -EMSGSIZE; 2148 2149 return 0; 2150 } 2151 2152 static int copy_rx_sa_stats(struct sk_buff *skb, 2153 struct macsec_rx_sa_stats __percpu *pstats) 2154 { 2155 struct macsec_rx_sa_stats sum = {0, }; 2156 int cpu; 2157 2158 for_each_possible_cpu(cpu) { 2159 const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2160 2161 sum.InPktsOK += stats->InPktsOK; 2162 sum.InPktsInvalid += stats->InPktsInvalid; 2163 sum.InPktsNotValid += stats->InPktsNotValid; 2164 sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; 2165 sum.InPktsUnusedSA += stats->InPktsUnusedSA; 2166 } 2167 2168 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || 2169 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || 2170 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || 2171 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || 2172 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) 2173 return -EMSGSIZE; 2174 2175 return 0; 2176 } 2177 2178 static int copy_rx_sc_stats(struct sk_buff *skb, 2179 struct pcpu_rx_sc_stats __percpu *pstats) 2180 { 2181 struct macsec_rx_sc_stats sum = {0, }; 2182 int cpu; 2183 2184 for_each_possible_cpu(cpu) { 2185 const struct pcpu_rx_sc_stats *stats; 2186 struct macsec_rx_sc_stats tmp; 2187 unsigned int start; 2188 2189 stats = per_cpu_ptr(pstats, cpu); 2190 do { 2191 start = u64_stats_fetch_begin_irq(&stats->syncp); 2192 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2193 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2194 2195 sum.InOctetsValidated += tmp.InOctetsValidated; 2196 sum.InOctetsDecrypted += tmp.InOctetsDecrypted; 2197 sum.InPktsUnchecked += tmp.InPktsUnchecked; 2198 sum.InPktsDelayed += tmp.InPktsDelayed; 2199 sum.InPktsOK += tmp.InPktsOK; 2200 sum.InPktsInvalid += tmp.InPktsInvalid; 2201 sum.InPktsLate += tmp.InPktsLate; 2202 sum.InPktsNotValid 
+= tmp.InPktsNotValid; 2203 sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2204 sum.InPktsUnusedSA += tmp.InPktsUnusedSA; 2205 } 2206 2207 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2208 sum.InOctetsValidated, 2209 MACSEC_RXSC_STATS_ATTR_PAD) || 2210 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2211 sum.InOctetsDecrypted, 2212 MACSEC_RXSC_STATS_ATTR_PAD) || 2213 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2214 sum.InPktsUnchecked, 2215 MACSEC_RXSC_STATS_ATTR_PAD) || 2216 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2217 sum.InPktsDelayed, 2218 MACSEC_RXSC_STATS_ATTR_PAD) || 2219 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2220 sum.InPktsOK, 2221 MACSEC_RXSC_STATS_ATTR_PAD) || 2222 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2223 sum.InPktsInvalid, 2224 MACSEC_RXSC_STATS_ATTR_PAD) || 2225 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2226 sum.InPktsLate, 2227 MACSEC_RXSC_STATS_ATTR_PAD) || 2228 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2229 sum.InPktsNotValid, 2230 MACSEC_RXSC_STATS_ATTR_PAD) || 2231 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2232 sum.InPktsNotUsingSA, 2233 MACSEC_RXSC_STATS_ATTR_PAD) || 2234 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2235 sum.InPktsUnusedSA, 2236 MACSEC_RXSC_STATS_ATTR_PAD)) 2237 return -EMSGSIZE; 2238 2239 return 0; 2240 } 2241 2242 static int copy_tx_sc_stats(struct sk_buff *skb, 2243 struct pcpu_tx_sc_stats __percpu *pstats) 2244 { 2245 struct macsec_tx_sc_stats sum = {0, }; 2246 int cpu; 2247 2248 for_each_possible_cpu(cpu) { 2249 const struct pcpu_tx_sc_stats *stats; 2250 struct macsec_tx_sc_stats tmp; 2251 unsigned int start; 2252 2253 stats = per_cpu_ptr(pstats, cpu); 2254 do { 2255 start = u64_stats_fetch_begin_irq(&stats->syncp); 2256 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2257 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2258 2259 sum.OutPktsProtected += tmp.OutPktsProtected; 2260 sum.OutPktsEncrypted += tmp.OutPktsEncrypted; 2261 sum.OutOctetsProtected += tmp.OutOctetsProtected; 2262 sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2263 } 2264 2265 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2266 sum.OutPktsProtected, 2267 MACSEC_TXSC_STATS_ATTR_PAD) || 2268 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2269 sum.OutPktsEncrypted, 2270 MACSEC_TXSC_STATS_ATTR_PAD) || 2271 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2272 sum.OutOctetsProtected, 2273 MACSEC_TXSC_STATS_ATTR_PAD) || 2274 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2275 sum.OutOctetsEncrypted, 2276 MACSEC_TXSC_STATS_ATTR_PAD)) 2277 return -EMSGSIZE; 2278 2279 return 0; 2280 } 2281 2282 static int copy_secy_stats(struct sk_buff *skb, 2283 struct pcpu_secy_stats __percpu *pstats) 2284 { 2285 struct macsec_dev_stats sum = {0, }; 2286 int cpu; 2287 2288 for_each_possible_cpu(cpu) { 2289 const struct pcpu_secy_stats *stats; 2290 struct macsec_dev_stats tmp; 2291 unsigned int start; 2292 2293 stats = per_cpu_ptr(pstats, cpu); 2294 do { 2295 start = u64_stats_fetch_begin_irq(&stats->syncp); 2296 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2297 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2298 2299 sum.OutPktsUntagged += tmp.OutPktsUntagged; 2300 sum.InPktsUntagged += tmp.InPktsUntagged; 2301 sum.OutPktsTooLong += tmp.OutPktsTooLong; 2302 sum.InPktsNoTag += 
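/*
 * A minimal sketch, under hypothetical demo_* names, of the u64_stats_sync
 * discipline that copy_rx_sc_stats()/copy_tx_sc_stats() rely on above: the
 * 64-bit per-CPU counters cannot be read atomically on 32-bit hosts, so the
 * writer brackets updates with u64_stats_update_begin()/end() and the dump
 * side retries until it obtains a consistent per-CPU snapshot before summing.
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer, called from the rx/tx fast path where preemption is already off */
static void demo_add_bytes(struct demo_pcpu_stats __percpu *pstats, u64 len)
{
	struct demo_pcpu_stats *stats = this_cpu_ptr(pstats);

	u64_stats_update_begin(&stats->syncp);
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* reader: snapshot each CPU under the seqcount, then accumulate */
static u64 demo_total_bytes(struct demo_pcpu_stats __percpu *pstats)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct demo_pcpu_stats *stats = per_cpu_ptr(pstats, cpu);
		unsigned int start;
		u64 bytes;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum += bytes;
	}

	return sum;
}
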
tmp.InPktsNoTag; 2303 sum.InPktsBadTag += tmp.InPktsBadTag; 2304 sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2305 sum.InPktsNoSCI += tmp.InPktsNoSCI; 2306 sum.InPktsOverrun += tmp.InPktsOverrun; 2307 } 2308 2309 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2310 sum.OutPktsUntagged, 2311 MACSEC_SECY_STATS_ATTR_PAD) || 2312 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2313 sum.InPktsUntagged, 2314 MACSEC_SECY_STATS_ATTR_PAD) || 2315 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2316 sum.OutPktsTooLong, 2317 MACSEC_SECY_STATS_ATTR_PAD) || 2318 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2319 sum.InPktsNoTag, 2320 MACSEC_SECY_STATS_ATTR_PAD) || 2321 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2322 sum.InPktsBadTag, 2323 MACSEC_SECY_STATS_ATTR_PAD) || 2324 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2325 sum.InPktsUnknownSCI, 2326 MACSEC_SECY_STATS_ATTR_PAD) || 2327 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2328 sum.InPktsNoSCI, 2329 MACSEC_SECY_STATS_ATTR_PAD) || 2330 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2331 sum.InPktsOverrun, 2332 MACSEC_SECY_STATS_ATTR_PAD)) 2333 return -EMSGSIZE; 2334 2335 return 0; 2336 } 2337 2338 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2339 { 2340 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2341 struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); 2342 2343 if (!secy_nest) 2344 return 1; 2345 2346 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2347 MACSEC_SECY_ATTR_PAD) || 2348 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2349 MACSEC_DEFAULT_CIPHER_ID, 2350 MACSEC_SECY_ATTR_PAD) || 2351 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2352 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2353 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2354 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 2355 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 2356 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 2357 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 2358 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 2359 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 2360 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 2361 goto cancel; 2362 2363 if (secy->replay_protect) { 2364 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 2365 goto cancel; 2366 } 2367 2368 nla_nest_end(skb, secy_nest); 2369 return 0; 2370 2371 cancel: 2372 nla_nest_cancel(skb, secy_nest); 2373 return 1; 2374 } 2375 2376 static int dump_secy(struct macsec_secy *secy, struct net_device *dev, 2377 struct sk_buff *skb, struct netlink_callback *cb) 2378 { 2379 struct macsec_rx_sc *rx_sc; 2380 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2381 struct nlattr *txsa_list, *rxsc_list; 2382 int i, j; 2383 void *hdr; 2384 struct nlattr *attr; 2385 2386 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2387 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 2388 if (!hdr) 2389 return -EMSGSIZE; 2390 2391 genl_dump_check_consistent(cb, hdr, &macsec_fam); 2392 2393 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2394 goto nla_put_failure; 2395 2396 if (nla_put_secy(secy, skb)) 2397 goto nla_put_failure; 2398 2399 attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS); 2400 if (!attr) 2401 goto nla_put_failure; 2402 if 
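/*
 * A minimal sketch of the nesting discipline used by nla_put_secy() and the
 * stats blocks in dump_secy() above: open a nest, emit its attributes, and on
 * failure cancel the nest so the half-written attribute is rolled back before
 * the caller gives up on the whole message.  The DEMO_ATTR_* numbers and
 * demo_put_stats() are hypothetical.
 */
#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_STATS, DEMO_ATTR_PKTS, DEMO_ATTR_BYTES };

static int demo_put_stats(struct sk_buff *skb, u32 pkts, u32 bytes)
{
	struct nlattr *nest = nla_nest_start(skb, DEMO_ATTR_STATS);

	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, DEMO_ATTR_PKTS, pkts) ||
	    nla_put_u32(skb, DEMO_ATTR_BYTES, bytes)) {
		/* undo everything written since nla_nest_start() */
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}
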
(copy_tx_sc_stats(skb, tx_sc->stats)) { 2403 nla_nest_cancel(skb, attr); 2404 goto nla_put_failure; 2405 } 2406 nla_nest_end(skb, attr); 2407 2408 attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS); 2409 if (!attr) 2410 goto nla_put_failure; 2411 if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { 2412 nla_nest_cancel(skb, attr); 2413 goto nla_put_failure; 2414 } 2415 nla_nest_end(skb, attr); 2416 2417 txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST); 2418 if (!txsa_list) 2419 goto nla_put_failure; 2420 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 2421 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 2422 struct nlattr *txsa_nest; 2423 2424 if (!tx_sa) 2425 continue; 2426 2427 txsa_nest = nla_nest_start(skb, j++); 2428 if (!txsa_nest) { 2429 nla_nest_cancel(skb, txsa_list); 2430 goto nla_put_failure; 2431 } 2432 2433 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2434 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2435 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 2436 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2437 nla_nest_cancel(skb, txsa_nest); 2438 nla_nest_cancel(skb, txsa_list); 2439 goto nla_put_failure; 2440 } 2441 2442 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); 2443 if (!attr) { 2444 nla_nest_cancel(skb, txsa_nest); 2445 nla_nest_cancel(skb, txsa_list); 2446 goto nla_put_failure; 2447 } 2448 if (copy_tx_sa_stats(skb, tx_sa->stats)) { 2449 nla_nest_cancel(skb, attr); 2450 nla_nest_cancel(skb, txsa_nest); 2451 nla_nest_cancel(skb, txsa_list); 2452 goto nla_put_failure; 2453 } 2454 nla_nest_end(skb, attr); 2455 2456 nla_nest_end(skb, txsa_nest); 2457 } 2458 nla_nest_end(skb, txsa_list); 2459 2460 rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST); 2461 if (!rxsc_list) 2462 goto nla_put_failure; 2463 2464 j = 1; 2465 for_each_rxsc_rtnl(secy, rx_sc) { 2466 int k; 2467 struct nlattr *rxsa_list; 2468 struct nlattr *rxsc_nest = nla_nest_start(skb, j++); 2469 2470 if (!rxsc_nest) { 2471 nla_nest_cancel(skb, rxsc_list); 2472 goto nla_put_failure; 2473 } 2474 2475 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 2476 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 2477 MACSEC_RXSC_ATTR_PAD)) { 2478 nla_nest_cancel(skb, rxsc_nest); 2479 nla_nest_cancel(skb, rxsc_list); 2480 goto nla_put_failure; 2481 } 2482 2483 attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS); 2484 if (!attr) { 2485 nla_nest_cancel(skb, rxsc_nest); 2486 nla_nest_cancel(skb, rxsc_list); 2487 goto nla_put_failure; 2488 } 2489 if (copy_rx_sc_stats(skb, rx_sc->stats)) { 2490 nla_nest_cancel(skb, attr); 2491 nla_nest_cancel(skb, rxsc_nest); 2492 nla_nest_cancel(skb, rxsc_list); 2493 goto nla_put_failure; 2494 } 2495 nla_nest_end(skb, attr); 2496 2497 rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST); 2498 if (!rxsa_list) { 2499 nla_nest_cancel(skb, rxsc_nest); 2500 nla_nest_cancel(skb, rxsc_list); 2501 goto nla_put_failure; 2502 } 2503 2504 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 2505 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 2506 struct nlattr *rxsa_nest; 2507 2508 if (!rx_sa) 2509 continue; 2510 2511 rxsa_nest = nla_nest_start(skb, k++); 2512 if (!rxsa_nest) { 2513 nla_nest_cancel(skb, rxsa_list); 2514 nla_nest_cancel(skb, rxsc_nest); 2515 nla_nest_cancel(skb, rxsc_list); 2516 goto nla_put_failure; 2517 } 2518 2519 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); 2520 if (!attr) { 2521 nla_nest_cancel(skb, rxsa_list); 2522 nla_nest_cancel(skb, rxsc_nest); 2523 nla_nest_cancel(skb, rxsc_list); 2524 goto 
nla_put_failure; 2525 } 2526 if (copy_rx_sa_stats(skb, rx_sa->stats)) { 2527 nla_nest_cancel(skb, attr); 2528 nla_nest_cancel(skb, rxsa_list); 2529 nla_nest_cancel(skb, rxsc_nest); 2530 nla_nest_cancel(skb, rxsc_list); 2531 goto nla_put_failure; 2532 } 2533 nla_nest_end(skb, attr); 2534 2535 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2536 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2537 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 2538 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2539 nla_nest_cancel(skb, rxsa_nest); 2540 nla_nest_cancel(skb, rxsc_nest); 2541 nla_nest_cancel(skb, rxsc_list); 2542 goto nla_put_failure; 2543 } 2544 nla_nest_end(skb, rxsa_nest); 2545 } 2546 2547 nla_nest_end(skb, rxsa_list); 2548 nla_nest_end(skb, rxsc_nest); 2549 } 2550 2551 nla_nest_end(skb, rxsc_list); 2552 2553 genlmsg_end(skb, hdr); 2554 2555 return 0; 2556 2557 nla_put_failure: 2558 genlmsg_cancel(skb, hdr); 2559 return -EMSGSIZE; 2560 } 2561 2562 static int macsec_generation = 1; /* protected by RTNL */ 2563 2564 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2565 { 2566 struct net *net = sock_net(skb->sk); 2567 struct net_device *dev; 2568 int dev_idx, d; 2569 2570 dev_idx = cb->args[0]; 2571 2572 d = 0; 2573 rtnl_lock(); 2574 2575 cb->seq = macsec_generation; 2576 2577 for_each_netdev(net, dev) { 2578 struct macsec_secy *secy; 2579 2580 if (d < dev_idx) 2581 goto next; 2582 2583 if (!netif_is_macsec(dev)) 2584 goto next; 2585 2586 secy = &macsec_priv(dev)->secy; 2587 if (dump_secy(secy, dev, skb, cb) < 0) 2588 goto done; 2589 next: 2590 d++; 2591 } 2592 2593 done: 2594 rtnl_unlock(); 2595 cb->args[0] = d; 2596 return skb->len; 2597 } 2598 2599 static const struct genl_ops macsec_genl_ops[] = { 2600 { 2601 .cmd = MACSEC_CMD_GET_TXSC, 2602 .dumpit = macsec_dump_txsc, 2603 .policy = macsec_genl_policy, 2604 }, 2605 { 2606 .cmd = MACSEC_CMD_ADD_RXSC, 2607 .doit = macsec_add_rxsc, 2608 .policy = macsec_genl_policy, 2609 .flags = GENL_ADMIN_PERM, 2610 }, 2611 { 2612 .cmd = MACSEC_CMD_DEL_RXSC, 2613 .doit = macsec_del_rxsc, 2614 .policy = macsec_genl_policy, 2615 .flags = GENL_ADMIN_PERM, 2616 }, 2617 { 2618 .cmd = MACSEC_CMD_UPD_RXSC, 2619 .doit = macsec_upd_rxsc, 2620 .policy = macsec_genl_policy, 2621 .flags = GENL_ADMIN_PERM, 2622 }, 2623 { 2624 .cmd = MACSEC_CMD_ADD_TXSA, 2625 .doit = macsec_add_txsa, 2626 .policy = macsec_genl_policy, 2627 .flags = GENL_ADMIN_PERM, 2628 }, 2629 { 2630 .cmd = MACSEC_CMD_DEL_TXSA, 2631 .doit = macsec_del_txsa, 2632 .policy = macsec_genl_policy, 2633 .flags = GENL_ADMIN_PERM, 2634 }, 2635 { 2636 .cmd = MACSEC_CMD_UPD_TXSA, 2637 .doit = macsec_upd_txsa, 2638 .policy = macsec_genl_policy, 2639 .flags = GENL_ADMIN_PERM, 2640 }, 2641 { 2642 .cmd = MACSEC_CMD_ADD_RXSA, 2643 .doit = macsec_add_rxsa, 2644 .policy = macsec_genl_policy, 2645 .flags = GENL_ADMIN_PERM, 2646 }, 2647 { 2648 .cmd = MACSEC_CMD_DEL_RXSA, 2649 .doit = macsec_del_rxsa, 2650 .policy = macsec_genl_policy, 2651 .flags = GENL_ADMIN_PERM, 2652 }, 2653 { 2654 .cmd = MACSEC_CMD_UPD_RXSA, 2655 .doit = macsec_upd_rxsa, 2656 .policy = macsec_genl_policy, 2657 .flags = GENL_ADMIN_PERM, 2658 }, 2659 }; 2660 2661 static struct genl_family macsec_fam __ro_after_init = { 2662 .name = MACSEC_GENL_NAME, 2663 .hdrsize = 0, 2664 .version = MACSEC_GENL_VERSION, 2665 .maxattr = MACSEC_ATTR_MAX, 2666 .netnsok = true, 2667 .module = THIS_MODULE, 2668 .ops = macsec_genl_ops, 2669 .n_ops = ARRAY_SIZE(macsec_genl_ops), 2670 }; 2671 2672 static 
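/*
 * A minimal sketch of the dump/resume convention that macsec_dump_txsc()
 * follows above: a genetlink dumpit is called repeatedly, cb->args[0] carries
 * the device index where the previous pass stopped, the walk is serialized by
 * RTNL, and returning skb->len (> 0) tells netlink to call back for the rest.
 * demo_dump() and demo_fill_one() are hypothetical stand-ins for
 * macsec_dump_txsc() and dump_secy().
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>

/* stand-in for dump_secy(): returns < 0 once the skb is full */
static int demo_fill_one(struct sk_buff *skb, struct net_device *dev)
{
	return nla_put_u32(skb, 1 /* hypothetical attribute */, dev->ifindex);
}

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int dev_idx = cb->args[0];	/* where the previous pass stopped */
	struct net_device *dev;
	int d = 0;

	rtnl_lock();
	for_each_netdev(net, dev) {
		if (d >= dev_idx && demo_fill_one(skb, dev) < 0)
			break;	/* skb full: resume at index d next time */
		d++;
	}
	rtnl_unlock();

	cb->args[0] = d;
	return skb->len;	/* non-zero: netlink keeps dumping */
}
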
netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 2673 struct net_device *dev) 2674 { 2675 struct macsec_dev *macsec = netdev_priv(dev); 2676 struct macsec_secy *secy = &macsec->secy; 2677 struct pcpu_secy_stats *secy_stats; 2678 int ret, len; 2679 2680 /* 10.5 */ 2681 if (!secy->protect_frames) { 2682 secy_stats = this_cpu_ptr(macsec->stats); 2683 u64_stats_update_begin(&secy_stats->syncp); 2684 secy_stats->stats.OutPktsUntagged++; 2685 u64_stats_update_end(&secy_stats->syncp); 2686 skb->dev = macsec->real_dev; 2687 len = skb->len; 2688 ret = dev_queue_xmit(skb); 2689 count_tx(dev, ret, len); 2690 return ret; 2691 } 2692 2693 if (!secy->operational) { 2694 kfree_skb(skb); 2695 dev->stats.tx_dropped++; 2696 return NETDEV_TX_OK; 2697 } 2698 2699 skb = macsec_encrypt(skb, dev); 2700 if (IS_ERR(skb)) { 2701 if (PTR_ERR(skb) != -EINPROGRESS) 2702 dev->stats.tx_dropped++; 2703 return NETDEV_TX_OK; 2704 } 2705 2706 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 2707 2708 macsec_encrypt_finish(skb, dev); 2709 len = skb->len; 2710 ret = dev_queue_xmit(skb); 2711 count_tx(dev, ret, len); 2712 return ret; 2713 } 2714 2715 #define MACSEC_FEATURES \ 2716 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2717 static struct lock_class_key macsec_netdev_addr_lock_key; 2718 2719 static int macsec_dev_init(struct net_device *dev) 2720 { 2721 struct macsec_dev *macsec = macsec_priv(dev); 2722 struct net_device *real_dev = macsec->real_dev; 2723 int err; 2724 2725 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2726 if (!dev->tstats) 2727 return -ENOMEM; 2728 2729 err = gro_cells_init(&macsec->gro_cells, dev); 2730 if (err) { 2731 free_percpu(dev->tstats); 2732 return err; 2733 } 2734 2735 dev->features = real_dev->features & MACSEC_FEATURES; 2736 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 2737 2738 dev->needed_headroom = real_dev->needed_headroom + 2739 MACSEC_NEEDED_HEADROOM; 2740 dev->needed_tailroom = real_dev->needed_tailroom + 2741 MACSEC_NEEDED_TAILROOM; 2742 2743 if (is_zero_ether_addr(dev->dev_addr)) 2744 eth_hw_addr_inherit(dev, real_dev); 2745 if (is_zero_ether_addr(dev->broadcast)) 2746 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 2747 2748 return 0; 2749 } 2750 2751 static void macsec_dev_uninit(struct net_device *dev) 2752 { 2753 struct macsec_dev *macsec = macsec_priv(dev); 2754 2755 gro_cells_destroy(&macsec->gro_cells); 2756 free_percpu(dev->tstats); 2757 } 2758 2759 static netdev_features_t macsec_fix_features(struct net_device *dev, 2760 netdev_features_t features) 2761 { 2762 struct macsec_dev *macsec = macsec_priv(dev); 2763 struct net_device *real_dev = macsec->real_dev; 2764 2765 features &= (real_dev->features & MACSEC_FEATURES) | 2766 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 2767 features |= NETIF_F_LLTX; 2768 2769 return features; 2770 } 2771 2772 static int macsec_dev_open(struct net_device *dev) 2773 { 2774 struct macsec_dev *macsec = macsec_priv(dev); 2775 struct net_device *real_dev = macsec->real_dev; 2776 int err; 2777 2778 if (!(real_dev->flags & IFF_UP)) 2779 return -ENETDOWN; 2780 2781 err = dev_uc_add(real_dev, dev->dev_addr); 2782 if (err < 0) 2783 return err; 2784 2785 if (dev->flags & IFF_ALLMULTI) { 2786 err = dev_set_allmulti(real_dev, 1); 2787 if (err < 0) 2788 goto del_unicast; 2789 } 2790 2791 if (dev->flags & IFF_PROMISC) { 2792 err = dev_set_promiscuity(real_dev, 1); 2793 if (err < 0) 2794 goto clear_allmulti; 2795 } 2796 2797 if (netif_carrier_ok(real_dev)) 2798 netif_carrier_on(dev); 2799 
2800 return 0; 2801 clear_allmulti: 2802 if (dev->flags & IFF_ALLMULTI) 2803 dev_set_allmulti(real_dev, -1); 2804 del_unicast: 2805 dev_uc_del(real_dev, dev->dev_addr); 2806 netif_carrier_off(dev); 2807 return err; 2808 } 2809 2810 static int macsec_dev_stop(struct net_device *dev) 2811 { 2812 struct macsec_dev *macsec = macsec_priv(dev); 2813 struct net_device *real_dev = macsec->real_dev; 2814 2815 netif_carrier_off(dev); 2816 2817 dev_mc_unsync(real_dev, dev); 2818 dev_uc_unsync(real_dev, dev); 2819 2820 if (dev->flags & IFF_ALLMULTI) 2821 dev_set_allmulti(real_dev, -1); 2822 2823 if (dev->flags & IFF_PROMISC) 2824 dev_set_promiscuity(real_dev, -1); 2825 2826 dev_uc_del(real_dev, dev->dev_addr); 2827 2828 return 0; 2829 } 2830 2831 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 2832 { 2833 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2834 2835 if (!(dev->flags & IFF_UP)) 2836 return; 2837 2838 if (change & IFF_ALLMULTI) 2839 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 2840 2841 if (change & IFF_PROMISC) 2842 dev_set_promiscuity(real_dev, 2843 dev->flags & IFF_PROMISC ? 1 : -1); 2844 } 2845 2846 static void macsec_dev_set_rx_mode(struct net_device *dev) 2847 { 2848 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2849 2850 dev_mc_sync(real_dev, dev); 2851 dev_uc_sync(real_dev, dev); 2852 } 2853 2854 static int macsec_set_mac_address(struct net_device *dev, void *p) 2855 { 2856 struct macsec_dev *macsec = macsec_priv(dev); 2857 struct net_device *real_dev = macsec->real_dev; 2858 struct sockaddr *addr = p; 2859 int err; 2860 2861 if (!is_valid_ether_addr(addr->sa_data)) 2862 return -EADDRNOTAVAIL; 2863 2864 if (!(dev->flags & IFF_UP)) 2865 goto out; 2866 2867 err = dev_uc_add(real_dev, addr->sa_data); 2868 if (err < 0) 2869 return err; 2870 2871 dev_uc_del(real_dev, dev->dev_addr); 2872 2873 out: 2874 ether_addr_copy(dev->dev_addr, addr->sa_data); 2875 return 0; 2876 } 2877 2878 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 2879 { 2880 struct macsec_dev *macsec = macsec_priv(dev); 2881 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 2882 2883 if (macsec->real_dev->mtu - extra < new_mtu) 2884 return -ERANGE; 2885 2886 dev->mtu = new_mtu; 2887 2888 return 0; 2889 } 2890 2891 static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, 2892 struct rtnl_link_stats64 *s) 2893 { 2894 int cpu; 2895 2896 if (!dev->tstats) 2897 return s; 2898 2899 for_each_possible_cpu(cpu) { 2900 struct pcpu_sw_netstats *stats; 2901 struct pcpu_sw_netstats tmp; 2902 int start; 2903 2904 stats = per_cpu_ptr(dev->tstats, cpu); 2905 do { 2906 start = u64_stats_fetch_begin_irq(&stats->syncp); 2907 tmp.rx_packets = stats->rx_packets; 2908 tmp.rx_bytes = stats->rx_bytes; 2909 tmp.tx_packets = stats->tx_packets; 2910 tmp.tx_bytes = stats->tx_bytes; 2911 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2912 2913 s->rx_packets += tmp.rx_packets; 2914 s->rx_bytes += tmp.rx_bytes; 2915 s->tx_packets += tmp.tx_packets; 2916 s->tx_bytes += tmp.tx_bytes; 2917 } 2918 2919 s->rx_dropped = dev->stats.rx_dropped; 2920 s->tx_dropped = dev->stats.tx_dropped; 2921 2922 return s; 2923 } 2924 2925 static int macsec_get_iflink(const struct net_device *dev) 2926 { 2927 return macsec_priv(dev)->real_dev->ifindex; 2928 } 2929 2930 2931 static int macsec_get_nest_level(struct net_device *dev) 2932 { 2933 return macsec_priv(dev)->nest_level; 2934 } 2935 2936 2937 static const struct net_device_ops 
macsec_netdev_ops = { 2938 .ndo_init = macsec_dev_init, 2939 .ndo_uninit = macsec_dev_uninit, 2940 .ndo_open = macsec_dev_open, 2941 .ndo_stop = macsec_dev_stop, 2942 .ndo_fix_features = macsec_fix_features, 2943 .ndo_change_mtu = macsec_change_mtu, 2944 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 2945 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 2946 .ndo_set_mac_address = macsec_set_mac_address, 2947 .ndo_start_xmit = macsec_start_xmit, 2948 .ndo_get_stats64 = macsec_get_stats64, 2949 .ndo_get_iflink = macsec_get_iflink, 2950 .ndo_get_lock_subclass = macsec_get_nest_level, 2951 }; 2952 2953 static const struct device_type macsec_type = { 2954 .name = "macsec", 2955 }; 2956 2957 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 2958 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 2959 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 2960 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 2961 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 2962 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 2963 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 2964 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 2965 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 2966 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 2967 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 2968 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 2969 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 2970 }; 2971 2972 static void macsec_free_netdev(struct net_device *dev) 2973 { 2974 struct macsec_dev *macsec = macsec_priv(dev); 2975 struct net_device *real_dev = macsec->real_dev; 2976 2977 free_percpu(macsec->stats); 2978 free_percpu(macsec->secy.tx_sc.stats); 2979 2980 dev_put(real_dev); 2981 free_netdev(dev); 2982 } 2983 2984 static void macsec_setup(struct net_device *dev) 2985 { 2986 ether_setup(dev); 2987 dev->min_mtu = 0; 2988 dev->max_mtu = ETH_MAX_MTU; 2989 dev->priv_flags |= IFF_NO_QUEUE; 2990 dev->netdev_ops = &macsec_netdev_ops; 2991 dev->destructor = macsec_free_netdev; 2992 SET_NETDEV_DEVTYPE(dev, &macsec_type); 2993 2994 eth_zero_addr(dev->broadcast); 2995 } 2996 2997 static void macsec_changelink_common(struct net_device *dev, 2998 struct nlattr *data[]) 2999 { 3000 struct macsec_secy *secy; 3001 struct macsec_tx_sc *tx_sc; 3002 3003 secy = &macsec_priv(dev)->secy; 3004 tx_sc = &secy->tx_sc; 3005 3006 if (data[IFLA_MACSEC_ENCODING_SA]) { 3007 struct macsec_tx_sa *tx_sa; 3008 3009 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3010 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3011 3012 secy->operational = tx_sa && tx_sa->active; 3013 } 3014 3015 if (data[IFLA_MACSEC_WINDOW]) 3016 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3017 3018 if (data[IFLA_MACSEC_ENCRYPT]) 3019 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3020 3021 if (data[IFLA_MACSEC_PROTECT]) 3022 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3023 3024 if (data[IFLA_MACSEC_INC_SCI]) 3025 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3026 3027 if (data[IFLA_MACSEC_ES]) 3028 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3029 3030 if (data[IFLA_MACSEC_SCB]) 3031 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3032 3033 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3034 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3035 3036 if (data[IFLA_MACSEC_VALIDATION]) 3037 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3038 } 3039 3040 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3041 struct nlattr *data[]) 3042 { 3043 if 
(!data) 3044 return 0; 3045 3046 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3047 data[IFLA_MACSEC_ICV_LEN] || 3048 data[IFLA_MACSEC_SCI] || 3049 data[IFLA_MACSEC_PORT]) 3050 return -EINVAL; 3051 3052 macsec_changelink_common(dev, data); 3053 3054 return 0; 3055 } 3056 3057 static void macsec_del_dev(struct macsec_dev *macsec) 3058 { 3059 int i; 3060 3061 while (macsec->secy.rx_sc) { 3062 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3063 3064 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3065 free_rx_sc(rx_sc); 3066 } 3067 3068 for (i = 0; i < MACSEC_NUM_AN; i++) { 3069 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3070 3071 if (sa) { 3072 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3073 clear_tx_sa(sa); 3074 } 3075 } 3076 } 3077 3078 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3079 { 3080 struct macsec_dev *macsec = macsec_priv(dev); 3081 struct net_device *real_dev = macsec->real_dev; 3082 3083 unregister_netdevice_queue(dev, head); 3084 list_del_rcu(&macsec->secys); 3085 macsec_del_dev(macsec); 3086 netdev_upper_dev_unlink(real_dev, dev); 3087 3088 macsec_generation++; 3089 } 3090 3091 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3092 { 3093 struct macsec_dev *macsec = macsec_priv(dev); 3094 struct net_device *real_dev = macsec->real_dev; 3095 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3096 3097 macsec_common_dellink(dev, head); 3098 3099 if (list_empty(&rxd->secys)) { 3100 netdev_rx_handler_unregister(real_dev); 3101 kfree(rxd); 3102 } 3103 } 3104 3105 static int register_macsec_dev(struct net_device *real_dev, 3106 struct net_device *dev) 3107 { 3108 struct macsec_dev *macsec = macsec_priv(dev); 3109 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3110 3111 if (!rxd) { 3112 int err; 3113 3114 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3115 if (!rxd) 3116 return -ENOMEM; 3117 3118 INIT_LIST_HEAD(&rxd->secys); 3119 3120 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3121 rxd); 3122 if (err < 0) { 3123 kfree(rxd); 3124 return err; 3125 } 3126 } 3127 3128 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3129 return 0; 3130 } 3131 3132 static bool sci_exists(struct net_device *dev, sci_t sci) 3133 { 3134 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3135 struct macsec_dev *macsec; 3136 3137 list_for_each_entry(macsec, &rxd->secys, secys) { 3138 if (macsec->secy.sci == sci) 3139 return true; 3140 } 3141 3142 return false; 3143 } 3144 3145 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3146 { 3147 return make_sci(dev->dev_addr, port); 3148 } 3149 3150 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3151 { 3152 struct macsec_dev *macsec = macsec_priv(dev); 3153 struct macsec_secy *secy = &macsec->secy; 3154 3155 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3156 if (!macsec->stats) 3157 return -ENOMEM; 3158 3159 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3160 if (!secy->tx_sc.stats) { 3161 free_percpu(macsec->stats); 3162 return -ENOMEM; 3163 } 3164 3165 if (sci == MACSEC_UNDEF_SCI) 3166 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3167 3168 secy->netdev = dev; 3169 secy->operational = true; 3170 secy->key_len = DEFAULT_SAK_LEN; 3171 secy->icv_len = icv_len; 3172 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3173 secy->protect_frames = true; 3174 secy->replay_protect = false; 3175 3176 secy->sci = sci; 3177 secy->tx_sc.active = true; 3178 
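/*
 * A minimal sketch, with hypothetical demo_* names, of the rx_handler
 * bookkeeping that register_macsec_dev() and macsec_dellink() implement
 * above: the first instance on a lower device registers the frame handler and
 * hangs a list head off rx_handler_data, later instances merely join that
 * list, and the handler is only unregistered once the list runs empty.  The
 * real driver additionally rejects lower devices already owned by a foreign
 * rx_handler before reaching this point.
 */
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>

struct demo_rxh {
	struct list_head instances;	/* every virtual device on this lower dev */
};

static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;		/* stand-in for macsec_handle_frame() */
}

/* caller holds RTNL, as the newlink path above does */
static int demo_attach(struct net_device *lower, struct list_head *me)
{
	struct demo_rxh *rxh = rtnl_dereference(lower->rx_handler_data);

	if (!rxh) {
		int err;

		rxh = kmalloc(sizeof(*rxh), GFP_KERNEL);
		if (!rxh)
			return -ENOMEM;
		INIT_LIST_HEAD(&rxh->instances);

		err = netdev_rx_handler_register(lower, demo_handle_frame, rxh);
		if (err) {
			kfree(rxh);
			return err;
		}
	}

	list_add_tail_rcu(me, &rxh->instances);
	return 0;
}
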
secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3179 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3180 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3181 secy->tx_sc.end_station = false; 3182 secy->tx_sc.scb = false; 3183 3184 return 0; 3185 } 3186 3187 static int macsec_newlink(struct net *net, struct net_device *dev, 3188 struct nlattr *tb[], struct nlattr *data[]) 3189 { 3190 struct macsec_dev *macsec = macsec_priv(dev); 3191 struct net_device *real_dev; 3192 int err; 3193 sci_t sci; 3194 u8 icv_len = DEFAULT_ICV_LEN; 3195 rx_handler_func_t *rx_handler; 3196 3197 if (!tb[IFLA_LINK]) 3198 return -EINVAL; 3199 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 3200 if (!real_dev) 3201 return -ENODEV; 3202 3203 dev->priv_flags |= IFF_MACSEC; 3204 3205 macsec->real_dev = real_dev; 3206 3207 if (data && data[IFLA_MACSEC_ICV_LEN]) 3208 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3209 dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 3210 3211 rx_handler = rtnl_dereference(real_dev->rx_handler); 3212 if (rx_handler && rx_handler != macsec_handle_frame) 3213 return -EBUSY; 3214 3215 err = register_netdevice(dev); 3216 if (err < 0) 3217 return err; 3218 3219 dev_hold(real_dev); 3220 3221 macsec->nest_level = dev_get_nest_level(real_dev) + 1; 3222 netdev_lockdep_set_classes(dev); 3223 lockdep_set_class_and_subclass(&dev->addr_list_lock, 3224 &macsec_netdev_addr_lock_key, 3225 macsec_get_nest_level(dev)); 3226 3227 err = netdev_upper_dev_link(real_dev, dev); 3228 if (err < 0) 3229 goto unregister; 3230 3231 /* need to be already registered so that ->init has run and 3232 * the MAC addr is set 3233 */ 3234 if (data && data[IFLA_MACSEC_SCI]) 3235 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 3236 else if (data && data[IFLA_MACSEC_PORT]) 3237 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 3238 else 3239 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3240 3241 if (rx_handler && sci_exists(real_dev, sci)) { 3242 err = -EBUSY; 3243 goto unlink; 3244 } 3245 3246 err = macsec_add_dev(dev, sci, icv_len); 3247 if (err) 3248 goto unlink; 3249 3250 if (data) 3251 macsec_changelink_common(dev, data); 3252 3253 err = register_macsec_dev(real_dev, dev); 3254 if (err < 0) 3255 goto del_dev; 3256 3257 macsec_generation++; 3258 3259 return 0; 3260 3261 del_dev: 3262 macsec_del_dev(macsec); 3263 unlink: 3264 netdev_upper_dev_unlink(real_dev, dev); 3265 unregister: 3266 unregister_netdevice(dev); 3267 return err; 3268 } 3269 3270 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3271 { 3272 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3273 u8 icv_len = DEFAULT_ICV_LEN; 3274 int flag; 3275 bool es, scb, sci; 3276 3277 if (!data) 3278 return 0; 3279 3280 if (data[IFLA_MACSEC_CIPHER_SUITE]) 3281 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 3282 3283 if (data[IFLA_MACSEC_ICV_LEN]) { 3284 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3285 if (icv_len != DEFAULT_ICV_LEN) { 3286 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 3287 struct crypto_aead *dummy_tfm; 3288 3289 dummy_tfm = macsec_alloc_tfm(dummy_key, 3290 DEFAULT_SAK_LEN, 3291 icv_len); 3292 if (IS_ERR(dummy_tfm)) 3293 return PTR_ERR(dummy_tfm); 3294 crypto_free_aead(dummy_tfm); 3295 } 3296 } 3297 3298 switch (csid) { 3299 case MACSEC_DEFAULT_CIPHER_ID: 3300 case MACSEC_DEFAULT_CIPHER_ALT: 3301 if (icv_len < MACSEC_MIN_ICV_LEN || 3302 icv_len > MACSEC_STD_ICV_LEN) 3303 return -EINVAL; 3304 break; 3305 default: 3306 return -EINVAL; 3307 } 3308 3309 if (data[IFLA_MACSEC_ENCODING_SA]) { 3310 if 
(nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 3311 return -EINVAL; 3312 } 3313 3314 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 3315 flag < IFLA_MACSEC_VALIDATION; 3316 flag++) { 3317 if (data[flag]) { 3318 if (nla_get_u8(data[flag]) > 1) 3319 return -EINVAL; 3320 } 3321 } 3322 3323 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 3324 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 3325 scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; 3326 3327 if ((sci && (scb || es)) || (scb && es)) 3328 return -EINVAL; 3329 3330 if (data[IFLA_MACSEC_VALIDATION] && 3331 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3332 return -EINVAL; 3333 3334 if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3335 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3336 !data[IFLA_MACSEC_WINDOW]) 3337 return -EINVAL; 3338 3339 return 0; 3340 } 3341 3342 static struct net *macsec_get_link_net(const struct net_device *dev) 3343 { 3344 return dev_net(macsec_priv(dev)->real_dev); 3345 } 3346 3347 static size_t macsec_get_size(const struct net_device *dev) 3348 { 3349 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ 3350 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */ 3351 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */ 3352 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */ 3353 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */ 3354 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */ 3355 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */ 3356 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */ 3357 nla_total_size(1) + /* IFLA_MACSEC_ES */ 3358 nla_total_size(1) + /* IFLA_MACSEC_SCB */ 3359 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ 3360 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ 3361 0; 3362 } 3363 3364 static int macsec_fill_info(struct sk_buff *skb, 3365 const struct net_device *dev) 3366 { 3367 struct macsec_secy *secy = &macsec_priv(dev)->secy; 3368 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3369 3370 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, 3371 IFLA_MACSEC_PAD) || 3372 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3373 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, 3374 MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) || 3375 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3376 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3377 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3378 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || 3379 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || 3380 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || 3381 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || 3382 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || 3383 0) 3384 goto nla_put_failure; 3385 3386 if (secy->replay_protect) { 3387 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) 3388 goto nla_put_failure; 3389 } 3390 3391 return 0; 3392 3393 nla_put_failure: 3394 return -EMSGSIZE; 3395 } 3396 3397 static struct rtnl_link_ops macsec_link_ops __read_mostly = { 3398 .kind = "macsec", 3399 .priv_size = sizeof(struct macsec_dev), 3400 .maxtype = IFLA_MACSEC_MAX, 3401 .policy = macsec_rtnl_policy, 3402 .setup = macsec_setup, 3403 .validate = macsec_validate_attr, 3404 .newlink = macsec_newlink, 3405 .changelink = macsec_changelink, 3406 .dellink = macsec_dellink, 3407 .get_size = macsec_get_size, 3408 .fill_info = macsec_fill_info, 3409 .get_link_net = macsec_get_link_net, 3410 }; 3411 3412 static bool 
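/*
 * A minimal sketch of the .get_size()/.fill_info() contract implemented by
 * macsec_get_size() and macsec_fill_info() above: get_size() must return an
 * upper bound on the IFLA_INFO_DATA payload, so every attribute emitted by
 * fill_info() needs a matching nla_total_size() term.  DEMO_ATTR_* and the
 * demo_* helpers are hypothetical.
 */
#include <linux/netdevice.h>
#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_FLAG, DEMO_ATTR_WINDOW };

static size_t demo_get_size(const struct net_device *dev)
{
	return nla_total_size(1) +	/* DEMO_ATTR_FLAG   (u8)  */
	       nla_total_size(4) +	/* DEMO_ATTR_WINDOW (u32) */
	       0;
}

static int demo_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	/* must stay within the budget announced by demo_get_size() */
	if (nla_put_u8(skb, DEMO_ATTR_FLAG, 1) ||
	    nla_put_u32(skb, DEMO_ATTR_WINDOW, 32))
		return -EMSGSIZE;

	return 0;
}
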
is_macsec_master(struct net_device *dev) 3413 { 3414 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; 3415 } 3416 3417 static int macsec_notify(struct notifier_block *this, unsigned long event, 3418 void *ptr) 3419 { 3420 struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); 3421 LIST_HEAD(head); 3422 3423 if (!is_macsec_master(real_dev)) 3424 return NOTIFY_DONE; 3425 3426 switch (event) { 3427 case NETDEV_UNREGISTER: { 3428 struct macsec_dev *m, *n; 3429 struct macsec_rxh_data *rxd; 3430 3431 rxd = macsec_data_rtnl(real_dev); 3432 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3433 macsec_common_dellink(m->secy.netdev, &head); 3434 } 3435 3436 netdev_rx_handler_unregister(real_dev); 3437 kfree(rxd); 3438 3439 unregister_netdevice_many(&head); 3440 break; 3441 } 3442 case NETDEV_CHANGEMTU: { 3443 struct macsec_dev *m; 3444 struct macsec_rxh_data *rxd; 3445 3446 rxd = macsec_data_rtnl(real_dev); 3447 list_for_each_entry(m, &rxd->secys, secys) { 3448 struct net_device *dev = m->secy.netdev; 3449 unsigned int mtu = real_dev->mtu - (m->secy.icv_len + 3450 macsec_extra_len(true)); 3451 3452 if (dev->mtu > mtu) 3453 dev_set_mtu(dev, mtu); 3454 } 3455 } 3456 } 3457 3458 return NOTIFY_OK; 3459 } 3460 3461 static struct notifier_block macsec_notifier = { 3462 .notifier_call = macsec_notify, 3463 }; 3464 3465 static int __init macsec_init(void) 3466 { 3467 int err; 3468 3469 pr_info("MACsec IEEE 802.1AE\n"); 3470 err = register_netdevice_notifier(&macsec_notifier); 3471 if (err) 3472 return err; 3473 3474 err = rtnl_link_register(&macsec_link_ops); 3475 if (err) 3476 goto notifier; 3477 3478 err = genl_register_family(&macsec_fam); 3479 if (err) 3480 goto rtnl; 3481 3482 return 0; 3483 3484 rtnl: 3485 rtnl_link_unregister(&macsec_link_ops); 3486 notifier: 3487 unregister_netdevice_notifier(&macsec_notifier); 3488 return err; 3489 } 3490 3491 static void __exit macsec_exit(void) 3492 { 3493 genl_unregister_family(&macsec_fam); 3494 rtnl_link_unregister(&macsec_link_ops); 3495 unregister_netdevice_notifier(&macsec_notifier); 3496 rcu_barrier(); 3497 } 3498 3499 module_init(macsec_init); 3500 module_exit(macsec_exit); 3501 3502 MODULE_ALIAS_RTNL_LINK("macsec"); 3503 3504 MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 3505 MODULE_LICENSE("GPL v2"); 3506
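
/*
 * A minimal sketch of the register/unwind discipline used by macsec_init()
 * and macsec_exit() above: facilities are registered in order, a failure
 * unwinds only the steps that already succeeded, and module exit tears them
 * down in reverse before rcu_barrier() waits for any pending call_rcu()
 * callbacks (such as the SA/SC free paths).  The demo_* stubs stand in for
 * the notifier, rtnl_link_ops and genl family registrations; the real driver
 * wires its init/exit through module_init()/module_exit() as shown above.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>

static int demo_register_a(void) { return 0; }	/* stub */
static void demo_unregister_a(void) { }		/* stub */
static int demo_register_b(void) { return 0; }	/* stub */
static void demo_unregister_b(void) { }		/* stub */

static int __init demo_init(void)
{
	int err;

	err = demo_register_a();
	if (err)
		return err;

	err = demo_register_b();
	if (err)
		goto unwind_a;	/* undo only what already succeeded */

	return 0;

unwind_a:
	demo_unregister_a();
	return err;
}

static void __exit demo_exit(void)
{
	demo_unregister_b();	/* reverse order of demo_init() */
	demo_unregister_a();
	rcu_barrier();		/* wait for outstanding call_rcu() callbacks */
}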