/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: SA is usable for reception
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: SA is usable for transmission
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the End Station bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

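/* Note on SCIs: the 64-bit secure channel identifier is the channel's
 * 6-byte MAC address followed by a 16-bit port number (see make_sci()
 * below). send_sci() decides whether the transmit SecTAG carries an
 * explicit SCI: it is included when the administrator requested it, or
 * when more than one receive channel is configured and neither the End
 * Station nor the SCB shortcut applies. When the SCI is omitted,
 * receivers reconstruct it from the source MAC address and the default
 * port number MACSEC_PORT_ES, as macsec_frame_sci() does.
 */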
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
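	/* crypto state for this frame: the AEAD request, IV and scatterlist
	 * below are carved out of a single allocation by macsec_alloc_req().
	 * The Ethernet header plus SecTAG form the associated data, the
	 * payload is the ciphertext (for integrity-only mode the whole frame
	 * is associated data), and the ICV is appended at the tail.
	 */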
struct aead_request *req; 661 struct macsec_secy *secy; 662 struct macsec_tx_sc *tx_sc; 663 struct macsec_tx_sa *tx_sa; 664 struct macsec_dev *macsec = macsec_priv(dev); 665 bool sci_present; 666 u32 pn; 667 668 secy = &macsec->secy; 669 tx_sc = &secy->tx_sc; 670 671 /* 10.5.1 TX SA assignment */ 672 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); 673 if (!tx_sa) { 674 secy->operational = false; 675 kfree_skb(skb); 676 return ERR_PTR(-EINVAL); 677 } 678 679 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 680 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 681 struct sk_buff *nskb = skb_copy_expand(skb, 682 MACSEC_NEEDED_HEADROOM, 683 MACSEC_NEEDED_TAILROOM, 684 GFP_ATOMIC); 685 if (likely(nskb)) { 686 consume_skb(skb); 687 skb = nskb; 688 } else { 689 macsec_txsa_put(tx_sa); 690 kfree_skb(skb); 691 return ERR_PTR(-ENOMEM); 692 } 693 } else { 694 skb = skb_unshare(skb, GFP_ATOMIC); 695 if (!skb) { 696 macsec_txsa_put(tx_sa); 697 return ERR_PTR(-ENOMEM); 698 } 699 } 700 701 unprotected_len = skb->len; 702 eth = eth_hdr(skb); 703 sci_present = send_sci(secy); 704 hh = skb_push(skb, macsec_extra_len(sci_present)); 705 memmove(hh, eth, 2 * ETH_ALEN); 706 707 pn = tx_sa_update_pn(tx_sa, secy); 708 if (pn == 0) { 709 macsec_txsa_put(tx_sa); 710 kfree_skb(skb); 711 return ERR_PTR(-ENOLINK); 712 } 713 macsec_fill_sectag(hh, secy, pn, sci_present); 714 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 715 716 skb_put(skb, secy->icv_len); 717 718 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { 719 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 720 721 u64_stats_update_begin(&secy_stats->syncp); 722 secy_stats->stats.OutPktsTooLong++; 723 u64_stats_update_end(&secy_stats->syncp); 724 725 macsec_txsa_put(tx_sa); 726 kfree_skb(skb); 727 return ERR_PTR(-EINVAL); 728 } 729 730 ret = skb_cow_data(skb, 0, &trailer); 731 if (unlikely(ret < 0)) { 732 macsec_txsa_put(tx_sa); 733 kfree_skb(skb); 734 return ERR_PTR(ret); 735 } 736 737 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret); 738 if (!req) { 739 macsec_txsa_put(tx_sa); 740 kfree_skb(skb); 741 return ERR_PTR(-ENOMEM); 742 } 743 744 macsec_fill_iv(iv, secy->sci, pn); 745 746 sg_init_table(sg, ret); 747 ret = skb_to_sgvec(skb, sg, 0, skb->len); 748 if (unlikely(ret < 0)) { 749 aead_request_free(req); 750 macsec_txsa_put(tx_sa); 751 kfree_skb(skb); 752 return ERR_PTR(ret); 753 } 754 755 if (tx_sc->encrypt) { 756 int len = skb->len - macsec_hdr_len(sci_present) - 757 secy->icv_len; 758 aead_request_set_crypt(req, sg, sg, len, iv); 759 aead_request_set_ad(req, macsec_hdr_len(sci_present)); 760 } else { 761 aead_request_set_crypt(req, sg, sg, 0, iv); 762 aead_request_set_ad(req, skb->len - secy->icv_len); 763 } 764 765 macsec_skb_cb(skb)->req = req; 766 macsec_skb_cb(skb)->tx_sa = tx_sa; 767 aead_request_set_callback(req, 0, macsec_encrypt_done, skb); 768 769 dev_hold(skb->dev); 770 ret = crypto_aead_encrypt(req); 771 if (ret == -EINPROGRESS) { 772 return ERR_PTR(ret); 773 } else if (ret != 0) { 774 dev_put(skb->dev); 775 kfree_skb(skb); 776 aead_request_free(req); 777 macsec_txsa_put(tx_sa); 778 return ERR_PTR(-EINVAL); 779 } 780 781 dev_put(skb->dev); 782 aead_request_free(req); 783 macsec_txsa_put(tx_sa); 784 785 return skb; 786 } 787 788 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) 789 { 790 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 791 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); 792 struct macsec_eth_header *hdr = 
macsec_ethhdr(skb); 793 u32 lowest_pn = 0; 794 795 spin_lock(&rx_sa->lock); 796 if (rx_sa->next_pn >= secy->replay_window) 797 lowest_pn = rx_sa->next_pn - secy->replay_window; 798 799 /* Now perform replay protection check again 800 * (see IEEE 802.1AE-2006 figure 10-5) 801 */ 802 if (secy->replay_protect && pn < lowest_pn) { 803 spin_unlock(&rx_sa->lock); 804 u64_stats_update_begin(&rxsc_stats->syncp); 805 rxsc_stats->stats.InPktsLate++; 806 u64_stats_update_end(&rxsc_stats->syncp); 807 return false; 808 } 809 810 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { 811 u64_stats_update_begin(&rxsc_stats->syncp); 812 if (hdr->tci_an & MACSEC_TCI_E) 813 rxsc_stats->stats.InOctetsDecrypted += skb->len; 814 else 815 rxsc_stats->stats.InOctetsValidated += skb->len; 816 u64_stats_update_end(&rxsc_stats->syncp); 817 } 818 819 if (!macsec_skb_cb(skb)->valid) { 820 spin_unlock(&rx_sa->lock); 821 822 /* 10.6.5 */ 823 if (hdr->tci_an & MACSEC_TCI_C || 824 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 825 u64_stats_update_begin(&rxsc_stats->syncp); 826 rxsc_stats->stats.InPktsNotValid++; 827 u64_stats_update_end(&rxsc_stats->syncp); 828 return false; 829 } 830 831 u64_stats_update_begin(&rxsc_stats->syncp); 832 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { 833 rxsc_stats->stats.InPktsInvalid++; 834 this_cpu_inc(rx_sa->stats->InPktsInvalid); 835 } else if (pn < lowest_pn) { 836 rxsc_stats->stats.InPktsDelayed++; 837 } else { 838 rxsc_stats->stats.InPktsUnchecked++; 839 } 840 u64_stats_update_end(&rxsc_stats->syncp); 841 } else { 842 u64_stats_update_begin(&rxsc_stats->syncp); 843 if (pn < lowest_pn) { 844 rxsc_stats->stats.InPktsDelayed++; 845 } else { 846 rxsc_stats->stats.InPktsOK++; 847 this_cpu_inc(rx_sa->stats->InPktsOK); 848 } 849 u64_stats_update_end(&rxsc_stats->syncp); 850 851 if (pn >= rx_sa->next_pn) 852 rx_sa->next_pn = pn + 1; 853 spin_unlock(&rx_sa->lock); 854 } 855 856 return true; 857 } 858 859 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) 860 { 861 skb->pkt_type = PACKET_HOST; 862 skb->protocol = eth_type_trans(skb, dev); 863 864 skb_reset_network_header(skb); 865 if (!skb_transport_header_was_set(skb)) 866 skb_reset_transport_header(skb); 867 skb_reset_mac_len(skb); 868 } 869 870 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) 871 { 872 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); 873 skb_pull(skb, hdr_len); 874 pskb_trim_unique(skb, skb->len - icv_len); 875 } 876 877 static void count_rx(struct net_device *dev, int len) 878 { 879 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); 880 881 u64_stats_update_begin(&stats->syncp); 882 stats->rx_packets++; 883 stats->rx_bytes += len; 884 u64_stats_update_end(&stats->syncp); 885 } 886 887 static void macsec_decrypt_done(struct crypto_async_request *base, int err) 888 { 889 struct sk_buff *skb = base->data; 890 struct net_device *dev = skb->dev; 891 struct macsec_dev *macsec = macsec_priv(dev); 892 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 893 struct macsec_rx_sc *rx_sc = rx_sa->sc; 894 int len; 895 u32 pn; 896 897 aead_request_free(macsec_skb_cb(skb)->req); 898 899 if (!err) 900 macsec_skb_cb(skb)->valid = true; 901 902 rcu_read_lock_bh(); 903 pn = ntohl(macsec_ethhdr(skb)->packet_number); 904 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { 905 rcu_read_unlock_bh(); 906 kfree_skb(skb); 907 goto out; 908 } 909 910 macsec_finalize_skb(skb, macsec->secy.icv_len, 911 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 912 
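	/* the SecTAG and ICV have been stripped at this point; re-parse the
	 * Ethernet header on behalf of the MACsec netdevice and hand the
	 * frame to GRO
	 */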
macsec_reset_skb(skb, macsec->secy.netdev); 913 914 len = skb->len; 915 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) 916 count_rx(dev, len); 917 918 rcu_read_unlock_bh(); 919 920 out: 921 macsec_rxsa_put(rx_sa); 922 macsec_rxsc_put(rx_sc); 923 dev_put(dev); 924 } 925 926 static struct sk_buff *macsec_decrypt(struct sk_buff *skb, 927 struct net_device *dev, 928 struct macsec_rx_sa *rx_sa, 929 sci_t sci, 930 struct macsec_secy *secy) 931 { 932 int ret; 933 struct scatterlist *sg; 934 struct sk_buff *trailer; 935 unsigned char *iv; 936 struct aead_request *req; 937 struct macsec_eth_header *hdr; 938 u16 icv_len = secy->icv_len; 939 940 macsec_skb_cb(skb)->valid = false; 941 skb = skb_share_check(skb, GFP_ATOMIC); 942 if (!skb) 943 return ERR_PTR(-ENOMEM); 944 945 ret = skb_cow_data(skb, 0, &trailer); 946 if (unlikely(ret < 0)) { 947 kfree_skb(skb); 948 return ERR_PTR(ret); 949 } 950 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret); 951 if (!req) { 952 kfree_skb(skb); 953 return ERR_PTR(-ENOMEM); 954 } 955 956 hdr = (struct macsec_eth_header *)skb->data; 957 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); 958 959 sg_init_table(sg, ret); 960 ret = skb_to_sgvec(skb, sg, 0, skb->len); 961 if (unlikely(ret < 0)) { 962 aead_request_free(req); 963 kfree_skb(skb); 964 return ERR_PTR(ret); 965 } 966 967 if (hdr->tci_an & MACSEC_TCI_E) { 968 /* confidentiality: ethernet + macsec header 969 * authenticated, encrypted payload 970 */ 971 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); 972 973 aead_request_set_crypt(req, sg, sg, len, iv); 974 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); 975 skb = skb_unshare(skb, GFP_ATOMIC); 976 if (!skb) { 977 aead_request_free(req); 978 return ERR_PTR(-ENOMEM); 979 } 980 } else { 981 /* integrity only: all headers + data authenticated */ 982 aead_request_set_crypt(req, sg, sg, icv_len, iv); 983 aead_request_set_ad(req, skb->len - icv_len); 984 } 985 986 macsec_skb_cb(skb)->req = req; 987 skb->dev = dev; 988 aead_request_set_callback(req, 0, macsec_decrypt_done, skb); 989 990 dev_hold(dev); 991 ret = crypto_aead_decrypt(req); 992 if (ret == -EINPROGRESS) { 993 return ERR_PTR(ret); 994 } else if (ret != 0) { 995 /* decryption/authentication failed 996 * 10.6 if validateFrames is disabled, deliver anyway 997 */ 998 if (ret != -EBADMSG) { 999 kfree_skb(skb); 1000 skb = ERR_PTR(ret); 1001 } 1002 } else { 1003 macsec_skb_cb(skb)->valid = true; 1004 } 1005 dev_put(dev); 1006 1007 aead_request_free(req); 1008 1009 return skb; 1010 } 1011 1012 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) 1013 { 1014 struct macsec_rx_sc *rx_sc; 1015 1016 for_each_rxsc(secy, rx_sc) { 1017 if (rx_sc->sci == sci) 1018 return rx_sc; 1019 } 1020 1021 return NULL; 1022 } 1023 1024 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) 1025 { 1026 struct macsec_rx_sc *rx_sc; 1027 1028 for_each_rxsc_rtnl(secy, rx_sc) { 1029 if (rx_sc->sci == sci) 1030 return rx_sc; 1031 } 1032 1033 return NULL; 1034 } 1035 1036 static void handle_not_macsec(struct sk_buff *skb) 1037 { 1038 struct macsec_rxh_data *rxd; 1039 struct macsec_dev *macsec; 1040 1041 rcu_read_lock(); 1042 rxd = macsec_data_rcu(skb->dev); 1043 1044 /* 10.6 If the management control validateFrames is not 1045 * Strict, frames without a SecTAG are received, counted, and 1046 * delivered to the Controlled Port 1047 */ 1048 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1049 struct sk_buff *nskb; 1050 struct 
pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 1051 1052 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1053 u64_stats_update_begin(&secy_stats->syncp); 1054 secy_stats->stats.InPktsNoTag++; 1055 u64_stats_update_end(&secy_stats->syncp); 1056 continue; 1057 } 1058 1059 /* deliver on this port */ 1060 nskb = skb_clone(skb, GFP_ATOMIC); 1061 if (!nskb) 1062 break; 1063 1064 nskb->dev = macsec->secy.netdev; 1065 1066 if (netif_rx(nskb) == NET_RX_SUCCESS) { 1067 u64_stats_update_begin(&secy_stats->syncp); 1068 secy_stats->stats.InPktsUntagged++; 1069 u64_stats_update_end(&secy_stats->syncp); 1070 } 1071 } 1072 1073 rcu_read_unlock(); 1074 } 1075 1076 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 1077 { 1078 struct sk_buff *skb = *pskb; 1079 struct net_device *dev = skb->dev; 1080 struct macsec_eth_header *hdr; 1081 struct macsec_secy *secy = NULL; 1082 struct macsec_rx_sc *rx_sc; 1083 struct macsec_rx_sa *rx_sa; 1084 struct macsec_rxh_data *rxd; 1085 struct macsec_dev *macsec; 1086 sci_t sci; 1087 u32 pn; 1088 bool cbit; 1089 struct pcpu_rx_sc_stats *rxsc_stats; 1090 struct pcpu_secy_stats *secy_stats; 1091 bool pulled_sci; 1092 int ret; 1093 1094 if (skb_headroom(skb) < ETH_HLEN) 1095 goto drop_direct; 1096 1097 hdr = macsec_ethhdr(skb); 1098 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) { 1099 handle_not_macsec(skb); 1100 1101 /* and deliver to the uncontrolled port */ 1102 return RX_HANDLER_PASS; 1103 } 1104 1105 skb = skb_unshare(skb, GFP_ATOMIC); 1106 if (!skb) { 1107 *pskb = NULL; 1108 return RX_HANDLER_CONSUMED; 1109 } 1110 1111 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); 1112 if (!pulled_sci) { 1113 if (!pskb_may_pull(skb, macsec_extra_len(false))) 1114 goto drop_direct; 1115 } 1116 1117 hdr = macsec_ethhdr(skb); 1118 1119 /* Frames with a SecTAG that has the TCI E bit set but the C 1120 * bit clear are discarded, as this reserved encoding is used 1121 * to identify frames with a SecTAG that are not to be 1122 * delivered to the Controlled Port. 1123 */ 1124 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) 1125 return RX_HANDLER_PASS; 1126 1127 /* now, pull the extra length */ 1128 if (hdr->tci_an & MACSEC_TCI_SC) { 1129 if (!pulled_sci) 1130 goto drop_direct; 1131 } 1132 1133 /* ethernet header is part of crypto processing */ 1134 skb_push(skb, ETH_HLEN); 1135 1136 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); 1137 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; 1138 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); 1139 1140 rcu_read_lock(); 1141 rxd = macsec_data_rcu(skb->dev); 1142 1143 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1144 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); 1145 1146 sc = sc ? 
macsec_rxsc_get(sc) : NULL; 1147 1148 if (sc) { 1149 secy = &macsec->secy; 1150 rx_sc = sc; 1151 break; 1152 } 1153 } 1154 1155 if (!secy) 1156 goto nosci; 1157 1158 dev = secy->netdev; 1159 macsec = macsec_priv(dev); 1160 secy_stats = this_cpu_ptr(macsec->stats); 1161 rxsc_stats = this_cpu_ptr(rx_sc->stats); 1162 1163 if (!macsec_validate_skb(skb, secy->icv_len)) { 1164 u64_stats_update_begin(&secy_stats->syncp); 1165 secy_stats->stats.InPktsBadTag++; 1166 u64_stats_update_end(&secy_stats->syncp); 1167 goto drop_nosa; 1168 } 1169 1170 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); 1171 if (!rx_sa) { 1172 /* 10.6.1 if the SA is not in use */ 1173 1174 /* If validateFrames is Strict or the C bit in the 1175 * SecTAG is set, discard 1176 */ 1177 if (hdr->tci_an & MACSEC_TCI_C || 1178 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 1179 u64_stats_update_begin(&rxsc_stats->syncp); 1180 rxsc_stats->stats.InPktsNotUsingSA++; 1181 u64_stats_update_end(&rxsc_stats->syncp); 1182 goto drop_nosa; 1183 } 1184 1185 /* not Strict, the frame (with the SecTAG and ICV 1186 * removed) is delivered to the Controlled Port. 1187 */ 1188 u64_stats_update_begin(&rxsc_stats->syncp); 1189 rxsc_stats->stats.InPktsUnusedSA++; 1190 u64_stats_update_end(&rxsc_stats->syncp); 1191 goto deliver; 1192 } 1193 1194 /* First, PN check to avoid decrypting obviously wrong packets */ 1195 pn = ntohl(hdr->packet_number); 1196 if (secy->replay_protect) { 1197 bool late; 1198 1199 spin_lock(&rx_sa->lock); 1200 late = rx_sa->next_pn >= secy->replay_window && 1201 pn < (rx_sa->next_pn - secy->replay_window); 1202 spin_unlock(&rx_sa->lock); 1203 1204 if (late) { 1205 u64_stats_update_begin(&rxsc_stats->syncp); 1206 rxsc_stats->stats.InPktsLate++; 1207 u64_stats_update_end(&rxsc_stats->syncp); 1208 goto drop; 1209 } 1210 } 1211 1212 macsec_skb_cb(skb)->rx_sa = rx_sa; 1213 1214 /* Disabled && !changed text => skip validation */ 1215 if (hdr->tci_an & MACSEC_TCI_C || 1216 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1217 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1218 1219 if (IS_ERR(skb)) { 1220 /* the decrypt callback needs the reference */ 1221 if (PTR_ERR(skb) != -EINPROGRESS) { 1222 macsec_rxsa_put(rx_sa); 1223 macsec_rxsc_put(rx_sc); 1224 } 1225 rcu_read_unlock(); 1226 *pskb = NULL; 1227 return RX_HANDLER_CONSUMED; 1228 } 1229 1230 if (!macsec_post_decrypt(skb, secy, pn)) 1231 goto drop; 1232 1233 deliver: 1234 macsec_finalize_skb(skb, secy->icv_len, 1235 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1236 macsec_reset_skb(skb, secy->netdev); 1237 1238 if (rx_sa) 1239 macsec_rxsa_put(rx_sa); 1240 macsec_rxsc_put(rx_sc); 1241 1242 ret = gro_cells_receive(&macsec->gro_cells, skb); 1243 if (ret == NET_RX_SUCCESS) 1244 count_rx(dev, skb->len); 1245 else 1246 macsec->secy.netdev->stats.rx_dropped++; 1247 1248 rcu_read_unlock(); 1249 1250 *pskb = NULL; 1251 return RX_HANDLER_CONSUMED; 1252 1253 drop: 1254 macsec_rxsa_put(rx_sa); 1255 drop_nosa: 1256 macsec_rxsc_put(rx_sc); 1257 rcu_read_unlock(); 1258 drop_direct: 1259 kfree_skb(skb); 1260 *pskb = NULL; 1261 return RX_HANDLER_CONSUMED; 1262 1263 nosci: 1264 /* 10.6.1 if the SC is not found */ 1265 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1266 if (!cbit) 1267 macsec_finalize_skb(skb, DEFAULT_ICV_LEN, 1268 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1269 1270 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1271 struct sk_buff *nskb; 1272 1273 secy_stats = this_cpu_ptr(macsec->stats); 1274 1275 /* If validateFrames is Strict or the C bit in the 
1276 * SecTAG is set, discard 1277 */ 1278 if (cbit || 1279 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1280 u64_stats_update_begin(&secy_stats->syncp); 1281 secy_stats->stats.InPktsNoSCI++; 1282 u64_stats_update_end(&secy_stats->syncp); 1283 continue; 1284 } 1285 1286 /* not strict, the frame (with the SecTAG and ICV 1287 * removed) is delivered to the Controlled Port. 1288 */ 1289 nskb = skb_clone(skb, GFP_ATOMIC); 1290 if (!nskb) 1291 break; 1292 1293 macsec_reset_skb(nskb, macsec->secy.netdev); 1294 1295 ret = netif_rx(nskb); 1296 if (ret == NET_RX_SUCCESS) { 1297 u64_stats_update_begin(&secy_stats->syncp); 1298 secy_stats->stats.InPktsUnknownSCI++; 1299 u64_stats_update_end(&secy_stats->syncp); 1300 } else { 1301 macsec->secy.netdev->stats.rx_dropped++; 1302 } 1303 } 1304 1305 rcu_read_unlock(); 1306 *pskb = skb; 1307 return RX_HANDLER_PASS; 1308 } 1309 1310 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1311 { 1312 struct crypto_aead *tfm; 1313 int ret; 1314 1315 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1316 1317 if (IS_ERR(tfm)) 1318 return tfm; 1319 1320 ret = crypto_aead_setkey(tfm, key, key_len); 1321 if (ret < 0) 1322 goto fail; 1323 1324 ret = crypto_aead_setauthsize(tfm, icv_len); 1325 if (ret < 0) 1326 goto fail; 1327 1328 return tfm; 1329 fail: 1330 crypto_free_aead(tfm); 1331 return ERR_PTR(ret); 1332 } 1333 1334 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1335 int icv_len) 1336 { 1337 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1338 if (!rx_sa->stats) 1339 return -ENOMEM; 1340 1341 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1342 if (IS_ERR(rx_sa->key.tfm)) { 1343 free_percpu(rx_sa->stats); 1344 return PTR_ERR(rx_sa->key.tfm); 1345 } 1346 1347 rx_sa->active = false; 1348 rx_sa->next_pn = 1; 1349 refcount_set(&rx_sa->refcnt, 1); 1350 spin_lock_init(&rx_sa->lock); 1351 1352 return 0; 1353 } 1354 1355 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1356 { 1357 rx_sa->active = false; 1358 1359 macsec_rxsa_put(rx_sa); 1360 } 1361 1362 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1363 { 1364 int i; 1365 1366 for (i = 0; i < MACSEC_NUM_AN; i++) { 1367 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1368 1369 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1370 if (sa) 1371 clear_rx_sa(sa); 1372 } 1373 1374 macsec_rxsc_put(rx_sc); 1375 } 1376 1377 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1378 { 1379 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1380 1381 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1382 rx_sc; 1383 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1384 if (rx_sc->sci == sci) { 1385 if (rx_sc->active) 1386 secy->n_rx_sc--; 1387 rcu_assign_pointer(*rx_scp, rx_sc->next); 1388 return rx_sc; 1389 } 1390 } 1391 1392 return NULL; 1393 } 1394 1395 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) 1396 { 1397 struct macsec_rx_sc *rx_sc; 1398 struct macsec_dev *macsec; 1399 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1400 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1401 struct macsec_secy *secy; 1402 1403 list_for_each_entry(macsec, &rxd->secys, secys) { 1404 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1405 return ERR_PTR(-EEXIST); 1406 } 1407 1408 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1409 if (!rx_sc) 1410 return ERR_PTR(-ENOMEM); 1411 1412 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1413 if (!rx_sc->stats) { 1414 
kfree(rx_sc); 1415 return ERR_PTR(-ENOMEM); 1416 } 1417 1418 rx_sc->sci = sci; 1419 rx_sc->active = true; 1420 refcount_set(&rx_sc->refcnt, 1); 1421 1422 secy = &macsec_priv(dev)->secy; 1423 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1424 rcu_assign_pointer(secy->rx_sc, rx_sc); 1425 1426 if (rx_sc->active) 1427 secy->n_rx_sc++; 1428 1429 return rx_sc; 1430 } 1431 1432 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, 1433 int icv_len) 1434 { 1435 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); 1436 if (!tx_sa->stats) 1437 return -ENOMEM; 1438 1439 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1440 if (IS_ERR(tx_sa->key.tfm)) { 1441 free_percpu(tx_sa->stats); 1442 return PTR_ERR(tx_sa->key.tfm); 1443 } 1444 1445 tx_sa->active = false; 1446 refcount_set(&tx_sa->refcnt, 1); 1447 spin_lock_init(&tx_sa->lock); 1448 1449 return 0; 1450 } 1451 1452 static void clear_tx_sa(struct macsec_tx_sa *tx_sa) 1453 { 1454 tx_sa->active = false; 1455 1456 macsec_txsa_put(tx_sa); 1457 } 1458 1459 static struct genl_family macsec_fam; 1460 1461 static struct net_device *get_dev_from_nl(struct net *net, 1462 struct nlattr **attrs) 1463 { 1464 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); 1465 struct net_device *dev; 1466 1467 dev = __dev_get_by_index(net, ifindex); 1468 if (!dev) 1469 return ERR_PTR(-ENODEV); 1470 1471 if (!netif_is_macsec(dev)) 1472 return ERR_PTR(-ENODEV); 1473 1474 return dev; 1475 } 1476 1477 static sci_t nla_get_sci(const struct nlattr *nla) 1478 { 1479 return (__force sci_t)nla_get_u64(nla); 1480 } 1481 1482 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, 1483 int padattr) 1484 { 1485 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); 1486 } 1487 1488 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, 1489 struct nlattr **attrs, 1490 struct nlattr **tb_sa, 1491 struct net_device **devp, 1492 struct macsec_secy **secyp, 1493 struct macsec_tx_sc **scp, 1494 u8 *assoc_num) 1495 { 1496 struct net_device *dev; 1497 struct macsec_secy *secy; 1498 struct macsec_tx_sc *tx_sc; 1499 struct macsec_tx_sa *tx_sa; 1500 1501 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1502 return ERR_PTR(-EINVAL); 1503 1504 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1505 1506 dev = get_dev_from_nl(net, attrs); 1507 if (IS_ERR(dev)) 1508 return ERR_CAST(dev); 1509 1510 if (*assoc_num >= MACSEC_NUM_AN) 1511 return ERR_PTR(-EINVAL); 1512 1513 secy = &macsec_priv(dev)->secy; 1514 tx_sc = &secy->tx_sc; 1515 1516 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); 1517 if (!tx_sa) 1518 return ERR_PTR(-ENODEV); 1519 1520 *devp = dev; 1521 *scp = tx_sc; 1522 *secyp = secy; 1523 return tx_sa; 1524 } 1525 1526 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, 1527 struct nlattr **attrs, 1528 struct nlattr **tb_rxsc, 1529 struct net_device **devp, 1530 struct macsec_secy **secyp) 1531 { 1532 struct net_device *dev; 1533 struct macsec_secy *secy; 1534 struct macsec_rx_sc *rx_sc; 1535 sci_t sci; 1536 1537 dev = get_dev_from_nl(net, attrs); 1538 if (IS_ERR(dev)) 1539 return ERR_CAST(dev); 1540 1541 secy = &macsec_priv(dev)->secy; 1542 1543 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1544 return ERR_PTR(-EINVAL); 1545 1546 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1547 rx_sc = find_rx_sc_rtnl(secy, sci); 1548 if (!rx_sc) 1549 return ERR_PTR(-ENODEV); 1550 1551 *secyp = secy; 1552 *devp = dev; 1553 1554 return rx_sc; 1555 } 1556 1557 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, 1558 struct nlattr **attrs, 
1559 struct nlattr **tb_rxsc, 1560 struct nlattr **tb_sa, 1561 struct net_device **devp, 1562 struct macsec_secy **secyp, 1563 struct macsec_rx_sc **scp, 1564 u8 *assoc_num) 1565 { 1566 struct macsec_rx_sc *rx_sc; 1567 struct macsec_rx_sa *rx_sa; 1568 1569 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1570 return ERR_PTR(-EINVAL); 1571 1572 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1573 if (*assoc_num >= MACSEC_NUM_AN) 1574 return ERR_PTR(-EINVAL); 1575 1576 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); 1577 if (IS_ERR(rx_sc)) 1578 return ERR_CAST(rx_sc); 1579 1580 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); 1581 if (!rx_sa) 1582 return ERR_PTR(-ENODEV); 1583 1584 *scp = rx_sc; 1585 return rx_sa; 1586 } 1587 1588 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { 1589 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, 1590 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, 1591 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, 1592 }; 1593 1594 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { 1595 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, 1596 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, 1597 }; 1598 1599 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { 1600 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1601 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1602 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, 1603 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, 1604 .len = MACSEC_KEYID_LEN, }, 1605 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1606 .len = MACSEC_MAX_KEY_LEN, }, 1607 }; 1608 1609 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) 1610 { 1611 if (!attrs[MACSEC_ATTR_SA_CONFIG]) 1612 return -EINVAL; 1613 1614 if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) 1615 return -EINVAL; 1616 1617 return 0; 1618 } 1619 1620 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) 1621 { 1622 if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) 1623 return -EINVAL; 1624 1625 if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) 1626 return -EINVAL; 1627 1628 return 0; 1629 } 1630 1631 static bool validate_add_rxsa(struct nlattr **attrs) 1632 { 1633 if (!attrs[MACSEC_SA_ATTR_AN] || 1634 !attrs[MACSEC_SA_ATTR_KEY] || 1635 !attrs[MACSEC_SA_ATTR_KEYID]) 1636 return false; 1637 1638 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1639 return false; 1640 1641 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1642 return false; 1643 1644 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1645 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1646 return false; 1647 } 1648 1649 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1650 return false; 1651 1652 return true; 1653 } 1654 1655 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) 1656 { 1657 struct net_device *dev; 1658 struct nlattr **attrs = info->attrs; 1659 struct macsec_secy *secy; 1660 struct macsec_rx_sc *rx_sc; 1661 struct macsec_rx_sa *rx_sa; 1662 unsigned char assoc_num; 1663 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1664 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1665 int err; 1666 1667 if (!attrs[MACSEC_ATTR_IFINDEX]) 1668 return -EINVAL; 1669 1670 if (parse_sa_config(attrs, tb_sa)) 1671 return -EINVAL; 1672 1673 if (parse_rxsc_config(attrs, tb_rxsc)) 1674 return -EINVAL; 1675 1676 if (!validate_add_rxsa(tb_sa)) 1677 
return -EINVAL; 1678 1679 rtnl_lock(); 1680 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 1681 if (IS_ERR(rx_sc)) { 1682 rtnl_unlock(); 1683 return PTR_ERR(rx_sc); 1684 } 1685 1686 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1687 1688 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1689 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", 1690 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1691 rtnl_unlock(); 1692 return -EINVAL; 1693 } 1694 1695 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); 1696 if (rx_sa) { 1697 rtnl_unlock(); 1698 return -EBUSY; 1699 } 1700 1701 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1702 if (!rx_sa) { 1703 rtnl_unlock(); 1704 return -ENOMEM; 1705 } 1706 1707 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1708 secy->key_len, secy->icv_len); 1709 if (err < 0) { 1710 kfree(rx_sa); 1711 rtnl_unlock(); 1712 return err; 1713 } 1714 1715 if (tb_sa[MACSEC_SA_ATTR_PN]) { 1716 spin_lock_bh(&rx_sa->lock); 1717 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1718 spin_unlock_bh(&rx_sa->lock); 1719 } 1720 1721 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1722 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1723 1724 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1725 rx_sa->sc = rx_sc; 1726 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1727 1728 rtnl_unlock(); 1729 1730 return 0; 1731 } 1732 1733 static bool validate_add_rxsc(struct nlattr **attrs) 1734 { 1735 if (!attrs[MACSEC_RXSC_ATTR_SCI]) 1736 return false; 1737 1738 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { 1739 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) 1740 return false; 1741 } 1742 1743 return true; 1744 } 1745 1746 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) 1747 { 1748 struct net_device *dev; 1749 sci_t sci = MACSEC_UNDEF_SCI; 1750 struct nlattr **attrs = info->attrs; 1751 struct macsec_rx_sc *rx_sc; 1752 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1753 1754 if (!attrs[MACSEC_ATTR_IFINDEX]) 1755 return -EINVAL; 1756 1757 if (parse_rxsc_config(attrs, tb_rxsc)) 1758 return -EINVAL; 1759 1760 if (!validate_add_rxsc(tb_rxsc)) 1761 return -EINVAL; 1762 1763 rtnl_lock(); 1764 dev = get_dev_from_nl(genl_info_net(info), attrs); 1765 if (IS_ERR(dev)) { 1766 rtnl_unlock(); 1767 return PTR_ERR(dev); 1768 } 1769 1770 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1771 1772 rx_sc = create_rx_sc(dev, sci); 1773 if (IS_ERR(rx_sc)) { 1774 rtnl_unlock(); 1775 return PTR_ERR(rx_sc); 1776 } 1777 1778 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) 1779 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 1780 1781 rtnl_unlock(); 1782 1783 return 0; 1784 } 1785 1786 static bool validate_add_txsa(struct nlattr **attrs) 1787 { 1788 if (!attrs[MACSEC_SA_ATTR_AN] || 1789 !attrs[MACSEC_SA_ATTR_PN] || 1790 !attrs[MACSEC_SA_ATTR_KEY] || 1791 !attrs[MACSEC_SA_ATTR_KEYID]) 1792 return false; 1793 1794 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1795 return false; 1796 1797 if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1798 return false; 1799 1800 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1801 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1802 return false; 1803 } 1804 1805 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1806 return false; 1807 1808 return true; 1809 } 1810 1811 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) 1812 { 1813 struct net_device *dev; 1814 struct nlattr **attrs = info->attrs; 1815 struct macsec_secy *secy; 
1816 struct macsec_tx_sc *tx_sc; 1817 struct macsec_tx_sa *tx_sa; 1818 unsigned char assoc_num; 1819 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1820 int err; 1821 1822 if (!attrs[MACSEC_ATTR_IFINDEX]) 1823 return -EINVAL; 1824 1825 if (parse_sa_config(attrs, tb_sa)) 1826 return -EINVAL; 1827 1828 if (!validate_add_txsa(tb_sa)) 1829 return -EINVAL; 1830 1831 rtnl_lock(); 1832 dev = get_dev_from_nl(genl_info_net(info), attrs); 1833 if (IS_ERR(dev)) { 1834 rtnl_unlock(); 1835 return PTR_ERR(dev); 1836 } 1837 1838 secy = &macsec_priv(dev)->secy; 1839 tx_sc = &secy->tx_sc; 1840 1841 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1842 1843 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1844 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", 1845 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1846 rtnl_unlock(); 1847 return -EINVAL; 1848 } 1849 1850 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); 1851 if (tx_sa) { 1852 rtnl_unlock(); 1853 return -EBUSY; 1854 } 1855 1856 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1857 if (!tx_sa) { 1858 rtnl_unlock(); 1859 return -ENOMEM; 1860 } 1861 1862 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1863 secy->key_len, secy->icv_len); 1864 if (err < 0) { 1865 kfree(tx_sa); 1866 rtnl_unlock(); 1867 return err; 1868 } 1869 1870 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1871 1872 spin_lock_bh(&tx_sa->lock); 1873 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1874 spin_unlock_bh(&tx_sa->lock); 1875 1876 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1877 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1878 1879 if (assoc_num == tx_sc->encoding_sa && tx_sa->active) 1880 secy->operational = true; 1881 1882 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); 1883 1884 rtnl_unlock(); 1885 1886 return 0; 1887 } 1888 1889 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) 1890 { 1891 struct nlattr **attrs = info->attrs; 1892 struct net_device *dev; 1893 struct macsec_secy *secy; 1894 struct macsec_rx_sc *rx_sc; 1895 struct macsec_rx_sa *rx_sa; 1896 u8 assoc_num; 1897 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1898 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1899 1900 if (!attrs[MACSEC_ATTR_IFINDEX]) 1901 return -EINVAL; 1902 1903 if (parse_sa_config(attrs, tb_sa)) 1904 return -EINVAL; 1905 1906 if (parse_rxsc_config(attrs, tb_rxsc)) 1907 return -EINVAL; 1908 1909 rtnl_lock(); 1910 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 1911 &dev, &secy, &rx_sc, &assoc_num); 1912 if (IS_ERR(rx_sa)) { 1913 rtnl_unlock(); 1914 return PTR_ERR(rx_sa); 1915 } 1916 1917 if (rx_sa->active) { 1918 rtnl_unlock(); 1919 return -EBUSY; 1920 } 1921 1922 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 1923 clear_rx_sa(rx_sa); 1924 1925 rtnl_unlock(); 1926 1927 return 0; 1928 } 1929 1930 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 1931 { 1932 struct nlattr **attrs = info->attrs; 1933 struct net_device *dev; 1934 struct macsec_secy *secy; 1935 struct macsec_rx_sc *rx_sc; 1936 sci_t sci; 1937 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1938 1939 if (!attrs[MACSEC_ATTR_IFINDEX]) 1940 return -EINVAL; 1941 1942 if (parse_rxsc_config(attrs, tb_rxsc)) 1943 return -EINVAL; 1944 1945 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1946 return -EINVAL; 1947 1948 rtnl_lock(); 1949 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 1950 if (IS_ERR(dev)) { 1951 rtnl_unlock(); 1952 return PTR_ERR(dev); 1953 } 1954 1955 secy = 
&macsec_priv(dev)->secy; 1956 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1957 1958 rx_sc = del_rx_sc(secy, sci); 1959 if (!rx_sc) { 1960 rtnl_unlock(); 1961 return -ENODEV; 1962 } 1963 1964 free_rx_sc(rx_sc); 1965 rtnl_unlock(); 1966 1967 return 0; 1968 } 1969 1970 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 1971 { 1972 struct nlattr **attrs = info->attrs; 1973 struct net_device *dev; 1974 struct macsec_secy *secy; 1975 struct macsec_tx_sc *tx_sc; 1976 struct macsec_tx_sa *tx_sa; 1977 u8 assoc_num; 1978 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1979 1980 if (!attrs[MACSEC_ATTR_IFINDEX]) 1981 return -EINVAL; 1982 1983 if (parse_sa_config(attrs, tb_sa)) 1984 return -EINVAL; 1985 1986 rtnl_lock(); 1987 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 1988 &dev, &secy, &tx_sc, &assoc_num); 1989 if (IS_ERR(tx_sa)) { 1990 rtnl_unlock(); 1991 return PTR_ERR(tx_sa); 1992 } 1993 1994 if (tx_sa->active) { 1995 rtnl_unlock(); 1996 return -EBUSY; 1997 } 1998 1999 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2000 clear_tx_sa(tx_sa); 2001 2002 rtnl_unlock(); 2003 2004 return 0; 2005 } 2006 2007 static bool validate_upd_sa(struct nlattr **attrs) 2008 { 2009 if (!attrs[MACSEC_SA_ATTR_AN] || 2010 attrs[MACSEC_SA_ATTR_KEY] || 2011 attrs[MACSEC_SA_ATTR_KEYID]) 2012 return false; 2013 2014 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2015 return false; 2016 2017 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 2018 return false; 2019 2020 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2021 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2022 return false; 2023 } 2024 2025 return true; 2026 } 2027 2028 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2029 { 2030 struct nlattr **attrs = info->attrs; 2031 struct net_device *dev; 2032 struct macsec_secy *secy; 2033 struct macsec_tx_sc *tx_sc; 2034 struct macsec_tx_sa *tx_sa; 2035 u8 assoc_num; 2036 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2037 2038 if (!attrs[MACSEC_ATTR_IFINDEX]) 2039 return -EINVAL; 2040 2041 if (parse_sa_config(attrs, tb_sa)) 2042 return -EINVAL; 2043 2044 if (!validate_upd_sa(tb_sa)) 2045 return -EINVAL; 2046 2047 rtnl_lock(); 2048 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2049 &dev, &secy, &tx_sc, &assoc_num); 2050 if (IS_ERR(tx_sa)) { 2051 rtnl_unlock(); 2052 return PTR_ERR(tx_sa); 2053 } 2054 2055 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2056 spin_lock_bh(&tx_sa->lock); 2057 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2058 spin_unlock_bh(&tx_sa->lock); 2059 } 2060 2061 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2062 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2063 2064 if (assoc_num == tx_sc->encoding_sa) 2065 secy->operational = tx_sa->active; 2066 2067 rtnl_unlock(); 2068 2069 return 0; 2070 } 2071 2072 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2073 { 2074 struct nlattr **attrs = info->attrs; 2075 struct net_device *dev; 2076 struct macsec_secy *secy; 2077 struct macsec_rx_sc *rx_sc; 2078 struct macsec_rx_sa *rx_sa; 2079 u8 assoc_num; 2080 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2081 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2082 2083 if (!attrs[MACSEC_ATTR_IFINDEX]) 2084 return -EINVAL; 2085 2086 if (parse_rxsc_config(attrs, tb_rxsc)) 2087 return -EINVAL; 2088 2089 if (parse_sa_config(attrs, tb_sa)) 2090 return -EINVAL; 2091 2092 if (!validate_upd_sa(tb_sa)) 2093 return -EINVAL; 2094 2095 rtnl_lock(); 2096 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, 
tb_rxsc, tb_sa, 2097 &dev, &secy, &rx_sc, &assoc_num); 2098 if (IS_ERR(rx_sa)) { 2099 rtnl_unlock(); 2100 return PTR_ERR(rx_sa); 2101 } 2102 2103 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2104 spin_lock_bh(&rx_sa->lock); 2105 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2106 spin_unlock_bh(&rx_sa->lock); 2107 } 2108 2109 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2110 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2111 2112 rtnl_unlock(); 2113 return 0; 2114 } 2115 2116 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2117 { 2118 struct nlattr **attrs = info->attrs; 2119 struct net_device *dev; 2120 struct macsec_secy *secy; 2121 struct macsec_rx_sc *rx_sc; 2122 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2123 2124 if (!attrs[MACSEC_ATTR_IFINDEX]) 2125 return -EINVAL; 2126 2127 if (parse_rxsc_config(attrs, tb_rxsc)) 2128 return -EINVAL; 2129 2130 if (!validate_add_rxsc(tb_rxsc)) 2131 return -EINVAL; 2132 2133 rtnl_lock(); 2134 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2135 if (IS_ERR(rx_sc)) { 2136 rtnl_unlock(); 2137 return PTR_ERR(rx_sc); 2138 } 2139 2140 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2141 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2142 2143 if (rx_sc->active != new) 2144 secy->n_rx_sc += new ? 1 : -1; 2145 2146 rx_sc->active = new; 2147 } 2148 2149 rtnl_unlock(); 2150 2151 return 0; 2152 } 2153 2154 static int copy_tx_sa_stats(struct sk_buff *skb, 2155 struct macsec_tx_sa_stats __percpu *pstats) 2156 { 2157 struct macsec_tx_sa_stats sum = {0, }; 2158 int cpu; 2159 2160 for_each_possible_cpu(cpu) { 2161 const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2162 2163 sum.OutPktsProtected += stats->OutPktsProtected; 2164 sum.OutPktsEncrypted += stats->OutPktsEncrypted; 2165 } 2166 2167 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || 2168 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) 2169 return -EMSGSIZE; 2170 2171 return 0; 2172 } 2173 2174 static noinline_for_stack int 2175 copy_rx_sa_stats(struct sk_buff *skb, 2176 struct macsec_rx_sa_stats __percpu *pstats) 2177 { 2178 struct macsec_rx_sa_stats sum = {0, }; 2179 int cpu; 2180 2181 for_each_possible_cpu(cpu) { 2182 const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2183 2184 sum.InPktsOK += stats->InPktsOK; 2185 sum.InPktsInvalid += stats->InPktsInvalid; 2186 sum.InPktsNotValid += stats->InPktsNotValid; 2187 sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; 2188 sum.InPktsUnusedSA += stats->InPktsUnusedSA; 2189 } 2190 2191 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || 2192 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || 2193 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || 2194 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || 2195 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) 2196 return -EMSGSIZE; 2197 2198 return 0; 2199 } 2200 2201 static noinline_for_stack int 2202 copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats) 2203 { 2204 struct macsec_rx_sc_stats sum = {0, }; 2205 int cpu; 2206 2207 for_each_possible_cpu(cpu) { 2208 const struct pcpu_rx_sc_stats *stats; 2209 struct macsec_rx_sc_stats tmp; 2210 unsigned int start; 2211 2212 stats = per_cpu_ptr(pstats, cpu); 2213 do { 2214 start = u64_stats_fetch_begin_irq(&stats->syncp); 2215 memcpy(&tmp, &stats->stats, 
sizeof(tmp)); 2216 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2217 2218 sum.InOctetsValidated += tmp.InOctetsValidated; 2219 sum.InOctetsDecrypted += tmp.InOctetsDecrypted; 2220 sum.InPktsUnchecked += tmp.InPktsUnchecked; 2221 sum.InPktsDelayed += tmp.InPktsDelayed; 2222 sum.InPktsOK += tmp.InPktsOK; 2223 sum.InPktsInvalid += tmp.InPktsInvalid; 2224 sum.InPktsLate += tmp.InPktsLate; 2225 sum.InPktsNotValid += tmp.InPktsNotValid; 2226 sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2227 sum.InPktsUnusedSA += tmp.InPktsUnusedSA; 2228 } 2229 2230 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2231 sum.InOctetsValidated, 2232 MACSEC_RXSC_STATS_ATTR_PAD) || 2233 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2234 sum.InOctetsDecrypted, 2235 MACSEC_RXSC_STATS_ATTR_PAD) || 2236 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2237 sum.InPktsUnchecked, 2238 MACSEC_RXSC_STATS_ATTR_PAD) || 2239 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2240 sum.InPktsDelayed, 2241 MACSEC_RXSC_STATS_ATTR_PAD) || 2242 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2243 sum.InPktsOK, 2244 MACSEC_RXSC_STATS_ATTR_PAD) || 2245 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2246 sum.InPktsInvalid, 2247 MACSEC_RXSC_STATS_ATTR_PAD) || 2248 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2249 sum.InPktsLate, 2250 MACSEC_RXSC_STATS_ATTR_PAD) || 2251 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2252 sum.InPktsNotValid, 2253 MACSEC_RXSC_STATS_ATTR_PAD) || 2254 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2255 sum.InPktsNotUsingSA, 2256 MACSEC_RXSC_STATS_ATTR_PAD) || 2257 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2258 sum.InPktsUnusedSA, 2259 MACSEC_RXSC_STATS_ATTR_PAD)) 2260 return -EMSGSIZE; 2261 2262 return 0; 2263 } 2264 2265 static noinline_for_stack int 2266 copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats) 2267 { 2268 struct macsec_tx_sc_stats sum = {0, }; 2269 int cpu; 2270 2271 for_each_possible_cpu(cpu) { 2272 const struct pcpu_tx_sc_stats *stats; 2273 struct macsec_tx_sc_stats tmp; 2274 unsigned int start; 2275 2276 stats = per_cpu_ptr(pstats, cpu); 2277 do { 2278 start = u64_stats_fetch_begin_irq(&stats->syncp); 2279 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2280 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2281 2282 sum.OutPktsProtected += tmp.OutPktsProtected; 2283 sum.OutPktsEncrypted += tmp.OutPktsEncrypted; 2284 sum.OutOctetsProtected += tmp.OutOctetsProtected; 2285 sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2286 } 2287 2288 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2289 sum.OutPktsProtected, 2290 MACSEC_TXSC_STATS_ATTR_PAD) || 2291 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2292 sum.OutPktsEncrypted, 2293 MACSEC_TXSC_STATS_ATTR_PAD) || 2294 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2295 sum.OutOctetsProtected, 2296 MACSEC_TXSC_STATS_ATTR_PAD) || 2297 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2298 sum.OutOctetsEncrypted, 2299 MACSEC_TXSC_STATS_ATTR_PAD)) 2300 return -EMSGSIZE; 2301 2302 return 0; 2303 } 2304 2305 static noinline_for_stack int 2306 copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats) 2307 { 2308 struct macsec_dev_stats sum = {0, }; 2309 int cpu; 2310 2311 for_each_possible_cpu(cpu) { 2312 const struct pcpu_secy_stats *stats; 
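		/* Snapshot this CPU's counters under the u64_stats seqcount
		 * (fetch_begin/retry) so 64-bit reads stay consistent on
		 * 32-bit hosts, then fold them into the running sum.
		 */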
2313 struct macsec_dev_stats tmp; 2314 unsigned int start; 2315 2316 stats = per_cpu_ptr(pstats, cpu); 2317 do { 2318 start = u64_stats_fetch_begin_irq(&stats->syncp); 2319 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2320 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2321 2322 sum.OutPktsUntagged += tmp.OutPktsUntagged; 2323 sum.InPktsUntagged += tmp.InPktsUntagged; 2324 sum.OutPktsTooLong += tmp.OutPktsTooLong; 2325 sum.InPktsNoTag += tmp.InPktsNoTag; 2326 sum.InPktsBadTag += tmp.InPktsBadTag; 2327 sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2328 sum.InPktsNoSCI += tmp.InPktsNoSCI; 2329 sum.InPktsOverrun += tmp.InPktsOverrun; 2330 } 2331 2332 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2333 sum.OutPktsUntagged, 2334 MACSEC_SECY_STATS_ATTR_PAD) || 2335 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2336 sum.InPktsUntagged, 2337 MACSEC_SECY_STATS_ATTR_PAD) || 2338 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2339 sum.OutPktsTooLong, 2340 MACSEC_SECY_STATS_ATTR_PAD) || 2341 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2342 sum.InPktsNoTag, 2343 MACSEC_SECY_STATS_ATTR_PAD) || 2344 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2345 sum.InPktsBadTag, 2346 MACSEC_SECY_STATS_ATTR_PAD) || 2347 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2348 sum.InPktsUnknownSCI, 2349 MACSEC_SECY_STATS_ATTR_PAD) || 2350 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2351 sum.InPktsNoSCI, 2352 MACSEC_SECY_STATS_ATTR_PAD) || 2353 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2354 sum.InPktsOverrun, 2355 MACSEC_SECY_STATS_ATTR_PAD)) 2356 return -EMSGSIZE; 2357 2358 return 0; 2359 } 2360 2361 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2362 { 2363 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2364 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2365 MACSEC_ATTR_SECY); 2366 u64 csid; 2367 2368 if (!secy_nest) 2369 return 1; 2370 2371 switch (secy->key_len) { 2372 case MACSEC_GCM_AES_128_SAK_LEN: 2373 csid = MACSEC_DEFAULT_CIPHER_ID; 2374 break; 2375 case MACSEC_GCM_AES_256_SAK_LEN: 2376 csid = MACSEC_CIPHER_ID_GCM_AES_256; 2377 break; 2378 default: 2379 goto cancel; 2380 } 2381 2382 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2383 MACSEC_SECY_ATTR_PAD) || 2384 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2385 csid, MACSEC_SECY_ATTR_PAD) || 2386 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2387 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2388 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2389 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 2390 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 2391 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 2392 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 2393 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 2394 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 2395 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 2396 goto cancel; 2397 2398 if (secy->replay_protect) { 2399 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 2400 goto cancel; 2401 } 2402 2403 nla_nest_end(skb, secy_nest); 2404 return 0; 2405 2406 cancel: 2407 nla_nest_cancel(skb, secy_nest); 2408 return 1; 2409 } 2410 2411 static noinline_for_stack int 2412 dump_secy(struct macsec_secy *secy, struct net_device *dev, 2413 struct sk_buff 
*skb, struct netlink_callback *cb) 2414 { 2415 struct macsec_rx_sc *rx_sc; 2416 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2417 struct nlattr *txsa_list, *rxsc_list; 2418 int i, j; 2419 void *hdr; 2420 struct nlattr *attr; 2421 2422 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2423 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 2424 if (!hdr) 2425 return -EMSGSIZE; 2426 2427 genl_dump_check_consistent(cb, hdr); 2428 2429 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2430 goto nla_put_failure; 2431 2432 if (nla_put_secy(secy, skb)) 2433 goto nla_put_failure; 2434 2435 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 2436 if (!attr) 2437 goto nla_put_failure; 2438 if (copy_tx_sc_stats(skb, tx_sc->stats)) { 2439 nla_nest_cancel(skb, attr); 2440 goto nla_put_failure; 2441 } 2442 nla_nest_end(skb, attr); 2443 2444 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 2445 if (!attr) 2446 goto nla_put_failure; 2447 if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { 2448 nla_nest_cancel(skb, attr); 2449 goto nla_put_failure; 2450 } 2451 nla_nest_end(skb, attr); 2452 2453 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 2454 if (!txsa_list) 2455 goto nla_put_failure; 2456 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 2457 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 2458 struct nlattr *txsa_nest; 2459 2460 if (!tx_sa) 2461 continue; 2462 2463 txsa_nest = nla_nest_start_noflag(skb, j++); 2464 if (!txsa_nest) { 2465 nla_nest_cancel(skb, txsa_list); 2466 goto nla_put_failure; 2467 } 2468 2469 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2470 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2471 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 2472 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2473 nla_nest_cancel(skb, txsa_nest); 2474 nla_nest_cancel(skb, txsa_list); 2475 goto nla_put_failure; 2476 } 2477 2478 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 2479 if (!attr) { 2480 nla_nest_cancel(skb, txsa_nest); 2481 nla_nest_cancel(skb, txsa_list); 2482 goto nla_put_failure; 2483 } 2484 if (copy_tx_sa_stats(skb, tx_sa->stats)) { 2485 nla_nest_cancel(skb, attr); 2486 nla_nest_cancel(skb, txsa_nest); 2487 nla_nest_cancel(skb, txsa_list); 2488 goto nla_put_failure; 2489 } 2490 nla_nest_end(skb, attr); 2491 2492 nla_nest_end(skb, txsa_nest); 2493 } 2494 nla_nest_end(skb, txsa_list); 2495 2496 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 2497 if (!rxsc_list) 2498 goto nla_put_failure; 2499 2500 j = 1; 2501 for_each_rxsc_rtnl(secy, rx_sc) { 2502 int k; 2503 struct nlattr *rxsa_list; 2504 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 2505 2506 if (!rxsc_nest) { 2507 nla_nest_cancel(skb, rxsc_list); 2508 goto nla_put_failure; 2509 } 2510 2511 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 2512 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 2513 MACSEC_RXSC_ATTR_PAD)) { 2514 nla_nest_cancel(skb, rxsc_nest); 2515 nla_nest_cancel(skb, rxsc_list); 2516 goto nla_put_failure; 2517 } 2518 2519 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 2520 if (!attr) { 2521 nla_nest_cancel(skb, rxsc_nest); 2522 nla_nest_cancel(skb, rxsc_list); 2523 goto nla_put_failure; 2524 } 2525 if (copy_rx_sc_stats(skb, rx_sc->stats)) { 2526 nla_nest_cancel(skb, attr); 2527 nla_nest_cancel(skb, rxsc_nest); 2528 nla_nest_cancel(skb, rxsc_list); 2529 goto nla_put_failure; 2530 } 2531 nla_nest_end(skb, attr); 2532 2533 rxsa_list = 
nla_nest_start_noflag(skb, 2534 MACSEC_RXSC_ATTR_SA_LIST); 2535 if (!rxsa_list) { 2536 nla_nest_cancel(skb, rxsc_nest); 2537 nla_nest_cancel(skb, rxsc_list); 2538 goto nla_put_failure; 2539 } 2540 2541 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 2542 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 2543 struct nlattr *rxsa_nest; 2544 2545 if (!rx_sa) 2546 continue; 2547 2548 rxsa_nest = nla_nest_start_noflag(skb, k++); 2549 if (!rxsa_nest) { 2550 nla_nest_cancel(skb, rxsa_list); 2551 nla_nest_cancel(skb, rxsc_nest); 2552 nla_nest_cancel(skb, rxsc_list); 2553 goto nla_put_failure; 2554 } 2555 2556 attr = nla_nest_start_noflag(skb, 2557 MACSEC_SA_ATTR_STATS); 2558 if (!attr) { 2559 nla_nest_cancel(skb, rxsa_list); 2560 nla_nest_cancel(skb, rxsc_nest); 2561 nla_nest_cancel(skb, rxsc_list); 2562 goto nla_put_failure; 2563 } 2564 if (copy_rx_sa_stats(skb, rx_sa->stats)) { 2565 nla_nest_cancel(skb, attr); 2566 nla_nest_cancel(skb, rxsa_list); 2567 nla_nest_cancel(skb, rxsc_nest); 2568 nla_nest_cancel(skb, rxsc_list); 2569 goto nla_put_failure; 2570 } 2571 nla_nest_end(skb, attr); 2572 2573 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2574 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2575 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 2576 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2577 nla_nest_cancel(skb, rxsa_nest); 2578 nla_nest_cancel(skb, rxsc_nest); 2579 nla_nest_cancel(skb, rxsc_list); 2580 goto nla_put_failure; 2581 } 2582 nla_nest_end(skb, rxsa_nest); 2583 } 2584 2585 nla_nest_end(skb, rxsa_list); 2586 nla_nest_end(skb, rxsc_nest); 2587 } 2588 2589 nla_nest_end(skb, rxsc_list); 2590 2591 genlmsg_end(skb, hdr); 2592 2593 return 0; 2594 2595 nla_put_failure: 2596 genlmsg_cancel(skb, hdr); 2597 return -EMSGSIZE; 2598 } 2599 2600 static int macsec_generation = 1; /* protected by RTNL */ 2601 2602 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2603 { 2604 struct net *net = sock_net(skb->sk); 2605 struct net_device *dev; 2606 int dev_idx, d; 2607 2608 dev_idx = cb->args[0]; 2609 2610 d = 0; 2611 rtnl_lock(); 2612 2613 cb->seq = macsec_generation; 2614 2615 for_each_netdev(net, dev) { 2616 struct macsec_secy *secy; 2617 2618 if (d < dev_idx) 2619 goto next; 2620 2621 if (!netif_is_macsec(dev)) 2622 goto next; 2623 2624 secy = &macsec_priv(dev)->secy; 2625 if (dump_secy(secy, dev, skb, cb) < 0) 2626 goto done; 2627 next: 2628 d++; 2629 } 2630 2631 done: 2632 rtnl_unlock(); 2633 cb->args[0] = d; 2634 return skb->len; 2635 } 2636 2637 static const struct genl_ops macsec_genl_ops[] = { 2638 { 2639 .cmd = MACSEC_CMD_GET_TXSC, 2640 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2641 .dumpit = macsec_dump_txsc, 2642 }, 2643 { 2644 .cmd = MACSEC_CMD_ADD_RXSC, 2645 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2646 .doit = macsec_add_rxsc, 2647 .flags = GENL_ADMIN_PERM, 2648 }, 2649 { 2650 .cmd = MACSEC_CMD_DEL_RXSC, 2651 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2652 .doit = macsec_del_rxsc, 2653 .flags = GENL_ADMIN_PERM, 2654 }, 2655 { 2656 .cmd = MACSEC_CMD_UPD_RXSC, 2657 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2658 .doit = macsec_upd_rxsc, 2659 .flags = GENL_ADMIN_PERM, 2660 }, 2661 { 2662 .cmd = MACSEC_CMD_ADD_TXSA, 2663 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2664 .doit = macsec_add_txsa, 2665 .flags = GENL_ADMIN_PERM, 2666 }, 2667 { 2668 .cmd = MACSEC_CMD_DEL_TXSA, 2669 .validate = 
GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2670 .doit = macsec_del_txsa, 2671 .flags = GENL_ADMIN_PERM, 2672 }, 2673 { 2674 .cmd = MACSEC_CMD_UPD_TXSA, 2675 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2676 .doit = macsec_upd_txsa, 2677 .flags = GENL_ADMIN_PERM, 2678 }, 2679 { 2680 .cmd = MACSEC_CMD_ADD_RXSA, 2681 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2682 .doit = macsec_add_rxsa, 2683 .flags = GENL_ADMIN_PERM, 2684 }, 2685 { 2686 .cmd = MACSEC_CMD_DEL_RXSA, 2687 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2688 .doit = macsec_del_rxsa, 2689 .flags = GENL_ADMIN_PERM, 2690 }, 2691 { 2692 .cmd = MACSEC_CMD_UPD_RXSA, 2693 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2694 .doit = macsec_upd_rxsa, 2695 .flags = GENL_ADMIN_PERM, 2696 }, 2697 }; 2698 2699 static struct genl_family macsec_fam __ro_after_init = { 2700 .name = MACSEC_GENL_NAME, 2701 .hdrsize = 0, 2702 .version = MACSEC_GENL_VERSION, 2703 .maxattr = MACSEC_ATTR_MAX, 2704 .policy = macsec_genl_policy, 2705 .netnsok = true, 2706 .module = THIS_MODULE, 2707 .ops = macsec_genl_ops, 2708 .n_ops = ARRAY_SIZE(macsec_genl_ops), 2709 }; 2710 2711 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 2712 struct net_device *dev) 2713 { 2714 struct macsec_dev *macsec = netdev_priv(dev); 2715 struct macsec_secy *secy = &macsec->secy; 2716 struct pcpu_secy_stats *secy_stats; 2717 int ret, len; 2718 2719 /* 10.5 */ 2720 if (!secy->protect_frames) { 2721 secy_stats = this_cpu_ptr(macsec->stats); 2722 u64_stats_update_begin(&secy_stats->syncp); 2723 secy_stats->stats.OutPktsUntagged++; 2724 u64_stats_update_end(&secy_stats->syncp); 2725 skb->dev = macsec->real_dev; 2726 len = skb->len; 2727 ret = dev_queue_xmit(skb); 2728 count_tx(dev, ret, len); 2729 return ret; 2730 } 2731 2732 if (!secy->operational) { 2733 kfree_skb(skb); 2734 dev->stats.tx_dropped++; 2735 return NETDEV_TX_OK; 2736 } 2737 2738 skb = macsec_encrypt(skb, dev); 2739 if (IS_ERR(skb)) { 2740 if (PTR_ERR(skb) != -EINPROGRESS) 2741 dev->stats.tx_dropped++; 2742 return NETDEV_TX_OK; 2743 } 2744 2745 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 2746 2747 macsec_encrypt_finish(skb, dev); 2748 len = skb->len; 2749 ret = dev_queue_xmit(skb); 2750 count_tx(dev, ret, len); 2751 return ret; 2752 } 2753 2754 #define MACSEC_FEATURES \ 2755 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2756 static struct lock_class_key macsec_netdev_addr_lock_key; 2757 2758 static int macsec_dev_init(struct net_device *dev) 2759 { 2760 struct macsec_dev *macsec = macsec_priv(dev); 2761 struct net_device *real_dev = macsec->real_dev; 2762 int err; 2763 2764 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2765 if (!dev->tstats) 2766 return -ENOMEM; 2767 2768 err = gro_cells_init(&macsec->gro_cells, dev); 2769 if (err) { 2770 free_percpu(dev->tstats); 2771 return err; 2772 } 2773 2774 dev->features = real_dev->features & MACSEC_FEATURES; 2775 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 2776 2777 dev->needed_headroom = real_dev->needed_headroom + 2778 MACSEC_NEEDED_HEADROOM; 2779 dev->needed_tailroom = real_dev->needed_tailroom + 2780 MACSEC_NEEDED_TAILROOM; 2781 2782 if (is_zero_ether_addr(dev->dev_addr)) 2783 eth_hw_addr_inherit(dev, real_dev); 2784 if (is_zero_ether_addr(dev->broadcast)) 2785 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 2786 2787 return 0; 2788 } 2789 2790 static void macsec_dev_uninit(struct net_device *dev) 2791 { 2792 struct 
macsec_dev *macsec = macsec_priv(dev); 2793 2794 gro_cells_destroy(&macsec->gro_cells); 2795 free_percpu(dev->tstats); 2796 } 2797 2798 static netdev_features_t macsec_fix_features(struct net_device *dev, 2799 netdev_features_t features) 2800 { 2801 struct macsec_dev *macsec = macsec_priv(dev); 2802 struct net_device *real_dev = macsec->real_dev; 2803 2804 features &= (real_dev->features & MACSEC_FEATURES) | 2805 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 2806 features |= NETIF_F_LLTX; 2807 2808 return features; 2809 } 2810 2811 static int macsec_dev_open(struct net_device *dev) 2812 { 2813 struct macsec_dev *macsec = macsec_priv(dev); 2814 struct net_device *real_dev = macsec->real_dev; 2815 int err; 2816 2817 err = dev_uc_add(real_dev, dev->dev_addr); 2818 if (err < 0) 2819 return err; 2820 2821 if (dev->flags & IFF_ALLMULTI) { 2822 err = dev_set_allmulti(real_dev, 1); 2823 if (err < 0) 2824 goto del_unicast; 2825 } 2826 2827 if (dev->flags & IFF_PROMISC) { 2828 err = dev_set_promiscuity(real_dev, 1); 2829 if (err < 0) 2830 goto clear_allmulti; 2831 } 2832 2833 if (netif_carrier_ok(real_dev)) 2834 netif_carrier_on(dev); 2835 2836 return 0; 2837 clear_allmulti: 2838 if (dev->flags & IFF_ALLMULTI) 2839 dev_set_allmulti(real_dev, -1); 2840 del_unicast: 2841 dev_uc_del(real_dev, dev->dev_addr); 2842 netif_carrier_off(dev); 2843 return err; 2844 } 2845 2846 static int macsec_dev_stop(struct net_device *dev) 2847 { 2848 struct macsec_dev *macsec = macsec_priv(dev); 2849 struct net_device *real_dev = macsec->real_dev; 2850 2851 netif_carrier_off(dev); 2852 2853 dev_mc_unsync(real_dev, dev); 2854 dev_uc_unsync(real_dev, dev); 2855 2856 if (dev->flags & IFF_ALLMULTI) 2857 dev_set_allmulti(real_dev, -1); 2858 2859 if (dev->flags & IFF_PROMISC) 2860 dev_set_promiscuity(real_dev, -1); 2861 2862 dev_uc_del(real_dev, dev->dev_addr); 2863 2864 return 0; 2865 } 2866 2867 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 2868 { 2869 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2870 2871 if (!(dev->flags & IFF_UP)) 2872 return; 2873 2874 if (change & IFF_ALLMULTI) 2875 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 2876 2877 if (change & IFF_PROMISC) 2878 dev_set_promiscuity(real_dev, 2879 dev->flags & IFF_PROMISC ? 
1 : -1); 2880 } 2881 2882 static void macsec_dev_set_rx_mode(struct net_device *dev) 2883 { 2884 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2885 2886 dev_mc_sync(real_dev, dev); 2887 dev_uc_sync(real_dev, dev); 2888 } 2889 2890 static int macsec_set_mac_address(struct net_device *dev, void *p) 2891 { 2892 struct macsec_dev *macsec = macsec_priv(dev); 2893 struct net_device *real_dev = macsec->real_dev; 2894 struct sockaddr *addr = p; 2895 int err; 2896 2897 if (!is_valid_ether_addr(addr->sa_data)) 2898 return -EADDRNOTAVAIL; 2899 2900 if (!(dev->flags & IFF_UP)) 2901 goto out; 2902 2903 err = dev_uc_add(real_dev, addr->sa_data); 2904 if (err < 0) 2905 return err; 2906 2907 dev_uc_del(real_dev, dev->dev_addr); 2908 2909 out: 2910 ether_addr_copy(dev->dev_addr, addr->sa_data); 2911 return 0; 2912 } 2913 2914 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 2915 { 2916 struct macsec_dev *macsec = macsec_priv(dev); 2917 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 2918 2919 if (macsec->real_dev->mtu - extra < new_mtu) 2920 return -ERANGE; 2921 2922 dev->mtu = new_mtu; 2923 2924 return 0; 2925 } 2926 2927 static void macsec_get_stats64(struct net_device *dev, 2928 struct rtnl_link_stats64 *s) 2929 { 2930 int cpu; 2931 2932 if (!dev->tstats) 2933 return; 2934 2935 for_each_possible_cpu(cpu) { 2936 struct pcpu_sw_netstats *stats; 2937 struct pcpu_sw_netstats tmp; 2938 int start; 2939 2940 stats = per_cpu_ptr(dev->tstats, cpu); 2941 do { 2942 start = u64_stats_fetch_begin_irq(&stats->syncp); 2943 tmp.rx_packets = stats->rx_packets; 2944 tmp.rx_bytes = stats->rx_bytes; 2945 tmp.tx_packets = stats->tx_packets; 2946 tmp.tx_bytes = stats->tx_bytes; 2947 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2948 2949 s->rx_packets += tmp.rx_packets; 2950 s->rx_bytes += tmp.rx_bytes; 2951 s->tx_packets += tmp.tx_packets; 2952 s->tx_bytes += tmp.tx_bytes; 2953 } 2954 2955 s->rx_dropped = dev->stats.rx_dropped; 2956 s->tx_dropped = dev->stats.tx_dropped; 2957 } 2958 2959 static int macsec_get_iflink(const struct net_device *dev) 2960 { 2961 return macsec_priv(dev)->real_dev->ifindex; 2962 } 2963 2964 static int macsec_get_nest_level(struct net_device *dev) 2965 { 2966 return macsec_priv(dev)->nest_level; 2967 } 2968 2969 static const struct net_device_ops macsec_netdev_ops = { 2970 .ndo_init = macsec_dev_init, 2971 .ndo_uninit = macsec_dev_uninit, 2972 .ndo_open = macsec_dev_open, 2973 .ndo_stop = macsec_dev_stop, 2974 .ndo_fix_features = macsec_fix_features, 2975 .ndo_change_mtu = macsec_change_mtu, 2976 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 2977 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 2978 .ndo_set_mac_address = macsec_set_mac_address, 2979 .ndo_start_xmit = macsec_start_xmit, 2980 .ndo_get_stats64 = macsec_get_stats64, 2981 .ndo_get_iflink = macsec_get_iflink, 2982 .ndo_get_lock_subclass = macsec_get_nest_level, 2983 }; 2984 2985 static const struct device_type macsec_type = { 2986 .name = "macsec", 2987 }; 2988 2989 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 2990 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 2991 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 2992 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 2993 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 2994 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 2995 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 2996 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 2997 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 2998 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 
2999 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3000 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3001 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3002 }; 3003 3004 static void macsec_free_netdev(struct net_device *dev) 3005 { 3006 struct macsec_dev *macsec = macsec_priv(dev); 3007 struct net_device *real_dev = macsec->real_dev; 3008 3009 free_percpu(macsec->stats); 3010 free_percpu(macsec->secy.tx_sc.stats); 3011 3012 dev_put(real_dev); 3013 } 3014 3015 static void macsec_setup(struct net_device *dev) 3016 { 3017 ether_setup(dev); 3018 dev->min_mtu = 0; 3019 dev->max_mtu = ETH_MAX_MTU; 3020 dev->priv_flags |= IFF_NO_QUEUE; 3021 dev->netdev_ops = &macsec_netdev_ops; 3022 dev->needs_free_netdev = true; 3023 dev->priv_destructor = macsec_free_netdev; 3024 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3025 3026 eth_zero_addr(dev->broadcast); 3027 } 3028 3029 static int macsec_changelink_common(struct net_device *dev, 3030 struct nlattr *data[]) 3031 { 3032 struct macsec_secy *secy; 3033 struct macsec_tx_sc *tx_sc; 3034 3035 secy = &macsec_priv(dev)->secy; 3036 tx_sc = &secy->tx_sc; 3037 3038 if (data[IFLA_MACSEC_ENCODING_SA]) { 3039 struct macsec_tx_sa *tx_sa; 3040 3041 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3042 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3043 3044 secy->operational = tx_sa && tx_sa->active; 3045 } 3046 3047 if (data[IFLA_MACSEC_WINDOW]) 3048 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3049 3050 if (data[IFLA_MACSEC_ENCRYPT]) 3051 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3052 3053 if (data[IFLA_MACSEC_PROTECT]) 3054 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3055 3056 if (data[IFLA_MACSEC_INC_SCI]) 3057 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3058 3059 if (data[IFLA_MACSEC_ES]) 3060 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3061 3062 if (data[IFLA_MACSEC_SCB]) 3063 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3064 3065 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3066 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3067 3068 if (data[IFLA_MACSEC_VALIDATION]) 3069 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3070 3071 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3072 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3073 case MACSEC_CIPHER_ID_GCM_AES_128: 3074 case MACSEC_DEFAULT_CIPHER_ID: 3075 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3076 break; 3077 case MACSEC_CIPHER_ID_GCM_AES_256: 3078 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3079 break; 3080 default: 3081 return -EINVAL; 3082 } 3083 } 3084 3085 return 0; 3086 } 3087 3088 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3089 struct nlattr *data[], 3090 struct netlink_ext_ack *extack) 3091 { 3092 if (!data) 3093 return 0; 3094 3095 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3096 data[IFLA_MACSEC_ICV_LEN] || 3097 data[IFLA_MACSEC_SCI] || 3098 data[IFLA_MACSEC_PORT]) 3099 return -EINVAL; 3100 3101 return macsec_changelink_common(dev, data); 3102 } 3103 3104 static void macsec_del_dev(struct macsec_dev *macsec) 3105 { 3106 int i; 3107 3108 while (macsec->secy.rx_sc) { 3109 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3110 3111 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3112 free_rx_sc(rx_sc); 3113 } 3114 3115 for (i = 0; i < MACSEC_NUM_AN; i++) { 3116 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3117 3118 if (sa) { 3119 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3120 
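			/* Unpublish the SA first so readers traversing the
			 * array under RCU see NULL while clear_tx_sa()
			 * drops the reference below.
			 */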
clear_tx_sa(sa); 3121 } 3122 } 3123 } 3124 3125 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3126 { 3127 struct macsec_dev *macsec = macsec_priv(dev); 3128 struct net_device *real_dev = macsec->real_dev; 3129 3130 unregister_netdevice_queue(dev, head); 3131 list_del_rcu(&macsec->secys); 3132 macsec_del_dev(macsec); 3133 netdev_upper_dev_unlink(real_dev, dev); 3134 3135 macsec_generation++; 3136 } 3137 3138 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3139 { 3140 struct macsec_dev *macsec = macsec_priv(dev); 3141 struct net_device *real_dev = macsec->real_dev; 3142 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3143 3144 macsec_common_dellink(dev, head); 3145 3146 if (list_empty(&rxd->secys)) { 3147 netdev_rx_handler_unregister(real_dev); 3148 kfree(rxd); 3149 } 3150 } 3151 3152 static int register_macsec_dev(struct net_device *real_dev, 3153 struct net_device *dev) 3154 { 3155 struct macsec_dev *macsec = macsec_priv(dev); 3156 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3157 3158 if (!rxd) { 3159 int err; 3160 3161 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3162 if (!rxd) 3163 return -ENOMEM; 3164 3165 INIT_LIST_HEAD(&rxd->secys); 3166 3167 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3168 rxd); 3169 if (err < 0) { 3170 kfree(rxd); 3171 return err; 3172 } 3173 } 3174 3175 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3176 return 0; 3177 } 3178 3179 static bool sci_exists(struct net_device *dev, sci_t sci) 3180 { 3181 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3182 struct macsec_dev *macsec; 3183 3184 list_for_each_entry(macsec, &rxd->secys, secys) { 3185 if (macsec->secy.sci == sci) 3186 return true; 3187 } 3188 3189 return false; 3190 } 3191 3192 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3193 { 3194 return make_sci(dev->dev_addr, port); 3195 } 3196 3197 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3198 { 3199 struct macsec_dev *macsec = macsec_priv(dev); 3200 struct macsec_secy *secy = &macsec->secy; 3201 3202 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3203 if (!macsec->stats) 3204 return -ENOMEM; 3205 3206 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3207 if (!secy->tx_sc.stats) { 3208 free_percpu(macsec->stats); 3209 return -ENOMEM; 3210 } 3211 3212 if (sci == MACSEC_UNDEF_SCI) 3213 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3214 3215 secy->netdev = dev; 3216 secy->operational = true; 3217 secy->key_len = DEFAULT_SAK_LEN; 3218 secy->icv_len = icv_len; 3219 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3220 secy->protect_frames = true; 3221 secy->replay_protect = false; 3222 3223 secy->sci = sci; 3224 secy->tx_sc.active = true; 3225 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3226 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3227 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3228 secy->tx_sc.end_station = false; 3229 secy->tx_sc.scb = false; 3230 3231 return 0; 3232 } 3233 3234 static int macsec_newlink(struct net *net, struct net_device *dev, 3235 struct nlattr *tb[], struct nlattr *data[], 3236 struct netlink_ext_ack *extack) 3237 { 3238 struct macsec_dev *macsec = macsec_priv(dev); 3239 struct net_device *real_dev; 3240 int err; 3241 sci_t sci; 3242 u8 icv_len = DEFAULT_ICV_LEN; 3243 rx_handler_func_t *rx_handler; 3244 3245 if (!tb[IFLA_LINK]) 3246 return -EINVAL; 3247 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 3248 if (!real_dev) 3249 return -ENODEV; 3250 3251 
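	/* Tag the device as MACsec before registration so that
	 * netif_is_macsec() (used by the dump path above) recognizes it.
	 */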
dev->priv_flags |= IFF_MACSEC; 3252 3253 macsec->real_dev = real_dev; 3254 3255 if (data && data[IFLA_MACSEC_ICV_LEN]) 3256 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3257 dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 3258 3259 rx_handler = rtnl_dereference(real_dev->rx_handler); 3260 if (rx_handler && rx_handler != macsec_handle_frame) 3261 return -EBUSY; 3262 3263 err = register_netdevice(dev); 3264 if (err < 0) 3265 return err; 3266 3267 dev_hold(real_dev); 3268 3269 macsec->nest_level = dev_get_nest_level(real_dev) + 1; 3270 netdev_lockdep_set_classes(dev); 3271 lockdep_set_class_and_subclass(&dev->addr_list_lock, 3272 &macsec_netdev_addr_lock_key, 3273 macsec_get_nest_level(dev)); 3274 3275 err = netdev_upper_dev_link(real_dev, dev, extack); 3276 if (err < 0) 3277 goto unregister; 3278 3279 /* need to be already registered so that ->init has run and 3280 * the MAC addr is set 3281 */ 3282 if (data && data[IFLA_MACSEC_SCI]) 3283 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 3284 else if (data && data[IFLA_MACSEC_PORT]) 3285 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 3286 else 3287 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3288 3289 if (rx_handler && sci_exists(real_dev, sci)) { 3290 err = -EBUSY; 3291 goto unlink; 3292 } 3293 3294 err = macsec_add_dev(dev, sci, icv_len); 3295 if (err) 3296 goto unlink; 3297 3298 if (data) { 3299 err = macsec_changelink_common(dev, data); 3300 if (err) 3301 goto del_dev; 3302 } 3303 3304 err = register_macsec_dev(real_dev, dev); 3305 if (err < 0) 3306 goto del_dev; 3307 3308 netif_stacked_transfer_operstate(real_dev, dev); 3309 linkwatch_fire_event(dev); 3310 3311 macsec_generation++; 3312 3313 return 0; 3314 3315 del_dev: 3316 macsec_del_dev(macsec); 3317 unlink: 3318 netdev_upper_dev_unlink(real_dev, dev); 3319 unregister: 3320 unregister_netdevice(dev); 3321 return err; 3322 } 3323 3324 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 3325 struct netlink_ext_ack *extack) 3326 { 3327 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3328 u8 icv_len = DEFAULT_ICV_LEN; 3329 int flag; 3330 bool es, scb, sci; 3331 3332 if (!data) 3333 return 0; 3334 3335 if (data[IFLA_MACSEC_CIPHER_SUITE]) 3336 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 3337 3338 if (data[IFLA_MACSEC_ICV_LEN]) { 3339 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3340 if (icv_len != DEFAULT_ICV_LEN) { 3341 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 3342 struct crypto_aead *dummy_tfm; 3343 3344 dummy_tfm = macsec_alloc_tfm(dummy_key, 3345 DEFAULT_SAK_LEN, 3346 icv_len); 3347 if (IS_ERR(dummy_tfm)) 3348 return PTR_ERR(dummy_tfm); 3349 crypto_free_aead(dummy_tfm); 3350 } 3351 } 3352 3353 switch (csid) { 3354 case MACSEC_CIPHER_ID_GCM_AES_128: 3355 case MACSEC_CIPHER_ID_GCM_AES_256: 3356 case MACSEC_DEFAULT_CIPHER_ID: 3357 if (icv_len < MACSEC_MIN_ICV_LEN || 3358 icv_len > MACSEC_STD_ICV_LEN) 3359 return -EINVAL; 3360 break; 3361 default: 3362 return -EINVAL; 3363 } 3364 3365 if (data[IFLA_MACSEC_ENCODING_SA]) { 3366 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 3367 return -EINVAL; 3368 } 3369 3370 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 3371 flag < IFLA_MACSEC_VALIDATION; 3372 flag++) { 3373 if (data[flag]) { 3374 if (nla_get_u8(data[flag]) > 1) 3375 return -EINVAL; 3376 } 3377 } 3378 3379 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 3380 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 3381 scb = data[IFLA_MACSEC_SCB] ? 
nla_get_u8(data[IFLA_MACSEC_SCB]) : false; 3382 3383 if ((sci && (scb || es)) || (scb && es)) 3384 return -EINVAL; 3385 3386 if (data[IFLA_MACSEC_VALIDATION] && 3387 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3388 return -EINVAL; 3389 3390 if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3391 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3392 !data[IFLA_MACSEC_WINDOW]) 3393 return -EINVAL; 3394 3395 return 0; 3396 } 3397 3398 static struct net *macsec_get_link_net(const struct net_device *dev) 3399 { 3400 return dev_net(macsec_priv(dev)->real_dev); 3401 } 3402 3403 static size_t macsec_get_size(const struct net_device *dev) 3404 { 3405 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ 3406 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */ 3407 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */ 3408 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */ 3409 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */ 3410 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */ 3411 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */ 3412 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */ 3413 nla_total_size(1) + /* IFLA_MACSEC_ES */ 3414 nla_total_size(1) + /* IFLA_MACSEC_SCB */ 3415 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ 3416 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ 3417 0; 3418 } 3419 3420 static int macsec_fill_info(struct sk_buff *skb, 3421 const struct net_device *dev) 3422 { 3423 struct macsec_secy *secy = &macsec_priv(dev)->secy; 3424 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3425 u64 csid; 3426 3427 switch (secy->key_len) { 3428 case MACSEC_GCM_AES_128_SAK_LEN: 3429 csid = MACSEC_DEFAULT_CIPHER_ID; 3430 break; 3431 case MACSEC_GCM_AES_256_SAK_LEN: 3432 csid = MACSEC_CIPHER_ID_GCM_AES_256; 3433 break; 3434 default: 3435 goto nla_put_failure; 3436 } 3437 3438 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, 3439 IFLA_MACSEC_PAD) || 3440 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3441 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, 3442 csid, IFLA_MACSEC_PAD) || 3443 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3444 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3445 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3446 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || 3447 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || 3448 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || 3449 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || 3450 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || 3451 0) 3452 goto nla_put_failure; 3453 3454 if (secy->replay_protect) { 3455 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) 3456 goto nla_put_failure; 3457 } 3458 3459 return 0; 3460 3461 nla_put_failure: 3462 return -EMSGSIZE; 3463 } 3464 3465 static struct rtnl_link_ops macsec_link_ops __read_mostly = { 3466 .kind = "macsec", 3467 .priv_size = sizeof(struct macsec_dev), 3468 .maxtype = IFLA_MACSEC_MAX, 3469 .policy = macsec_rtnl_policy, 3470 .setup = macsec_setup, 3471 .validate = macsec_validate_attr, 3472 .newlink = macsec_newlink, 3473 .changelink = macsec_changelink, 3474 .dellink = macsec_dellink, 3475 .get_size = macsec_get_size, 3476 .fill_info = macsec_fill_info, 3477 .get_link_net = macsec_get_link_net, 3478 }; 3479 3480 static bool is_macsec_master(struct net_device *dev) 3481 { 3482 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; 3483 } 3484 3485 static int macsec_notify(struct notifier_block *this, unsigned long event, 3486 void *ptr) 3487 { 3488 struct 
net_device *real_dev = netdev_notifier_info_to_dev(ptr); 3489 LIST_HEAD(head); 3490 3491 if (!is_macsec_master(real_dev)) 3492 return NOTIFY_DONE; 3493 3494 switch (event) { 3495 case NETDEV_DOWN: 3496 case NETDEV_UP: 3497 case NETDEV_CHANGE: { 3498 struct macsec_dev *m, *n; 3499 struct macsec_rxh_data *rxd; 3500 3501 rxd = macsec_data_rtnl(real_dev); 3502 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3503 struct net_device *dev = m->secy.netdev; 3504 3505 netif_stacked_transfer_operstate(real_dev, dev); 3506 } 3507 break; 3508 } 3509 case NETDEV_UNREGISTER: { 3510 struct macsec_dev *m, *n; 3511 struct macsec_rxh_data *rxd; 3512 3513 rxd = macsec_data_rtnl(real_dev); 3514 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3515 macsec_common_dellink(m->secy.netdev, &head); 3516 } 3517 3518 netdev_rx_handler_unregister(real_dev); 3519 kfree(rxd); 3520 3521 unregister_netdevice_many(&head); 3522 break; 3523 } 3524 case NETDEV_CHANGEMTU: { 3525 struct macsec_dev *m; 3526 struct macsec_rxh_data *rxd; 3527 3528 rxd = macsec_data_rtnl(real_dev); 3529 list_for_each_entry(m, &rxd->secys, secys) { 3530 struct net_device *dev = m->secy.netdev; 3531 unsigned int mtu = real_dev->mtu - (m->secy.icv_len + 3532 macsec_extra_len(true)); 3533 3534 if (dev->mtu > mtu) 3535 dev_set_mtu(dev, mtu); 3536 } 3537 } 3538 } 3539 3540 return NOTIFY_OK; 3541 } 3542 3543 static struct notifier_block macsec_notifier = { 3544 .notifier_call = macsec_notify, 3545 }; 3546 3547 static int __init macsec_init(void) 3548 { 3549 int err; 3550 3551 pr_info("MACsec IEEE 802.1AE\n"); 3552 err = register_netdevice_notifier(&macsec_notifier); 3553 if (err) 3554 return err; 3555 3556 err = rtnl_link_register(&macsec_link_ops); 3557 if (err) 3558 goto notifier; 3559 3560 err = genl_register_family(&macsec_fam); 3561 if (err) 3562 goto rtnl; 3563 3564 return 0; 3565 3566 rtnl: 3567 rtnl_link_unregister(&macsec_link_ops); 3568 notifier: 3569 unregister_netdevice_notifier(&macsec_notifier); 3570 return err; 3571 } 3572 3573 static void __exit macsec_exit(void) 3574 { 3575 genl_unregister_family(&macsec_fam); 3576 rtnl_link_unregister(&macsec_link_ops); 3577 unregister_netdevice_notifier(&macsec_notifier); 3578 rcu_barrier(); 3579 } 3580 3581 module_init(macsec_init); 3582 module_exit(macsec_exit); 3583 3584 MODULE_ALIAS_RTNL_LINK("macsec"); 3585 MODULE_ALIAS_GENL_FAMILY("macsec"); 3586 3587 MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 3588 MODULE_LICENSE("GPL v2"); 3589
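/*
 * Illustrative only, not part of this driver: a minimal userspace sketch of
 * driving the genetlink interface above, assuming libnl-3 (libnl-genl).  It
 * bumps the packet number of TX SA 0 via MACSEC_CMD_UPD_TXSA, matching the
 * constraints enforced by validate_upd_sa() (AN present, PN non-zero, no key
 * material).  The function name upd_txsa_pn and the macsec0_ifindex parameter
 * are placeholders; error handling is trimmed for brevity.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_macsec.h>

static int upd_txsa_pn(int macsec0_ifindex, __u32 new_pn)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *sa;
	int family, err;

	if (!sk || !msg)
		return -1;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);

	/* Top-level attributes: target device + nested SA config. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    MACSEC_CMD_UPD_TXSA, MACSEC_GENL_VERSION);
	nla_put_u32(msg, MACSEC_ATTR_IFINDEX, macsec0_ifindex);

	sa = nla_nest_start(msg, MACSEC_ATTR_SA_CONFIG);
	nla_put_u8(msg, MACSEC_SA_ATTR_AN, 0);
	nla_put_u32(msg, MACSEC_SA_ATTR_PN, new_pn);
	nla_nest_end(msg, sa);

	/* Send the request and wait for the kernel's ack. */
	err = nl_send_sync(sk, msg);
	nl_socket_free(sk);
	return err;
}
#endif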