1 /* 2 * drivers/net/macsec.c - MACsec device 3 * 4 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 */ 11 12 #include <linux/types.h> 13 #include <linux/skbuff.h> 14 #include <linux/socket.h> 15 #include <linux/module.h> 16 #include <crypto/aead.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rtnetlink.h> 19 #include <net/genetlink.h> 20 #include <net/sock.h> 21 #include <net/gro_cells.h> 22 23 #include <uapi/linux/if_macsec.h> 24 25 typedef u64 __bitwise sci_t; 26 27 #define MACSEC_SCI_LEN 8 28 29 /* SecTAG length = macsec_eth_header without the optional SCI */ 30 #define MACSEC_TAG_LEN 6 31 32 struct macsec_eth_header { 33 struct ethhdr eth; 34 /* SecTAG */ 35 u8 tci_an; 36 #if defined(__LITTLE_ENDIAN_BITFIELD) 37 u8 short_length:6, 38 unused:2; 39 #elif defined(__BIG_ENDIAN_BITFIELD) 40 u8 unused:2, 41 short_length:6; 42 #else 43 #error "Please fix <asm/byteorder.h>" 44 #endif 45 __be32 packet_number; 46 u8 secure_channel_id[8]; /* optional */ 47 } __packed; 48 49 #define MACSEC_TCI_VERSION 0x80 50 #define MACSEC_TCI_ES 0x40 /* end station */ 51 #define MACSEC_TCI_SC 0x20 /* SCI present */ 52 #define MACSEC_TCI_SCB 0x10 /* epon */ 53 #define MACSEC_TCI_E 0x08 /* encryption */ 54 #define MACSEC_TCI_C 0x04 /* changed text */ 55 #define MACSEC_AN_MASK 0x03 /* association number */ 56 #define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) 57 58 /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ 59 #define MIN_NON_SHORT_LEN 48 60 61 #define GCM_AES_IV_LEN 12 62 #define DEFAULT_ICV_LEN 16 63 64 #define MACSEC_NUM_AN 4 /* 2 bits for the association number */ 65 66 #define for_each_rxsc(secy, sc) \ 67 for (sc = rcu_dereference_bh(secy->rx_sc); \ 68 sc; \ 69 sc = rcu_dereference_bh(sc->next)) 70 #define for_each_rxsc_rtnl(secy, sc) \ 71 for (sc = rtnl_dereference(secy->rx_sc); \ 72 sc; \ 73 sc = rtnl_dereference(sc->next)) 74 75 struct gcm_iv { 76 union { 77 u8 secure_channel_id[8]; 78 sci_t sci; 79 }; 80 __be32 pn; 81 }; 82 83 /** 84 * struct macsec_key - SA key 85 * @id: user-provided key identifier 86 * @tfm: crypto struct, key storage 87 */ 88 struct macsec_key { 89 u8 id[MACSEC_KEYID_LEN]; 90 struct crypto_aead *tfm; 91 }; 92 93 struct macsec_rx_sc_stats { 94 __u64 InOctetsValidated; 95 __u64 InOctetsDecrypted; 96 __u64 InPktsUnchecked; 97 __u64 InPktsDelayed; 98 __u64 InPktsOK; 99 __u64 InPktsInvalid; 100 __u64 InPktsLate; 101 __u64 InPktsNotValid; 102 __u64 InPktsNotUsingSA; 103 __u64 InPktsUnusedSA; 104 }; 105 106 struct macsec_rx_sa_stats { 107 __u32 InPktsOK; 108 __u32 InPktsInvalid; 109 __u32 InPktsNotValid; 110 __u32 InPktsNotUsingSA; 111 __u32 InPktsUnusedSA; 112 }; 113 114 struct macsec_tx_sa_stats { 115 __u32 OutPktsProtected; 116 __u32 OutPktsEncrypted; 117 }; 118 119 struct macsec_tx_sc_stats { 120 __u64 OutPktsProtected; 121 __u64 OutPktsEncrypted; 122 __u64 OutOctetsProtected; 123 __u64 OutOctetsEncrypted; 124 }; 125 126 struct macsec_dev_stats { 127 __u64 OutPktsUntagged; 128 __u64 InPktsUntagged; 129 __u64 OutPktsTooLong; 130 __u64 InPktsNoTag; 131 __u64 InPktsBadTag; 132 __u64 InPktsUnknownSCI; 133 __u64 InPktsNoSCI; 134 __u64 InPktsOverrun; 135 }; 136 137 /** 138 * struct macsec_rx_sa - receive secure association 139 * @active: 140 * @next_pn: packet number expected for the next packet 141 * @lock: protects next_pn manipulations 142 * @key: key structure 143 * @stats: per-SA stats 144 */ 145 struct macsec_rx_sa { 146 struct macsec_key key; 147 spinlock_t lock; 148 u32 next_pn; 149 atomic_t refcnt; 150 bool active; 151 struct macsec_rx_sa_stats __percpu *stats; 152 struct macsec_rx_sc *sc; 153 struct rcu_head rcu; 154 }; 155 156 struct pcpu_rx_sc_stats { 157 struct macsec_rx_sc_stats stats; 158 struct u64_stats_sync syncp; 159 }; 160 161 /** 162 * struct macsec_rx_sc - receive secure channel 163 * @sci: secure channel identifier for this SC 164 * @active: channel is active 165 * @sa: array of secure associations 166 * @stats: per-SC stats 167 */ 168 struct macsec_rx_sc { 169 struct macsec_rx_sc __rcu *next; 170 sci_t sci; 171 bool active; 172 struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN]; 173 struct pcpu_rx_sc_stats __percpu *stats; 174 atomic_t refcnt; 175 struct rcu_head rcu_head; 176 }; 177 178 /** 179 * struct macsec_tx_sa - transmit secure association 180 * @active: 181 * @next_pn: packet number to use for the next packet 182 * @lock: protects next_pn manipulations 183 * @key: key structure 184 * @stats: per-SA stats 185 */ 186 struct macsec_tx_sa { 187 struct macsec_key key; 188 spinlock_t lock; 189 u32 next_pn; 190 atomic_t refcnt; 191 bool active; 192 struct macsec_tx_sa_stats __percpu *stats; 193 struct rcu_head rcu; 194 }; 195 196 struct pcpu_tx_sc_stats { 197 struct macsec_tx_sc_stats stats; 198 struct u64_stats_sync syncp; 199 }; 200 201 /** 202 * struct macsec_tx_sc - transmit secure channel 203 * @active: 204 * @encoding_sa: association number of the SA currently in use 205 * @encrypt: encrypt packets on transmit, or authenticate only 206 * @send_sci: always include the SCI in the SecTAG 207 * @end_station: 208 * @scb: single copy broadcast flag 209 * @sa: array of secure associations 210 * @stats: stats for this TXSC 211 */ 212 struct macsec_tx_sc { 213 bool active; 214 u8 encoding_sa; 215 bool encrypt; 216 bool send_sci; 217 bool end_station; 218 bool scb; 219 struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN]; 220 struct pcpu_tx_sc_stats __percpu *stats; 221 }; 222 223 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT 224 225 /** 226 * struct macsec_secy - MACsec Security Entity 227 * @netdev: netdevice for this SecY 228 * @n_rx_sc: number of receive secure channels configured on this SecY 229 * @sci: secure channel identifier used for tx 230 * @key_len: length of keys used by the cipher suite 231 * @icv_len: length of ICV used by the cipher suite 232 * @validate_frames: validation mode 233 * @operational: MAC_Operational flag 234 * @protect_frames: enable protection for this SecY 235 * @replay_protect: enable packet number checks on receive 236 * @replay_window: size of the replay window 237 * @tx_sc: transmit secure channel 238 * @rx_sc: linked list of receive secure channels 239 */ 240 struct macsec_secy { 241 struct net_device *netdev; 242 unsigned int n_rx_sc; 243 sci_t sci; 244 u16 key_len; 245 u16 icv_len; 246 enum macsec_validation_type validate_frames; 247 bool operational; 248 bool protect_frames; 249 bool replay_protect; 250 u32 replay_window; 251 struct macsec_tx_sc tx_sc; 252 struct macsec_rx_sc __rcu *rx_sc; 253 }; 254 255 struct pcpu_secy_stats { 256 struct macsec_dev_stats stats; 257 struct u64_stats_sync syncp; 258 }; 259 260 /** 261 * struct macsec_dev - private data 262 * @secy: SecY config 263 * @real_dev: pointer to underlying netdevice 264 * @stats: MACsec device stats 265 * @secys: linked list of SecY's on the underlying device 266 */ 267 struct macsec_dev { 268 struct macsec_secy secy; 269 struct net_device *real_dev; 270 struct pcpu_secy_stats __percpu *stats; 271 struct list_head secys; 272 struct gro_cells gro_cells; 273 unsigned int nest_level; 274 }; 275 276 /** 277 * struct macsec_rxh_data - rx_handler private argument 278 * @secys: linked list of SecY's on this underlying device 279 */ 280 struct macsec_rxh_data { 281 struct list_head secys; 282 }; 283 284 static struct macsec_dev *macsec_priv(const struct net_device *dev) 285 { 286 return (struct macsec_dev *)netdev_priv(dev); 287 } 288 289 static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev) 290 { 291 return rcu_dereference_bh(dev->rx_handler_data); 292 } 293 294 static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev) 295 { 296 return rtnl_dereference(dev->rx_handler_data); 297 } 298 299 struct macsec_cb { 300 struct aead_request *req; 301 union { 302 struct macsec_tx_sa *tx_sa; 303 struct macsec_rx_sa *rx_sa; 304 }; 305 u8 assoc_num; 306 bool valid; 307 bool has_sci; 308 }; 309 310 static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) 311 { 312 struct macsec_rx_sa *sa = rcu_dereference_bh(ptr); 313 314 if (!sa || !sa->active) 315 return NULL; 316 317 if (!atomic_inc_not_zero(&sa->refcnt)) 318 return NULL; 319 320 return sa; 321 } 322 323 static void free_rx_sc_rcu(struct rcu_head *head) 324 { 325 struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); 326 327 free_percpu(rx_sc->stats); 328 kfree(rx_sc); 329 } 330 331 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) 332 { 333 return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL; 334 } 335 336 static void macsec_rxsc_put(struct macsec_rx_sc *sc) 337 { 338 if (atomic_dec_and_test(&sc->refcnt)) 339 call_rcu(&sc->rcu_head, free_rx_sc_rcu); 340 } 341 342 static void free_rxsa(struct rcu_head *head) 343 { 344 struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); 345 346 crypto_free_aead(sa->key.tfm); 347 free_percpu(sa->stats); 348 kfree(sa); 349 } 350 351 static void macsec_rxsa_put(struct macsec_rx_sa *sa) 352 { 353 if (atomic_dec_and_test(&sa->refcnt)) 354 call_rcu(&sa->rcu, free_rxsa); 355 } 356 357 static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) 358 { 359 struct macsec_tx_sa *sa = rcu_dereference_bh(ptr); 360 361 if (!sa || !sa->active) 362 return NULL; 363 364 if (!atomic_inc_not_zero(&sa->refcnt)) 365 return NULL; 366 367 return sa; 368 } 369 370 static void free_txsa(struct rcu_head *head) 371 { 372 struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); 373 374 crypto_free_aead(sa->key.tfm); 375 free_percpu(sa->stats); 376 kfree(sa); 377 } 378 379 static void macsec_txsa_put(struct macsec_tx_sa *sa) 380 { 381 if (atomic_dec_and_test(&sa->refcnt)) 382 call_rcu(&sa->rcu, free_txsa); 383 } 384 385 static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) 386 { 387 BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb)); 388 return (struct macsec_cb *)skb->cb; 389 } 390 391 #define MACSEC_PORT_ES (htons(0x0001)) 392 #define MACSEC_PORT_SCB (0x0000) 393 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) 394 395 #define DEFAULT_SAK_LEN 16 396 #define DEFAULT_SEND_SCI true 397 #define DEFAULT_ENCRYPT false 398 #define DEFAULT_ENCODING_SA 0 399 400 static bool send_sci(const struct macsec_secy *secy) 401 { 402 const struct macsec_tx_sc *tx_sc = &secy->tx_sc; 403 404 return tx_sc->send_sci || 405 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); 406 } 407 408 static sci_t make_sci(u8 *addr, __be16 port) 409 { 410 sci_t sci; 411 412 memcpy(&sci, addr, ETH_ALEN); 413 memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port)); 414 415 return sci; 416 } 417 418 static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) 419 { 420 sci_t sci; 421 422 if (sci_present) 423 memcpy(&sci, hdr->secure_channel_id, 424 sizeof(hdr->secure_channel_id)); 425 else 426 sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); 427 428 return sci; 429 } 430 431 static unsigned int macsec_sectag_len(bool sci_present) 432 { 433 return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); 434 } 435 436 static unsigned int macsec_hdr_len(bool sci_present) 437 { 438 return macsec_sectag_len(sci_present) + ETH_HLEN; 439 } 440 441 static unsigned int macsec_extra_len(bool sci_present) 442 { 443 return macsec_sectag_len(sci_present) + sizeof(__be16); 444 } 445 446 /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ 447 static void macsec_fill_sectag(struct macsec_eth_header *h, 448 const struct macsec_secy *secy, u32 pn, 449 bool sci_present) 450 { 451 const struct macsec_tx_sc *tx_sc = &secy->tx_sc; 452 453 memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); 454 h->eth.h_proto = htons(ETH_P_MACSEC); 455 456 if (sci_present) { 457 h->tci_an |= MACSEC_TCI_SC; 458 memcpy(&h->secure_channel_id, &secy->sci, 459 sizeof(h->secure_channel_id)); 460 } else { 461 if (tx_sc->end_station) 462 h->tci_an |= MACSEC_TCI_ES; 463 if (tx_sc->scb) 464 h->tci_an |= MACSEC_TCI_SCB; 465 } 466 467 h->packet_number = htonl(pn); 468 469 /* with GCM, C/E clear for !encrypt, both set for encrypt */ 470 if (tx_sc->encrypt) 471 h->tci_an |= MACSEC_TCI_CONFID; 472 else if (secy->icv_len != DEFAULT_ICV_LEN) 473 h->tci_an |= MACSEC_TCI_C; 474 475 h->tci_an |= tx_sc->encoding_sa; 476 } 477 478 static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len) 479 { 480 if (data_len < MIN_NON_SHORT_LEN) 481 h->short_length = data_len; 482 } 483 484 /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */ 485 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) 486 { 487 struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; 488 int len = skb->len - 2 * ETH_ALEN; 489 int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len; 490 491 /* a) It comprises at least 17 octets */ 492 if (skb->len <= 16) 493 return false; 494 495 /* b) MACsec EtherType: already checked */ 496 497 /* c) V bit is clear */ 498 if (h->tci_an & MACSEC_TCI_VERSION) 499 return false; 500 501 /* d) ES or SCB => !SC */ 502 if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) && 503 (h->tci_an & MACSEC_TCI_SC)) 504 return false; 505 506 /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */ 507 if (h->unused) 508 return false; 509 510 /* rx.pn != 0 (figure 10-5) */ 511 if (!h->packet_number) 512 return false; 513 514 /* length check, f) g) h) i) */ 515 if (h->short_length) 516 return len == extra_len + h->short_length; 517 return len >= extra_len + MIN_NON_SHORT_LEN; 518 } 519 520 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) 521 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN 522 523 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) 524 { 525 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; 526 527 gcm_iv->sci = sci; 528 gcm_iv->pn = htonl(pn); 529 } 530 531 static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) 532 { 533 return (struct macsec_eth_header *)skb_mac_header(skb); 534 } 535 536 static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy) 537 { 538 u32 pn; 539 540 spin_lock_bh(&tx_sa->lock); 541 pn = tx_sa->next_pn; 542 543 tx_sa->next_pn++; 544 if (tx_sa->next_pn == 0) { 545 pr_debug("PN wrapped, transitioning to !oper\n"); 546 tx_sa->active = false; 547 if (secy->protect_frames) 548 secy->operational = false; 549 } 550 spin_unlock_bh(&tx_sa->lock); 551 552 return pn; 553 } 554 555 static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) 556 { 557 struct macsec_dev *macsec = netdev_priv(dev); 558 559 skb->dev = macsec->real_dev; 560 skb_reset_mac_header(skb); 561 skb->protocol = eth_hdr(skb)->h_proto; 562 } 563 564 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, 565 struct macsec_tx_sa *tx_sa) 566 { 567 struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); 568 569 u64_stats_update_begin(&txsc_stats->syncp); 570 if (tx_sc->encrypt) { 571 txsc_stats->stats.OutOctetsEncrypted += skb->len; 572 txsc_stats->stats.OutPktsEncrypted++; 573 this_cpu_inc(tx_sa->stats->OutPktsEncrypted); 574 } else { 575 txsc_stats->stats.OutOctetsProtected += skb->len; 576 txsc_stats->stats.OutPktsProtected++; 577 this_cpu_inc(tx_sa->stats->OutPktsProtected); 578 } 579 u64_stats_update_end(&txsc_stats->syncp); 580 } 581 582 static void count_tx(struct net_device *dev, int ret, int len) 583 { 584 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 585 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); 586 587 u64_stats_update_begin(&stats->syncp); 588 stats->tx_packets++; 589 stats->tx_bytes += len; 590 u64_stats_update_end(&stats->syncp); 591 } else { 592 dev->stats.tx_dropped++; 593 } 594 } 595 596 static void macsec_encrypt_done(struct crypto_async_request *base, int err) 597 { 598 struct sk_buff *skb = base->data; 599 struct net_device *dev = skb->dev; 600 struct macsec_dev *macsec = macsec_priv(dev); 601 struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; 602 int len, ret; 603 604 aead_request_free(macsec_skb_cb(skb)->req); 605 606 rcu_read_lock_bh(); 607 macsec_encrypt_finish(skb, dev); 608 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 609 len = skb->len; 610 ret = dev_queue_xmit(skb); 611 count_tx(dev, ret, len); 612 rcu_read_unlock_bh(); 613 614 macsec_txsa_put(sa); 615 dev_put(dev); 616 } 617 618 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, 619 unsigned char **iv, 620 struct scatterlist **sg) 621 { 622 size_t size, iv_offset, sg_offset; 623 struct aead_request *req; 624 void *tmp; 625 626 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm); 627 iv_offset = size; 628 size += GCM_AES_IV_LEN; 629 630 size = ALIGN(size, __alignof__(struct scatterlist)); 631 sg_offset = size; 632 size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1); 633 634 tmp = kmalloc(size, GFP_ATOMIC); 635 if (!tmp) 636 return NULL; 637 638 *iv = (unsigned char *)(tmp + iv_offset); 639 *sg = (struct scatterlist *)(tmp + sg_offset); 640 req = tmp; 641 642 aead_request_set_tfm(req, tfm); 643 644 return req; 645 } 646 647 static struct sk_buff *macsec_encrypt(struct sk_buff *skb, 648 struct net_device *dev) 649 { 650 int ret; 651 struct scatterlist *sg; 652 unsigned char *iv; 653 struct ethhdr *eth; 654 struct macsec_eth_header *hh; 655 size_t unprotected_len; 656 struct aead_request *req; 657 struct macsec_secy *secy; 658 struct macsec_tx_sc *tx_sc; 659 struct macsec_tx_sa *tx_sa; 660 struct macsec_dev *macsec = macsec_priv(dev); 661 bool sci_present; 662 u32 pn; 663 664 secy = &macsec->secy; 665 tx_sc = &secy->tx_sc; 666 667 /* 10.5.1 TX SA assignment */ 668 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); 669 if (!tx_sa) { 670 secy->operational = false; 671 kfree_skb(skb); 672 return ERR_PTR(-EINVAL); 673 } 674 675 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 676 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 677 struct sk_buff *nskb = skb_copy_expand(skb, 678 MACSEC_NEEDED_HEADROOM, 679 MACSEC_NEEDED_TAILROOM, 680 GFP_ATOMIC); 681 if (likely(nskb)) { 682 consume_skb(skb); 683 skb = nskb; 684 } else { 685 macsec_txsa_put(tx_sa); 686 kfree_skb(skb); 687 return ERR_PTR(-ENOMEM); 688 } 689 } else { 690 skb = skb_unshare(skb, GFP_ATOMIC); 691 if (!skb) { 692 macsec_txsa_put(tx_sa); 693 return ERR_PTR(-ENOMEM); 694 } 695 } 696 697 unprotected_len = skb->len; 698 eth = eth_hdr(skb); 699 sci_present = send_sci(secy); 700 hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present)); 701 memmove(hh, eth, 2 * ETH_ALEN); 702 703 pn = tx_sa_update_pn(tx_sa, secy); 704 if (pn == 0) { 705 macsec_txsa_put(tx_sa); 706 kfree_skb(skb); 707 return ERR_PTR(-ENOLINK); 708 } 709 macsec_fill_sectag(hh, secy, pn, sci_present); 710 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 711 712 skb_put(skb, secy->icv_len); 713 714 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { 715 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 716 717 u64_stats_update_begin(&secy_stats->syncp); 718 secy_stats->stats.OutPktsTooLong++; 719 u64_stats_update_end(&secy_stats->syncp); 720 721 macsec_txsa_put(tx_sa); 722 kfree_skb(skb); 723 return ERR_PTR(-EINVAL); 724 } 725 726 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg); 727 if (!req) { 728 macsec_txsa_put(tx_sa); 729 kfree_skb(skb); 730 return ERR_PTR(-ENOMEM); 731 } 732 733 macsec_fill_iv(iv, secy->sci, pn); 734 735 sg_init_table(sg, MAX_SKB_FRAGS + 1); 736 skb_to_sgvec(skb, sg, 0, skb->len); 737 738 if (tx_sc->encrypt) { 739 int len = skb->len - macsec_hdr_len(sci_present) - 740 secy->icv_len; 741 aead_request_set_crypt(req, sg, sg, len, iv); 742 aead_request_set_ad(req, macsec_hdr_len(sci_present)); 743 } else { 744 aead_request_set_crypt(req, sg, sg, 0, iv); 745 aead_request_set_ad(req, skb->len - secy->icv_len); 746 } 747 748 macsec_skb_cb(skb)->req = req; 749 macsec_skb_cb(skb)->tx_sa = tx_sa; 750 aead_request_set_callback(req, 0, macsec_encrypt_done, skb); 751 752 dev_hold(skb->dev); 753 ret = crypto_aead_encrypt(req); 754 if (ret == -EINPROGRESS) { 755 return ERR_PTR(ret); 756 } else if (ret != 0) { 757 dev_put(skb->dev); 758 kfree_skb(skb); 759 aead_request_free(req); 760 macsec_txsa_put(tx_sa); 761 return ERR_PTR(-EINVAL); 762 } 763 764 dev_put(skb->dev); 765 aead_request_free(req); 766 macsec_txsa_put(tx_sa); 767 768 return skb; 769 } 770 771 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) 772 { 773 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 774 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); 775 struct macsec_eth_header *hdr = macsec_ethhdr(skb); 776 u32 lowest_pn = 0; 777 778 spin_lock(&rx_sa->lock); 779 if (rx_sa->next_pn >= secy->replay_window) 780 lowest_pn = rx_sa->next_pn - secy->replay_window; 781 782 /* Now perform replay protection check again 783 * (see IEEE 802.1AE-2006 figure 10-5) 784 */ 785 if (secy->replay_protect && pn < lowest_pn) { 786 spin_unlock(&rx_sa->lock); 787 u64_stats_update_begin(&rxsc_stats->syncp); 788 rxsc_stats->stats.InPktsLate++; 789 u64_stats_update_end(&rxsc_stats->syncp); 790 return false; 791 } 792 793 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { 794 u64_stats_update_begin(&rxsc_stats->syncp); 795 if (hdr->tci_an & MACSEC_TCI_E) 796 rxsc_stats->stats.InOctetsDecrypted += skb->len; 797 else 798 rxsc_stats->stats.InOctetsValidated += skb->len; 799 u64_stats_update_end(&rxsc_stats->syncp); 800 } 801 802 if (!macsec_skb_cb(skb)->valid) { 803 spin_unlock(&rx_sa->lock); 804 805 /* 10.6.5 */ 806 if (hdr->tci_an & MACSEC_TCI_C || 807 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 808 u64_stats_update_begin(&rxsc_stats->syncp); 809 rxsc_stats->stats.InPktsNotValid++; 810 u64_stats_update_end(&rxsc_stats->syncp); 811 return false; 812 } 813 814 u64_stats_update_begin(&rxsc_stats->syncp); 815 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { 816 rxsc_stats->stats.InPktsInvalid++; 817 this_cpu_inc(rx_sa->stats->InPktsInvalid); 818 } else if (pn < lowest_pn) { 819 rxsc_stats->stats.InPktsDelayed++; 820 } else { 821 rxsc_stats->stats.InPktsUnchecked++; 822 } 823 u64_stats_update_end(&rxsc_stats->syncp); 824 } else { 825 u64_stats_update_begin(&rxsc_stats->syncp); 826 if (pn < lowest_pn) { 827 rxsc_stats->stats.InPktsDelayed++; 828 } else { 829 rxsc_stats->stats.InPktsOK++; 830 this_cpu_inc(rx_sa->stats->InPktsOK); 831 } 832 u64_stats_update_end(&rxsc_stats->syncp); 833 834 if (pn >= rx_sa->next_pn) 835 rx_sa->next_pn = pn + 1; 836 spin_unlock(&rx_sa->lock); 837 } 838 839 return true; 840 } 841 842 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) 843 { 844 skb->pkt_type = PACKET_HOST; 845 skb->protocol = eth_type_trans(skb, dev); 846 847 skb_reset_network_header(skb); 848 if (!skb_transport_header_was_set(skb)) 849 skb_reset_transport_header(skb); 850 skb_reset_mac_len(skb); 851 } 852 853 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) 854 { 855 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); 856 skb_pull(skb, hdr_len); 857 pskb_trim_unique(skb, skb->len - icv_len); 858 } 859 860 static void count_rx(struct net_device *dev, int len) 861 { 862 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); 863 864 u64_stats_update_begin(&stats->syncp); 865 stats->rx_packets++; 866 stats->rx_bytes += len; 867 u64_stats_update_end(&stats->syncp); 868 } 869 870 static void macsec_decrypt_done(struct crypto_async_request *base, int err) 871 { 872 struct sk_buff *skb = base->data; 873 struct net_device *dev = skb->dev; 874 struct macsec_dev *macsec = macsec_priv(dev); 875 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 876 struct macsec_rx_sc *rx_sc = rx_sa->sc; 877 int len, ret; 878 u32 pn; 879 880 aead_request_free(macsec_skb_cb(skb)->req); 881 882 rcu_read_lock_bh(); 883 pn = ntohl(macsec_ethhdr(skb)->packet_number); 884 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { 885 rcu_read_unlock_bh(); 886 kfree_skb(skb); 887 goto out; 888 } 889 890 macsec_finalize_skb(skb, macsec->secy.icv_len, 891 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 892 macsec_reset_skb(skb, macsec->secy.netdev); 893 894 len = skb->len; 895 ret = gro_cells_receive(&macsec->gro_cells, skb); 896 if (ret == NET_RX_SUCCESS) 897 count_rx(dev, len); 898 else 899 macsec->secy.netdev->stats.rx_dropped++; 900 901 rcu_read_unlock_bh(); 902 903 out: 904 macsec_rxsa_put(rx_sa); 905 macsec_rxsc_put(rx_sc); 906 dev_put(dev); 907 } 908 909 static struct sk_buff *macsec_decrypt(struct sk_buff *skb, 910 struct net_device *dev, 911 struct macsec_rx_sa *rx_sa, 912 sci_t sci, 913 struct macsec_secy *secy) 914 { 915 int ret; 916 struct scatterlist *sg; 917 unsigned char *iv; 918 struct aead_request *req; 919 struct macsec_eth_header *hdr; 920 u16 icv_len = secy->icv_len; 921 922 macsec_skb_cb(skb)->valid = false; 923 skb = skb_share_check(skb, GFP_ATOMIC); 924 if (!skb) 925 return ERR_PTR(-ENOMEM); 926 927 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg); 928 if (!req) { 929 kfree_skb(skb); 930 return ERR_PTR(-ENOMEM); 931 } 932 933 hdr = (struct macsec_eth_header *)skb->data; 934 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); 935 936 sg_init_table(sg, MAX_SKB_FRAGS + 1); 937 skb_to_sgvec(skb, sg, 0, skb->len); 938 939 if (hdr->tci_an & MACSEC_TCI_E) { 940 /* confidentiality: ethernet + macsec header 941 * authenticated, encrypted payload 942 */ 943 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); 944 945 aead_request_set_crypt(req, sg, sg, len, iv); 946 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); 947 skb = skb_unshare(skb, GFP_ATOMIC); 948 if (!skb) { 949 aead_request_free(req); 950 return ERR_PTR(-ENOMEM); 951 } 952 } else { 953 /* integrity only: all headers + data authenticated */ 954 aead_request_set_crypt(req, sg, sg, icv_len, iv); 955 aead_request_set_ad(req, skb->len - icv_len); 956 } 957 958 macsec_skb_cb(skb)->req = req; 959 skb->dev = dev; 960 aead_request_set_callback(req, 0, macsec_decrypt_done, skb); 961 962 dev_hold(dev); 963 ret = crypto_aead_decrypt(req); 964 if (ret == -EINPROGRESS) { 965 return ERR_PTR(ret); 966 } else if (ret != 0) { 967 /* decryption/authentication failed 968 * 10.6 if validateFrames is disabled, deliver anyway 969 */ 970 if (ret != -EBADMSG) { 971 kfree_skb(skb); 972 skb = ERR_PTR(ret); 973 } 974 } else { 975 macsec_skb_cb(skb)->valid = true; 976 } 977 dev_put(dev); 978 979 aead_request_free(req); 980 981 return skb; 982 } 983 984 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) 985 { 986 struct macsec_rx_sc *rx_sc; 987 988 for_each_rxsc(secy, rx_sc) { 989 if (rx_sc->sci == sci) 990 return rx_sc; 991 } 992 993 return NULL; 994 } 995 996 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) 997 { 998 struct macsec_rx_sc *rx_sc; 999 1000 for_each_rxsc_rtnl(secy, rx_sc) { 1001 if (rx_sc->sci == sci) 1002 return rx_sc; 1003 } 1004 1005 return NULL; 1006 } 1007 1008 static void handle_not_macsec(struct sk_buff *skb) 1009 { 1010 struct macsec_rxh_data *rxd; 1011 struct macsec_dev *macsec; 1012 1013 rcu_read_lock(); 1014 rxd = macsec_data_rcu(skb->dev); 1015 1016 /* 10.6 If the management control validateFrames is not 1017 * Strict, frames without a SecTAG are received, counted, and 1018 * delivered to the Controlled Port 1019 */ 1020 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1021 struct sk_buff *nskb; 1022 int ret; 1023 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 1024 1025 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1026 u64_stats_update_begin(&secy_stats->syncp); 1027 secy_stats->stats.InPktsNoTag++; 1028 u64_stats_update_end(&secy_stats->syncp); 1029 continue; 1030 } 1031 1032 /* deliver on this port */ 1033 nskb = skb_clone(skb, GFP_ATOMIC); 1034 if (!nskb) 1035 break; 1036 1037 nskb->dev = macsec->secy.netdev; 1038 1039 ret = netif_rx(nskb); 1040 if (ret == NET_RX_SUCCESS) { 1041 u64_stats_update_begin(&secy_stats->syncp); 1042 secy_stats->stats.InPktsUntagged++; 1043 u64_stats_update_end(&secy_stats->syncp); 1044 } else { 1045 macsec->secy.netdev->stats.rx_dropped++; 1046 } 1047 } 1048 1049 rcu_read_unlock(); 1050 } 1051 1052 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 1053 { 1054 struct sk_buff *skb = *pskb; 1055 struct net_device *dev = skb->dev; 1056 struct macsec_eth_header *hdr; 1057 struct macsec_secy *secy = NULL; 1058 struct macsec_rx_sc *rx_sc; 1059 struct macsec_rx_sa *rx_sa; 1060 struct macsec_rxh_data *rxd; 1061 struct macsec_dev *macsec; 1062 sci_t sci; 1063 u32 pn; 1064 bool cbit; 1065 struct pcpu_rx_sc_stats *rxsc_stats; 1066 struct pcpu_secy_stats *secy_stats; 1067 bool pulled_sci; 1068 int ret; 1069 1070 if (skb_headroom(skb) < ETH_HLEN) 1071 goto drop_direct; 1072 1073 hdr = macsec_ethhdr(skb); 1074 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) { 1075 handle_not_macsec(skb); 1076 1077 /* and deliver to the uncontrolled port */ 1078 return RX_HANDLER_PASS; 1079 } 1080 1081 skb = skb_unshare(skb, GFP_ATOMIC); 1082 if (!skb) { 1083 *pskb = NULL; 1084 return RX_HANDLER_CONSUMED; 1085 } 1086 1087 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); 1088 if (!pulled_sci) { 1089 if (!pskb_may_pull(skb, macsec_extra_len(false))) 1090 goto drop_direct; 1091 } 1092 1093 hdr = macsec_ethhdr(skb); 1094 1095 /* Frames with a SecTAG that has the TCI E bit set but the C 1096 * bit clear are discarded, as this reserved encoding is used 1097 * to identify frames with a SecTAG that are not to be 1098 * delivered to the Controlled Port. 1099 */ 1100 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) 1101 return RX_HANDLER_PASS; 1102 1103 /* now, pull the extra length */ 1104 if (hdr->tci_an & MACSEC_TCI_SC) { 1105 if (!pulled_sci) 1106 goto drop_direct; 1107 } 1108 1109 /* ethernet header is part of crypto processing */ 1110 skb_push(skb, ETH_HLEN); 1111 1112 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); 1113 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; 1114 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); 1115 1116 rcu_read_lock(); 1117 rxd = macsec_data_rcu(skb->dev); 1118 1119 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1120 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); 1121 sc = sc ? macsec_rxsc_get(sc) : NULL; 1122 1123 if (sc) { 1124 secy = &macsec->secy; 1125 rx_sc = sc; 1126 break; 1127 } 1128 } 1129 1130 if (!secy) 1131 goto nosci; 1132 1133 dev = secy->netdev; 1134 macsec = macsec_priv(dev); 1135 secy_stats = this_cpu_ptr(macsec->stats); 1136 rxsc_stats = this_cpu_ptr(rx_sc->stats); 1137 1138 if (!macsec_validate_skb(skb, secy->icv_len)) { 1139 u64_stats_update_begin(&secy_stats->syncp); 1140 secy_stats->stats.InPktsBadTag++; 1141 u64_stats_update_end(&secy_stats->syncp); 1142 goto drop_nosa; 1143 } 1144 1145 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); 1146 if (!rx_sa) { 1147 /* 10.6.1 if the SA is not in use */ 1148 1149 /* If validateFrames is Strict or the C bit in the 1150 * SecTAG is set, discard 1151 */ 1152 if (hdr->tci_an & MACSEC_TCI_C || 1153 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 1154 u64_stats_update_begin(&rxsc_stats->syncp); 1155 rxsc_stats->stats.InPktsNotUsingSA++; 1156 u64_stats_update_end(&rxsc_stats->syncp); 1157 goto drop_nosa; 1158 } 1159 1160 /* not Strict, the frame (with the SecTAG and ICV 1161 * removed) is delivered to the Controlled Port. 1162 */ 1163 u64_stats_update_begin(&rxsc_stats->syncp); 1164 rxsc_stats->stats.InPktsUnusedSA++; 1165 u64_stats_update_end(&rxsc_stats->syncp); 1166 goto deliver; 1167 } 1168 1169 /* First, PN check to avoid decrypting obviously wrong packets */ 1170 pn = ntohl(hdr->packet_number); 1171 if (secy->replay_protect) { 1172 bool late; 1173 1174 spin_lock(&rx_sa->lock); 1175 late = rx_sa->next_pn >= secy->replay_window && 1176 pn < (rx_sa->next_pn - secy->replay_window); 1177 spin_unlock(&rx_sa->lock); 1178 1179 if (late) { 1180 u64_stats_update_begin(&rxsc_stats->syncp); 1181 rxsc_stats->stats.InPktsLate++; 1182 u64_stats_update_end(&rxsc_stats->syncp); 1183 goto drop; 1184 } 1185 } 1186 1187 macsec_skb_cb(skb)->rx_sa = rx_sa; 1188 1189 /* Disabled && !changed text => skip validation */ 1190 if (hdr->tci_an & MACSEC_TCI_C || 1191 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1192 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1193 1194 if (IS_ERR(skb)) { 1195 /* the decrypt callback needs the reference */ 1196 if (PTR_ERR(skb) != -EINPROGRESS) { 1197 macsec_rxsa_put(rx_sa); 1198 macsec_rxsc_put(rx_sc); 1199 } 1200 rcu_read_unlock(); 1201 *pskb = NULL; 1202 return RX_HANDLER_CONSUMED; 1203 } 1204 1205 if (!macsec_post_decrypt(skb, secy, pn)) 1206 goto drop; 1207 1208 deliver: 1209 macsec_finalize_skb(skb, secy->icv_len, 1210 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1211 macsec_reset_skb(skb, secy->netdev); 1212 1213 if (rx_sa) 1214 macsec_rxsa_put(rx_sa); 1215 macsec_rxsc_put(rx_sc); 1216 1217 ret = gro_cells_receive(&macsec->gro_cells, skb); 1218 if (ret == NET_RX_SUCCESS) 1219 count_rx(dev, skb->len); 1220 else 1221 macsec->secy.netdev->stats.rx_dropped++; 1222 1223 rcu_read_unlock(); 1224 1225 *pskb = NULL; 1226 return RX_HANDLER_CONSUMED; 1227 1228 drop: 1229 macsec_rxsa_put(rx_sa); 1230 drop_nosa: 1231 macsec_rxsc_put(rx_sc); 1232 rcu_read_unlock(); 1233 drop_direct: 1234 kfree_skb(skb); 1235 *pskb = NULL; 1236 return RX_HANDLER_CONSUMED; 1237 1238 nosci: 1239 /* 10.6.1 if the SC is not found */ 1240 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1241 if (!cbit) 1242 macsec_finalize_skb(skb, DEFAULT_ICV_LEN, 1243 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1244 1245 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1246 struct sk_buff *nskb; 1247 1248 secy_stats = this_cpu_ptr(macsec->stats); 1249 1250 /* If validateFrames is Strict or the C bit in the 1251 * SecTAG is set, discard 1252 */ 1253 if (cbit || 1254 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1255 u64_stats_update_begin(&secy_stats->syncp); 1256 secy_stats->stats.InPktsNoSCI++; 1257 u64_stats_update_end(&secy_stats->syncp); 1258 continue; 1259 } 1260 1261 /* not strict, the frame (with the SecTAG and ICV 1262 * removed) is delivered to the Controlled Port. 1263 */ 1264 nskb = skb_clone(skb, GFP_ATOMIC); 1265 if (!nskb) 1266 break; 1267 1268 macsec_reset_skb(nskb, macsec->secy.netdev); 1269 1270 ret = netif_rx(nskb); 1271 if (ret == NET_RX_SUCCESS) { 1272 u64_stats_update_begin(&secy_stats->syncp); 1273 secy_stats->stats.InPktsUnknownSCI++; 1274 u64_stats_update_end(&secy_stats->syncp); 1275 } else { 1276 macsec->secy.netdev->stats.rx_dropped++; 1277 } 1278 } 1279 1280 rcu_read_unlock(); 1281 *pskb = skb; 1282 return RX_HANDLER_PASS; 1283 } 1284 1285 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1286 { 1287 struct crypto_aead *tfm; 1288 int ret; 1289 1290 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1291 1292 if (IS_ERR(tfm)) 1293 return tfm; 1294 1295 ret = crypto_aead_setkey(tfm, key, key_len); 1296 if (ret < 0) 1297 goto fail; 1298 1299 ret = crypto_aead_setauthsize(tfm, icv_len); 1300 if (ret < 0) 1301 goto fail; 1302 1303 return tfm; 1304 fail: 1305 crypto_free_aead(tfm); 1306 return ERR_PTR(ret); 1307 } 1308 1309 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1310 int icv_len) 1311 { 1312 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1313 if (!rx_sa->stats) 1314 return -ENOMEM; 1315 1316 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1317 if (IS_ERR(rx_sa->key.tfm)) { 1318 free_percpu(rx_sa->stats); 1319 return PTR_ERR(rx_sa->key.tfm); 1320 } 1321 1322 rx_sa->active = false; 1323 rx_sa->next_pn = 1; 1324 atomic_set(&rx_sa->refcnt, 1); 1325 spin_lock_init(&rx_sa->lock); 1326 1327 return 0; 1328 } 1329 1330 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1331 { 1332 rx_sa->active = false; 1333 1334 macsec_rxsa_put(rx_sa); 1335 } 1336 1337 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1338 { 1339 int i; 1340 1341 for (i = 0; i < MACSEC_NUM_AN; i++) { 1342 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1343 1344 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1345 if (sa) 1346 clear_rx_sa(sa); 1347 } 1348 1349 macsec_rxsc_put(rx_sc); 1350 } 1351 1352 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1353 { 1354 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1355 1356 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1357 rx_sc; 1358 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1359 if (rx_sc->sci == sci) { 1360 if (rx_sc->active) 1361 secy->n_rx_sc--; 1362 rcu_assign_pointer(*rx_scp, rx_sc->next); 1363 return rx_sc; 1364 } 1365 } 1366 1367 return NULL; 1368 } 1369 1370 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) 1371 { 1372 struct macsec_rx_sc *rx_sc; 1373 struct macsec_dev *macsec; 1374 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1375 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1376 struct macsec_secy *secy; 1377 1378 list_for_each_entry(macsec, &rxd->secys, secys) { 1379 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1380 return ERR_PTR(-EEXIST); 1381 } 1382 1383 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1384 if (!rx_sc) 1385 return ERR_PTR(-ENOMEM); 1386 1387 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1388 if (!rx_sc->stats) { 1389 kfree(rx_sc); 1390 return ERR_PTR(-ENOMEM); 1391 } 1392 1393 rx_sc->sci = sci; 1394 rx_sc->active = true; 1395 atomic_set(&rx_sc->refcnt, 1); 1396 1397 secy = &macsec_priv(dev)->secy; 1398 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1399 rcu_assign_pointer(secy->rx_sc, rx_sc); 1400 1401 if (rx_sc->active) 1402 secy->n_rx_sc++; 1403 1404 return rx_sc; 1405 } 1406 1407 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, 1408 int icv_len) 1409 { 1410 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); 1411 if (!tx_sa->stats) 1412 return -ENOMEM; 1413 1414 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1415 if (IS_ERR(tx_sa->key.tfm)) { 1416 free_percpu(tx_sa->stats); 1417 return PTR_ERR(tx_sa->key.tfm); 1418 } 1419 1420 tx_sa->active = false; 1421 atomic_set(&tx_sa->refcnt, 1); 1422 spin_lock_init(&tx_sa->lock); 1423 1424 return 0; 1425 } 1426 1427 static void clear_tx_sa(struct macsec_tx_sa *tx_sa) 1428 { 1429 tx_sa->active = false; 1430 1431 macsec_txsa_put(tx_sa); 1432 } 1433 1434 static struct genl_family macsec_fam = { 1435 .id = GENL_ID_GENERATE, 1436 .name = MACSEC_GENL_NAME, 1437 .hdrsize = 0, 1438 .version = MACSEC_GENL_VERSION, 1439 .maxattr = MACSEC_ATTR_MAX, 1440 .netnsok = true, 1441 }; 1442 1443 static struct net_device *get_dev_from_nl(struct net *net, 1444 struct nlattr **attrs) 1445 { 1446 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); 1447 struct net_device *dev; 1448 1449 dev = __dev_get_by_index(net, ifindex); 1450 if (!dev) 1451 return ERR_PTR(-ENODEV); 1452 1453 if (!netif_is_macsec(dev)) 1454 return ERR_PTR(-ENODEV); 1455 1456 return dev; 1457 } 1458 1459 static sci_t nla_get_sci(const struct nlattr *nla) 1460 { 1461 return (__force sci_t)nla_get_u64(nla); 1462 } 1463 1464 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, 1465 int padattr) 1466 { 1467 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); 1468 } 1469 1470 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, 1471 struct nlattr **attrs, 1472 struct nlattr **tb_sa, 1473 struct net_device **devp, 1474 struct macsec_secy **secyp, 1475 struct macsec_tx_sc **scp, 1476 u8 *assoc_num) 1477 { 1478 struct net_device *dev; 1479 struct macsec_secy *secy; 1480 struct macsec_tx_sc *tx_sc; 1481 struct macsec_tx_sa *tx_sa; 1482 1483 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1484 return ERR_PTR(-EINVAL); 1485 1486 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1487 1488 dev = get_dev_from_nl(net, attrs); 1489 if (IS_ERR(dev)) 1490 return ERR_CAST(dev); 1491 1492 if (*assoc_num >= MACSEC_NUM_AN) 1493 return ERR_PTR(-EINVAL); 1494 1495 secy = &macsec_priv(dev)->secy; 1496 tx_sc = &secy->tx_sc; 1497 1498 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); 1499 if (!tx_sa) 1500 return ERR_PTR(-ENODEV); 1501 1502 *devp = dev; 1503 *scp = tx_sc; 1504 *secyp = secy; 1505 return tx_sa; 1506 } 1507 1508 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, 1509 struct nlattr **attrs, 1510 struct nlattr **tb_rxsc, 1511 struct net_device **devp, 1512 struct macsec_secy **secyp) 1513 { 1514 struct net_device *dev; 1515 struct macsec_secy *secy; 1516 struct macsec_rx_sc *rx_sc; 1517 sci_t sci; 1518 1519 dev = get_dev_from_nl(net, attrs); 1520 if (IS_ERR(dev)) 1521 return ERR_CAST(dev); 1522 1523 secy = &macsec_priv(dev)->secy; 1524 1525 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1526 return ERR_PTR(-EINVAL); 1527 1528 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1529 rx_sc = find_rx_sc_rtnl(secy, sci); 1530 if (!rx_sc) 1531 return ERR_PTR(-ENODEV); 1532 1533 *secyp = secy; 1534 *devp = dev; 1535 1536 return rx_sc; 1537 } 1538 1539 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, 1540 struct nlattr **attrs, 1541 struct nlattr **tb_rxsc, 1542 struct nlattr **tb_sa, 1543 struct net_device **devp, 1544 struct macsec_secy **secyp, 1545 struct macsec_rx_sc **scp, 1546 u8 *assoc_num) 1547 { 1548 struct macsec_rx_sc *rx_sc; 1549 struct macsec_rx_sa *rx_sa; 1550 1551 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1552 return ERR_PTR(-EINVAL); 1553 1554 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1555 if (*assoc_num >= MACSEC_NUM_AN) 1556 return ERR_PTR(-EINVAL); 1557 1558 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); 1559 if (IS_ERR(rx_sc)) 1560 return ERR_CAST(rx_sc); 1561 1562 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); 1563 if (!rx_sa) 1564 return ERR_PTR(-ENODEV); 1565 1566 *scp = rx_sc; 1567 return rx_sa; 1568 } 1569 1570 1571 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { 1572 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, 1573 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, 1574 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, 1575 }; 1576 1577 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { 1578 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, 1579 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, 1580 }; 1581 1582 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { 1583 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1584 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1585 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, 1586 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, 1587 .len = MACSEC_KEYID_LEN, }, 1588 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1589 .len = MACSEC_MAX_KEY_LEN, }, 1590 }; 1591 1592 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) 1593 { 1594 if (!attrs[MACSEC_ATTR_SA_CONFIG]) 1595 return -EINVAL; 1596 1597 if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], 1598 macsec_genl_sa_policy)) 1599 return -EINVAL; 1600 1601 return 0; 1602 } 1603 1604 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) 1605 { 1606 if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) 1607 return -EINVAL; 1608 1609 if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], 1610 macsec_genl_rxsc_policy)) 1611 return -EINVAL; 1612 1613 return 0; 1614 } 1615 1616 static bool validate_add_rxsa(struct nlattr **attrs) 1617 { 1618 if (!attrs[MACSEC_SA_ATTR_AN] || 1619 !attrs[MACSEC_SA_ATTR_KEY] || 1620 !attrs[MACSEC_SA_ATTR_KEYID]) 1621 return false; 1622 1623 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1624 return false; 1625 1626 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1627 return false; 1628 1629 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1630 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1631 return false; 1632 } 1633 1634 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1635 return false; 1636 1637 return true; 1638 } 1639 1640 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) 1641 { 1642 struct net_device *dev; 1643 struct nlattr **attrs = info->attrs; 1644 struct macsec_secy *secy; 1645 struct macsec_rx_sc *rx_sc; 1646 struct macsec_rx_sa *rx_sa; 1647 unsigned char assoc_num; 1648 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1649 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1650 int err; 1651 1652 if (!attrs[MACSEC_ATTR_IFINDEX]) 1653 return -EINVAL; 1654 1655 if (parse_sa_config(attrs, tb_sa)) 1656 return -EINVAL; 1657 1658 if (parse_rxsc_config(attrs, tb_rxsc)) 1659 return -EINVAL; 1660 1661 if (!validate_add_rxsa(tb_sa)) 1662 return -EINVAL; 1663 1664 rtnl_lock(); 1665 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 1666 if (IS_ERR(rx_sc)) { 1667 rtnl_unlock(); 1668 return PTR_ERR(rx_sc); 1669 } 1670 1671 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1672 1673 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1674 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", 1675 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1676 rtnl_unlock(); 1677 return -EINVAL; 1678 } 1679 1680 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); 1681 if (rx_sa) { 1682 rtnl_unlock(); 1683 return -EBUSY; 1684 } 1685 1686 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1687 if (!rx_sa) { 1688 rtnl_unlock(); 1689 return -ENOMEM; 1690 } 1691 1692 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1693 secy->key_len, secy->icv_len); 1694 if (err < 0) { 1695 kfree(rx_sa); 1696 rtnl_unlock(); 1697 return err; 1698 } 1699 1700 if (tb_sa[MACSEC_SA_ATTR_PN]) { 1701 spin_lock_bh(&rx_sa->lock); 1702 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1703 spin_unlock_bh(&rx_sa->lock); 1704 } 1705 1706 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1707 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1708 1709 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1710 rx_sa->sc = rx_sc; 1711 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1712 1713 rtnl_unlock(); 1714 1715 return 0; 1716 } 1717 1718 static bool validate_add_rxsc(struct nlattr **attrs) 1719 { 1720 if (!attrs[MACSEC_RXSC_ATTR_SCI]) 1721 return false; 1722 1723 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { 1724 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) 1725 return false; 1726 } 1727 1728 return true; 1729 } 1730 1731 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) 1732 { 1733 struct net_device *dev; 1734 sci_t sci = MACSEC_UNDEF_SCI; 1735 struct nlattr **attrs = info->attrs; 1736 struct macsec_rx_sc *rx_sc; 1737 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1738 1739 if (!attrs[MACSEC_ATTR_IFINDEX]) 1740 return -EINVAL; 1741 1742 if (parse_rxsc_config(attrs, tb_rxsc)) 1743 return -EINVAL; 1744 1745 if (!validate_add_rxsc(tb_rxsc)) 1746 return -EINVAL; 1747 1748 rtnl_lock(); 1749 dev = get_dev_from_nl(genl_info_net(info), attrs); 1750 if (IS_ERR(dev)) { 1751 rtnl_unlock(); 1752 return PTR_ERR(dev); 1753 } 1754 1755 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1756 1757 rx_sc = create_rx_sc(dev, sci); 1758 if (IS_ERR(rx_sc)) { 1759 rtnl_unlock(); 1760 return PTR_ERR(rx_sc); 1761 } 1762 1763 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) 1764 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 1765 1766 rtnl_unlock(); 1767 1768 return 0; 1769 } 1770 1771 static bool validate_add_txsa(struct nlattr **attrs) 1772 { 1773 if (!attrs[MACSEC_SA_ATTR_AN] || 1774 !attrs[MACSEC_SA_ATTR_PN] || 1775 !attrs[MACSEC_SA_ATTR_KEY] || 1776 !attrs[MACSEC_SA_ATTR_KEYID]) 1777 return false; 1778 1779 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1780 return false; 1781 1782 if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1783 return false; 1784 1785 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1786 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1787 return false; 1788 } 1789 1790 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1791 return false; 1792 1793 return true; 1794 } 1795 1796 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) 1797 { 1798 struct net_device *dev; 1799 struct nlattr **attrs = info->attrs; 1800 struct macsec_secy *secy; 1801 struct macsec_tx_sc *tx_sc; 1802 struct macsec_tx_sa *tx_sa; 1803 unsigned char assoc_num; 1804 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1805 int err; 1806 1807 if (!attrs[MACSEC_ATTR_IFINDEX]) 1808 return -EINVAL; 1809 1810 if (parse_sa_config(attrs, tb_sa)) 1811 return -EINVAL; 1812 1813 if (!validate_add_txsa(tb_sa)) 1814 return -EINVAL; 1815 1816 rtnl_lock(); 1817 dev = get_dev_from_nl(genl_info_net(info), attrs); 1818 if (IS_ERR(dev)) { 1819 rtnl_unlock(); 1820 return PTR_ERR(dev); 1821 } 1822 1823 secy = &macsec_priv(dev)->secy; 1824 tx_sc = &secy->tx_sc; 1825 1826 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1827 1828 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1829 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", 1830 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1831 rtnl_unlock(); 1832 return -EINVAL; 1833 } 1834 1835 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); 1836 if (tx_sa) { 1837 rtnl_unlock(); 1838 return -EBUSY; 1839 } 1840 1841 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1842 if (!tx_sa) { 1843 rtnl_unlock(); 1844 return -ENOMEM; 1845 } 1846 1847 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1848 secy->key_len, secy->icv_len); 1849 if (err < 0) { 1850 kfree(tx_sa); 1851 rtnl_unlock(); 1852 return err; 1853 } 1854 1855 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1856 1857 spin_lock_bh(&tx_sa->lock); 1858 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1859 spin_unlock_bh(&tx_sa->lock); 1860 1861 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1862 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1863 1864 if (assoc_num == tx_sc->encoding_sa && tx_sa->active) 1865 secy->operational = true; 1866 1867 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); 1868 1869 rtnl_unlock(); 1870 1871 return 0; 1872 } 1873 1874 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) 1875 { 1876 struct nlattr **attrs = info->attrs; 1877 struct net_device *dev; 1878 struct macsec_secy *secy; 1879 struct macsec_rx_sc *rx_sc; 1880 struct macsec_rx_sa *rx_sa; 1881 u8 assoc_num; 1882 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1883 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1884 1885 if (!attrs[MACSEC_ATTR_IFINDEX]) 1886 return -EINVAL; 1887 1888 if (parse_sa_config(attrs, tb_sa)) 1889 return -EINVAL; 1890 1891 if (parse_rxsc_config(attrs, tb_rxsc)) 1892 return -EINVAL; 1893 1894 rtnl_lock(); 1895 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 1896 &dev, &secy, &rx_sc, &assoc_num); 1897 if (IS_ERR(rx_sa)) { 1898 rtnl_unlock(); 1899 return PTR_ERR(rx_sa); 1900 } 1901 1902 if (rx_sa->active) { 1903 rtnl_unlock(); 1904 return -EBUSY; 1905 } 1906 1907 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 1908 clear_rx_sa(rx_sa); 1909 1910 rtnl_unlock(); 1911 1912 return 0; 1913 } 1914 1915 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 1916 { 1917 struct nlattr **attrs = info->attrs; 1918 struct net_device *dev; 1919 struct macsec_secy *secy; 1920 struct macsec_rx_sc *rx_sc; 1921 sci_t sci; 1922 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1923 1924 if (!attrs[MACSEC_ATTR_IFINDEX]) 1925 return -EINVAL; 1926 1927 if (parse_rxsc_config(attrs, tb_rxsc)) 1928 return -EINVAL; 1929 1930 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1931 return -EINVAL; 1932 1933 rtnl_lock(); 1934 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 1935 if (IS_ERR(dev)) { 1936 rtnl_unlock(); 1937 return PTR_ERR(dev); 1938 } 1939 1940 secy = &macsec_priv(dev)->secy; 1941 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1942 1943 rx_sc = del_rx_sc(secy, sci); 1944 if (!rx_sc) { 1945 rtnl_unlock(); 1946 return -ENODEV; 1947 } 1948 1949 free_rx_sc(rx_sc); 1950 rtnl_unlock(); 1951 1952 return 0; 1953 } 1954 1955 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 1956 { 1957 struct nlattr **attrs = info->attrs; 1958 struct net_device *dev; 1959 struct macsec_secy *secy; 1960 struct macsec_tx_sc *tx_sc; 1961 struct macsec_tx_sa *tx_sa; 1962 u8 assoc_num; 1963 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1964 1965 if (!attrs[MACSEC_ATTR_IFINDEX]) 1966 return -EINVAL; 1967 1968 if (parse_sa_config(attrs, tb_sa)) 1969 return -EINVAL; 1970 1971 rtnl_lock(); 1972 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 1973 &dev, &secy, &tx_sc, &assoc_num); 1974 if (IS_ERR(tx_sa)) { 1975 rtnl_unlock(); 1976 return PTR_ERR(tx_sa); 1977 } 1978 1979 if (tx_sa->active) { 1980 rtnl_unlock(); 1981 return -EBUSY; 1982 } 1983 1984 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 1985 clear_tx_sa(tx_sa); 1986 1987 rtnl_unlock(); 1988 1989 return 0; 1990 } 1991 1992 static bool validate_upd_sa(struct nlattr **attrs) 1993 { 1994 if (!attrs[MACSEC_SA_ATTR_AN] || 1995 attrs[MACSEC_SA_ATTR_KEY] || 1996 attrs[MACSEC_SA_ATTR_KEYID]) 1997 return false; 1998 1999 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2000 return false; 2001 2002 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 2003 return false; 2004 2005 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2006 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2007 return false; 2008 } 2009 2010 return true; 2011 } 2012 2013 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2014 { 2015 struct nlattr **attrs = info->attrs; 2016 struct net_device *dev; 2017 struct macsec_secy *secy; 2018 struct macsec_tx_sc *tx_sc; 2019 struct macsec_tx_sa *tx_sa; 2020 u8 assoc_num; 2021 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2022 2023 if (!attrs[MACSEC_ATTR_IFINDEX]) 2024 return -EINVAL; 2025 2026 if (parse_sa_config(attrs, tb_sa)) 2027 return -EINVAL; 2028 2029 if (!validate_upd_sa(tb_sa)) 2030 return -EINVAL; 2031 2032 rtnl_lock(); 2033 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2034 &dev, &secy, &tx_sc, &assoc_num); 2035 if (IS_ERR(tx_sa)) { 2036 rtnl_unlock(); 2037 return PTR_ERR(tx_sa); 2038 } 2039 2040 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2041 spin_lock_bh(&tx_sa->lock); 2042 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2043 spin_unlock_bh(&tx_sa->lock); 2044 } 2045 2046 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2047 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2048 2049 if (assoc_num == tx_sc->encoding_sa) 2050 secy->operational = tx_sa->active; 2051 2052 rtnl_unlock(); 2053 2054 return 0; 2055 } 2056 2057 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2058 { 2059 struct nlattr **attrs = info->attrs; 2060 struct net_device *dev; 2061 struct macsec_secy *secy; 2062 struct macsec_rx_sc *rx_sc; 2063 struct macsec_rx_sa *rx_sa; 2064 u8 assoc_num; 2065 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2066 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2067 2068 if (!attrs[MACSEC_ATTR_IFINDEX]) 2069 return -EINVAL; 2070 2071 if (parse_rxsc_config(attrs, tb_rxsc)) 2072 return -EINVAL; 2073 2074 if (parse_sa_config(attrs, tb_sa)) 2075 return -EINVAL; 2076 2077 if (!validate_upd_sa(tb_sa)) 2078 return -EINVAL; 2079 2080 rtnl_lock(); 2081 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2082 &dev, &secy, &rx_sc, &assoc_num); 2083 if (IS_ERR(rx_sa)) { 2084 rtnl_unlock(); 2085 return PTR_ERR(rx_sa); 2086 } 2087 2088 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2089 spin_lock_bh(&rx_sa->lock); 2090 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2091 spin_unlock_bh(&rx_sa->lock); 2092 } 2093 2094 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2095 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2096 2097 rtnl_unlock(); 2098 return 0; 2099 } 2100 2101 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2102 { 2103 struct nlattr **attrs = info->attrs; 2104 struct net_device *dev; 2105 struct macsec_secy *secy; 2106 struct macsec_rx_sc *rx_sc; 2107 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2108 2109 if (!attrs[MACSEC_ATTR_IFINDEX]) 2110 return -EINVAL; 2111 2112 if (parse_rxsc_config(attrs, tb_rxsc)) 2113 return -EINVAL; 2114 2115 if (!validate_add_rxsc(tb_rxsc)) 2116 return -EINVAL; 2117 2118 rtnl_lock(); 2119 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2120 if (IS_ERR(rx_sc)) { 2121 rtnl_unlock(); 2122 return PTR_ERR(rx_sc); 2123 } 2124 2125 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2126 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2127 2128 if (rx_sc->active != new) 2129 secy->n_rx_sc += new ? 1 : -1; 2130 2131 rx_sc->active = new; 2132 } 2133 2134 rtnl_unlock(); 2135 2136 return 0; 2137 } 2138 2139 static int copy_tx_sa_stats(struct sk_buff *skb, 2140 struct macsec_tx_sa_stats __percpu *pstats) 2141 { 2142 struct macsec_tx_sa_stats sum = {0, }; 2143 int cpu; 2144 2145 for_each_possible_cpu(cpu) { 2146 const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2147 2148 sum.OutPktsProtected += stats->OutPktsProtected; 2149 sum.OutPktsEncrypted += stats->OutPktsEncrypted; 2150 } 2151 2152 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || 2153 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) 2154 return -EMSGSIZE; 2155 2156 return 0; 2157 } 2158 2159 static int copy_rx_sa_stats(struct sk_buff *skb, 2160 struct macsec_rx_sa_stats __percpu *pstats) 2161 { 2162 struct macsec_rx_sa_stats sum = {0, }; 2163 int cpu; 2164 2165 for_each_possible_cpu(cpu) { 2166 const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2167 2168 sum.InPktsOK += stats->InPktsOK; 2169 sum.InPktsInvalid += stats->InPktsInvalid; 2170 sum.InPktsNotValid += stats->InPktsNotValid; 2171 sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; 2172 sum.InPktsUnusedSA += stats->InPktsUnusedSA; 2173 } 2174 2175 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || 2176 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || 2177 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || 2178 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || 2179 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) 2180 return -EMSGSIZE; 2181 2182 return 0; 2183 } 2184 2185 static int copy_rx_sc_stats(struct sk_buff *skb, 2186 struct pcpu_rx_sc_stats __percpu *pstats) 2187 { 2188 struct macsec_rx_sc_stats sum = {0, }; 2189 int cpu; 2190 2191 for_each_possible_cpu(cpu) { 2192 const struct pcpu_rx_sc_stats *stats; 2193 struct macsec_rx_sc_stats tmp; 2194 unsigned int start; 2195 2196 stats = per_cpu_ptr(pstats, cpu); 2197 do { 2198 start = u64_stats_fetch_begin_irq(&stats->syncp); 2199 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2200 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2201 2202 sum.InOctetsValidated += tmp.InOctetsValidated; 2203 sum.InOctetsDecrypted += tmp.InOctetsDecrypted; 2204 sum.InPktsUnchecked += tmp.InPktsUnchecked; 2205 sum.InPktsDelayed += tmp.InPktsDelayed; 2206 sum.InPktsOK += tmp.InPktsOK; 2207 sum.InPktsInvalid += tmp.InPktsInvalid; 2208 sum.InPktsLate += tmp.InPktsLate; 2209 sum.InPktsNotValid += tmp.InPktsNotValid; 2210 sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2211 sum.InPktsUnusedSA += tmp.InPktsUnusedSA; 2212 } 2213 2214 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2215 sum.InOctetsValidated, 2216 MACSEC_RXSC_STATS_ATTR_PAD) || 2217 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2218 sum.InOctetsDecrypted, 2219 MACSEC_RXSC_STATS_ATTR_PAD) || 2220 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2221 sum.InPktsUnchecked, 2222 MACSEC_RXSC_STATS_ATTR_PAD) || 2223 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2224 sum.InPktsDelayed, 2225 MACSEC_RXSC_STATS_ATTR_PAD) || 2226 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2227 sum.InPktsOK, 2228 MACSEC_RXSC_STATS_ATTR_PAD) || 2229 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2230 sum.InPktsInvalid, 2231 MACSEC_RXSC_STATS_ATTR_PAD) || 2232 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2233 sum.InPktsLate, 2234 MACSEC_RXSC_STATS_ATTR_PAD) || 2235 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2236 sum.InPktsNotValid, 2237 MACSEC_RXSC_STATS_ATTR_PAD) || 2238 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2239 sum.InPktsNotUsingSA, 2240 MACSEC_RXSC_STATS_ATTR_PAD) || 2241 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2242 sum.InPktsUnusedSA, 2243 MACSEC_RXSC_STATS_ATTR_PAD)) 2244 return -EMSGSIZE; 2245 2246 return 0; 2247 } 2248 2249 static int copy_tx_sc_stats(struct sk_buff *skb, 2250 struct pcpu_tx_sc_stats __percpu *pstats) 2251 { 2252 struct macsec_tx_sc_stats sum = {0, }; 2253 int cpu; 2254 2255 for_each_possible_cpu(cpu) { 2256 const struct pcpu_tx_sc_stats *stats; 2257 struct macsec_tx_sc_stats tmp; 2258 unsigned int start; 2259 2260 stats = per_cpu_ptr(pstats, cpu); 2261 do { 2262 start = u64_stats_fetch_begin_irq(&stats->syncp); 2263 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2264 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2265 2266 sum.OutPktsProtected += tmp.OutPktsProtected; 2267 sum.OutPktsEncrypted += tmp.OutPktsEncrypted; 2268 sum.OutOctetsProtected += tmp.OutOctetsProtected; 2269 sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2270 } 2271 2272 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2273 sum.OutPktsProtected, 2274 MACSEC_TXSC_STATS_ATTR_PAD) || 2275 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2276 sum.OutPktsEncrypted, 2277 MACSEC_TXSC_STATS_ATTR_PAD) || 2278 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2279 sum.OutOctetsProtected, 2280 MACSEC_TXSC_STATS_ATTR_PAD) || 2281 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2282 sum.OutOctetsEncrypted, 2283 MACSEC_TXSC_STATS_ATTR_PAD)) 2284 return -EMSGSIZE; 2285 2286 return 0; 2287 } 2288 2289 static int copy_secy_stats(struct sk_buff *skb, 2290 struct pcpu_secy_stats __percpu *pstats) 2291 { 2292 struct macsec_dev_stats sum = {0, }; 2293 int cpu; 2294 2295 for_each_possible_cpu(cpu) { 2296 const struct pcpu_secy_stats *stats; 2297 struct macsec_dev_stats tmp; 2298 unsigned int start; 2299 2300 stats = per_cpu_ptr(pstats, cpu); 2301 do { 2302 start = u64_stats_fetch_begin_irq(&stats->syncp); 2303 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2304 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2305 2306 sum.OutPktsUntagged += tmp.OutPktsUntagged; 2307 sum.InPktsUntagged += tmp.InPktsUntagged; 2308 sum.OutPktsTooLong += tmp.OutPktsTooLong; 2309 sum.InPktsNoTag += tmp.InPktsNoTag; 2310 sum.InPktsBadTag += tmp.InPktsBadTag; 2311 sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2312 sum.InPktsNoSCI += tmp.InPktsNoSCI; 2313 sum.InPktsOverrun += tmp.InPktsOverrun; 2314 } 2315 2316 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2317 sum.OutPktsUntagged, 2318 MACSEC_SECY_STATS_ATTR_PAD) || 2319 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2320 sum.InPktsUntagged, 2321 MACSEC_SECY_STATS_ATTR_PAD) || 2322 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2323 sum.OutPktsTooLong, 2324 MACSEC_SECY_STATS_ATTR_PAD) || 2325 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2326 sum.InPktsNoTag, 2327 MACSEC_SECY_STATS_ATTR_PAD) || 2328 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2329 sum.InPktsBadTag, 2330 MACSEC_SECY_STATS_ATTR_PAD) || 2331 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2332 sum.InPktsUnknownSCI, 2333 MACSEC_SECY_STATS_ATTR_PAD) || 2334 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2335 sum.InPktsNoSCI, 2336 MACSEC_SECY_STATS_ATTR_PAD) || 2337 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2338 sum.InPktsOverrun, 2339 MACSEC_SECY_STATS_ATTR_PAD)) 2340 return -EMSGSIZE; 2341 2342 return 0; 2343 } 2344 2345 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2346 { 2347 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2348 struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); 2349 2350 if (!secy_nest) 2351 return 1; 2352 2353 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2354 MACSEC_SECY_ATTR_PAD) || 2355 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2356 MACSEC_DEFAULT_CIPHER_ID, 2357 MACSEC_SECY_ATTR_PAD) || 2358 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2359 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2360 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2361 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 2362 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 2363 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 2364 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 2365 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 2366 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 2367 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 2368 goto cancel; 2369 2370 if (secy->replay_protect) { 2371 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 2372 goto cancel; 2373 } 2374 2375 nla_nest_end(skb, secy_nest); 2376 return 0; 2377 2378 cancel: 2379 nla_nest_cancel(skb, secy_nest); 2380 return 1; 2381 } 2382 2383 static int dump_secy(struct macsec_secy *secy, struct net_device *dev, 2384 struct sk_buff *skb, struct netlink_callback *cb) 2385 { 2386 struct macsec_rx_sc *rx_sc; 2387 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2388 struct nlattr *txsa_list, *rxsc_list; 2389 int i, j; 2390 void *hdr; 2391 struct nlattr *attr; 2392 2393 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2394 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 2395 if (!hdr) 2396 return -EMSGSIZE; 2397 2398 genl_dump_check_consistent(cb, hdr, &macsec_fam); 2399 2400 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2401 goto nla_put_failure; 2402 2403 if (nla_put_secy(secy, skb)) 2404 goto nla_put_failure; 2405 2406 attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS); 2407 if (!attr) 2408 goto nla_put_failure; 2409 if (copy_tx_sc_stats(skb, tx_sc->stats)) { 2410 nla_nest_cancel(skb, attr); 2411 goto nla_put_failure; 2412 } 2413 nla_nest_end(skb, attr); 2414 2415 attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS); 2416 if (!attr) 2417 goto nla_put_failure; 2418 if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { 2419 nla_nest_cancel(skb, attr); 2420 goto nla_put_failure; 2421 } 2422 nla_nest_end(skb, attr); 2423 2424 txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST); 2425 if (!txsa_list) 2426 goto nla_put_failure; 2427 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 2428 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 2429 struct nlattr *txsa_nest; 2430 2431 if (!tx_sa) 2432 continue; 2433 2434 txsa_nest = nla_nest_start(skb, j++); 2435 if (!txsa_nest) { 2436 nla_nest_cancel(skb, txsa_list); 2437 goto nla_put_failure; 2438 } 2439 2440 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2441 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2442 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 2443 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2444 nla_nest_cancel(skb, txsa_nest); 2445 nla_nest_cancel(skb, txsa_list); 2446 goto nla_put_failure; 2447 } 2448 2449 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); 2450 if (!attr) { 2451 nla_nest_cancel(skb, txsa_nest); 2452 nla_nest_cancel(skb, txsa_list); 2453 goto nla_put_failure; 2454 } 2455 if (copy_tx_sa_stats(skb, tx_sa->stats)) { 2456 nla_nest_cancel(skb, attr); 2457 nla_nest_cancel(skb, txsa_nest); 2458 nla_nest_cancel(skb, txsa_list); 2459 goto nla_put_failure; 2460 } 2461 nla_nest_end(skb, attr); 2462 2463 nla_nest_end(skb, txsa_nest); 2464 } 2465 nla_nest_end(skb, txsa_list); 2466 2467 rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST); 2468 if (!rxsc_list) 2469 goto nla_put_failure; 2470 2471 j = 1; 2472 for_each_rxsc_rtnl(secy, rx_sc) { 2473 int k; 2474 struct nlattr *rxsa_list; 2475 struct nlattr *rxsc_nest = nla_nest_start(skb, j++); 2476 2477 if (!rxsc_nest) { 2478 nla_nest_cancel(skb, rxsc_list); 2479 goto nla_put_failure; 2480 } 2481 2482 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 2483 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 2484 MACSEC_RXSC_ATTR_PAD)) { 2485 nla_nest_cancel(skb, rxsc_nest); 2486 nla_nest_cancel(skb, rxsc_list); 2487 goto nla_put_failure; 2488 } 2489 2490 attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS); 2491 if (!attr) { 2492 nla_nest_cancel(skb, rxsc_nest); 2493 nla_nest_cancel(skb, rxsc_list); 2494 goto nla_put_failure; 2495 } 2496 if (copy_rx_sc_stats(skb, rx_sc->stats)) { 2497 nla_nest_cancel(skb, attr); 2498 nla_nest_cancel(skb, rxsc_nest); 2499 nla_nest_cancel(skb, rxsc_list); 2500 goto nla_put_failure; 2501 } 2502 nla_nest_end(skb, attr); 2503 2504 rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST); 2505 if (!rxsa_list) { 2506 nla_nest_cancel(skb, rxsc_nest); 2507 nla_nest_cancel(skb, rxsc_list); 2508 goto nla_put_failure; 2509 } 2510 2511 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 2512 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 2513 struct nlattr *rxsa_nest; 2514 2515 if (!rx_sa) 2516 continue; 2517 2518 rxsa_nest = nla_nest_start(skb, k++); 2519 if (!rxsa_nest) { 2520 nla_nest_cancel(skb, rxsa_list); 2521 nla_nest_cancel(skb, rxsc_nest); 2522 nla_nest_cancel(skb, rxsc_list); 2523 goto nla_put_failure; 2524 } 2525 2526 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); 2527 if (!attr) { 2528 nla_nest_cancel(skb, rxsa_list); 2529 nla_nest_cancel(skb, rxsc_nest); 2530 nla_nest_cancel(skb, rxsc_list); 2531 goto nla_put_failure; 2532 } 2533 if (copy_rx_sa_stats(skb, rx_sa->stats)) { 2534 nla_nest_cancel(skb, attr); 2535 nla_nest_cancel(skb, rxsa_list); 2536 nla_nest_cancel(skb, rxsc_nest); 2537 nla_nest_cancel(skb, rxsc_list); 2538 goto nla_put_failure; 2539 } 2540 nla_nest_end(skb, attr); 2541 2542 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2543 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2544 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 2545 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2546 nla_nest_cancel(skb, rxsa_nest); 2547 nla_nest_cancel(skb, rxsc_nest); 2548 nla_nest_cancel(skb, rxsc_list); 2549 goto nla_put_failure; 2550 } 2551 nla_nest_end(skb, rxsa_nest); 2552 } 2553 2554 nla_nest_end(skb, rxsa_list); 2555 nla_nest_end(skb, rxsc_nest); 2556 } 2557 2558 nla_nest_end(skb, rxsc_list); 2559 2560 genlmsg_end(skb, hdr); 2561 2562 return 0; 2563 2564 nla_put_failure: 2565 genlmsg_cancel(skb, hdr); 2566 return -EMSGSIZE; 2567 } 2568 2569 static int macsec_generation = 1; /* protected by RTNL */ 2570 2571 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2572 { 2573 struct net *net = sock_net(skb->sk); 2574 struct net_device *dev; 2575 int dev_idx, d; 2576 2577 dev_idx = cb->args[0]; 2578 2579 d = 0; 2580 rtnl_lock(); 2581 2582 cb->seq = macsec_generation; 2583 2584 for_each_netdev(net, dev) { 2585 struct macsec_secy *secy; 2586 2587 if (d < dev_idx) 2588 goto next; 2589 2590 if (!netif_is_macsec(dev)) 2591 goto next; 2592 2593 secy = &macsec_priv(dev)->secy; 2594 if (dump_secy(secy, dev, skb, cb) < 0) 2595 goto done; 2596 next: 2597 d++; 2598 } 2599 2600 done: 2601 rtnl_unlock(); 2602 cb->args[0] = d; 2603 return skb->len; 2604 } 2605 2606 static const struct genl_ops macsec_genl_ops[] = { 2607 { 2608 .cmd = MACSEC_CMD_GET_TXSC, 2609 .dumpit = macsec_dump_txsc, 2610 .policy = macsec_genl_policy, 2611 }, 2612 { 2613 .cmd = MACSEC_CMD_ADD_RXSC, 2614 .doit = macsec_add_rxsc, 2615 .policy = macsec_genl_policy, 2616 .flags = GENL_ADMIN_PERM, 2617 }, 2618 { 2619 .cmd = MACSEC_CMD_DEL_RXSC, 2620 .doit = macsec_del_rxsc, 2621 .policy = macsec_genl_policy, 2622 .flags = GENL_ADMIN_PERM, 2623 }, 2624 { 2625 .cmd = MACSEC_CMD_UPD_RXSC, 2626 .doit = macsec_upd_rxsc, 2627 .policy = macsec_genl_policy, 2628 .flags = GENL_ADMIN_PERM, 2629 }, 2630 { 2631 .cmd = MACSEC_CMD_ADD_TXSA, 2632 .doit = macsec_add_txsa, 2633 .policy = macsec_genl_policy, 2634 .flags = GENL_ADMIN_PERM, 2635 }, 2636 { 2637 .cmd = MACSEC_CMD_DEL_TXSA, 2638 .doit = macsec_del_txsa, 2639 .policy = macsec_genl_policy, 2640 .flags = GENL_ADMIN_PERM, 2641 }, 2642 { 2643 .cmd = MACSEC_CMD_UPD_TXSA, 2644 .doit = macsec_upd_txsa, 2645 .policy = macsec_genl_policy, 2646 .flags = GENL_ADMIN_PERM, 2647 }, 2648 { 2649 .cmd = MACSEC_CMD_ADD_RXSA, 2650 .doit = macsec_add_rxsa, 2651 .policy = macsec_genl_policy, 2652 .flags = GENL_ADMIN_PERM, 2653 }, 2654 { 2655 .cmd = MACSEC_CMD_DEL_RXSA, 2656 .doit = macsec_del_rxsa, 2657 .policy = macsec_genl_policy, 2658 .flags = GENL_ADMIN_PERM, 2659 }, 2660 { 2661 .cmd = MACSEC_CMD_UPD_RXSA, 2662 .doit = macsec_upd_rxsa, 2663 .policy = macsec_genl_policy, 2664 .flags = GENL_ADMIN_PERM, 2665 }, 2666 }; 2667 2668 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 2669 struct net_device *dev) 2670 { 2671 struct macsec_dev *macsec = netdev_priv(dev); 2672 struct macsec_secy *secy = &macsec->secy; 2673 struct pcpu_secy_stats *secy_stats; 2674 int ret, len; 2675 2676 /* 10.5 */ 2677 if (!secy->protect_frames) { 2678 secy_stats = this_cpu_ptr(macsec->stats); 2679 u64_stats_update_begin(&secy_stats->syncp); 2680 secy_stats->stats.OutPktsUntagged++; 2681 u64_stats_update_end(&secy_stats->syncp); 2682 skb->dev = macsec->real_dev; 2683 len = skb->len; 2684 ret = dev_queue_xmit(skb); 2685 count_tx(dev, ret, len); 2686 return ret; 2687 } 2688 2689 if (!secy->operational) { 2690 kfree_skb(skb); 2691 dev->stats.tx_dropped++; 2692 return NETDEV_TX_OK; 2693 } 2694 2695 skb = macsec_encrypt(skb, dev); 2696 if (IS_ERR(skb)) { 2697 if (PTR_ERR(skb) != -EINPROGRESS) 2698 dev->stats.tx_dropped++; 2699 return NETDEV_TX_OK; 2700 } 2701 2702 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 2703 2704 macsec_encrypt_finish(skb, dev); 2705 len = skb->len; 2706 ret = dev_queue_xmit(skb); 2707 count_tx(dev, ret, len); 2708 return ret; 2709 } 2710 2711 #define MACSEC_FEATURES \ 2712 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2713 static struct lock_class_key macsec_netdev_addr_lock_key; 2714 2715 static int macsec_dev_init(struct net_device *dev) 2716 { 2717 struct macsec_dev *macsec = macsec_priv(dev); 2718 struct net_device *real_dev = macsec->real_dev; 2719 int err; 2720 2721 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2722 if (!dev->tstats) 2723 return -ENOMEM; 2724 2725 err = gro_cells_init(&macsec->gro_cells, dev); 2726 if (err) { 2727 free_percpu(dev->tstats); 2728 return err; 2729 } 2730 2731 dev->features = real_dev->features & MACSEC_FEATURES; 2732 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 2733 2734 dev->needed_headroom = real_dev->needed_headroom + 2735 MACSEC_NEEDED_HEADROOM; 2736 dev->needed_tailroom = real_dev->needed_tailroom + 2737 MACSEC_NEEDED_TAILROOM; 2738 2739 if (is_zero_ether_addr(dev->dev_addr)) 2740 eth_hw_addr_inherit(dev, real_dev); 2741 if (is_zero_ether_addr(dev->broadcast)) 2742 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 2743 2744 return 0; 2745 } 2746 2747 static void macsec_dev_uninit(struct net_device *dev) 2748 { 2749 struct macsec_dev *macsec = macsec_priv(dev); 2750 2751 gro_cells_destroy(&macsec->gro_cells); 2752 free_percpu(dev->tstats); 2753 } 2754 2755 static netdev_features_t macsec_fix_features(struct net_device *dev, 2756 netdev_features_t features) 2757 { 2758 struct macsec_dev *macsec = macsec_priv(dev); 2759 struct net_device *real_dev = macsec->real_dev; 2760 2761 features &= (real_dev->features & MACSEC_FEATURES) | 2762 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 2763 features |= NETIF_F_LLTX; 2764 2765 return features; 2766 } 2767 2768 static int macsec_dev_open(struct net_device *dev) 2769 { 2770 struct macsec_dev *macsec = macsec_priv(dev); 2771 struct net_device *real_dev = macsec->real_dev; 2772 int err; 2773 2774 if (!(real_dev->flags & IFF_UP)) 2775 return -ENETDOWN; 2776 2777 err = dev_uc_add(real_dev, dev->dev_addr); 2778 if (err < 0) 2779 return err; 2780 2781 if (dev->flags & IFF_ALLMULTI) { 2782 err = dev_set_allmulti(real_dev, 1); 2783 if (err < 0) 2784 goto del_unicast; 2785 } 2786 2787 if (dev->flags & IFF_PROMISC) { 2788 err = dev_set_promiscuity(real_dev, 1); 2789 if (err < 0) 2790 goto clear_allmulti; 2791 } 2792 2793 if (netif_carrier_ok(real_dev)) 2794 netif_carrier_on(dev); 2795 2796 return 0; 2797 clear_allmulti: 2798 if (dev->flags & IFF_ALLMULTI) 2799 dev_set_allmulti(real_dev, -1); 2800 del_unicast: 2801 dev_uc_del(real_dev, dev->dev_addr); 2802 netif_carrier_off(dev); 2803 return err; 2804 } 2805 2806 static int macsec_dev_stop(struct net_device *dev) 2807 { 2808 struct macsec_dev *macsec = macsec_priv(dev); 2809 struct net_device *real_dev = macsec->real_dev; 2810 2811 netif_carrier_off(dev); 2812 2813 dev_mc_unsync(real_dev, dev); 2814 dev_uc_unsync(real_dev, dev); 2815 2816 if (dev->flags & IFF_ALLMULTI) 2817 dev_set_allmulti(real_dev, -1); 2818 2819 if (dev->flags & IFF_PROMISC) 2820 dev_set_promiscuity(real_dev, -1); 2821 2822 dev_uc_del(real_dev, dev->dev_addr); 2823 2824 return 0; 2825 } 2826 2827 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 2828 { 2829 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2830 2831 if (!(dev->flags & IFF_UP)) 2832 return; 2833 2834 if (change & IFF_ALLMULTI) 2835 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 2836 2837 if (change & IFF_PROMISC) 2838 dev_set_promiscuity(real_dev, 2839 dev->flags & IFF_PROMISC ? 1 : -1); 2840 } 2841 2842 static void macsec_dev_set_rx_mode(struct net_device *dev) 2843 { 2844 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2845 2846 dev_mc_sync(real_dev, dev); 2847 dev_uc_sync(real_dev, dev); 2848 } 2849 2850 static int macsec_set_mac_address(struct net_device *dev, void *p) 2851 { 2852 struct macsec_dev *macsec = macsec_priv(dev); 2853 struct net_device *real_dev = macsec->real_dev; 2854 struct sockaddr *addr = p; 2855 int err; 2856 2857 if (!is_valid_ether_addr(addr->sa_data)) 2858 return -EADDRNOTAVAIL; 2859 2860 if (!(dev->flags & IFF_UP)) 2861 goto out; 2862 2863 err = dev_uc_add(real_dev, addr->sa_data); 2864 if (err < 0) 2865 return err; 2866 2867 dev_uc_del(real_dev, dev->dev_addr); 2868 2869 out: 2870 ether_addr_copy(dev->dev_addr, addr->sa_data); 2871 return 0; 2872 } 2873 2874 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 2875 { 2876 struct macsec_dev *macsec = macsec_priv(dev); 2877 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 2878 2879 if (macsec->real_dev->mtu - extra < new_mtu) 2880 return -ERANGE; 2881 2882 dev->mtu = new_mtu; 2883 2884 return 0; 2885 } 2886 2887 static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, 2888 struct rtnl_link_stats64 *s) 2889 { 2890 int cpu; 2891 2892 if (!dev->tstats) 2893 return s; 2894 2895 for_each_possible_cpu(cpu) { 2896 struct pcpu_sw_netstats *stats; 2897 struct pcpu_sw_netstats tmp; 2898 int start; 2899 2900 stats = per_cpu_ptr(dev->tstats, cpu); 2901 do { 2902 start = u64_stats_fetch_begin_irq(&stats->syncp); 2903 tmp.rx_packets = stats->rx_packets; 2904 tmp.rx_bytes = stats->rx_bytes; 2905 tmp.tx_packets = stats->tx_packets; 2906 tmp.tx_bytes = stats->tx_bytes; 2907 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2908 2909 s->rx_packets += tmp.rx_packets; 2910 s->rx_bytes += tmp.rx_bytes; 2911 s->tx_packets += tmp.tx_packets; 2912 s->tx_bytes += tmp.tx_bytes; 2913 } 2914 2915 s->rx_dropped = dev->stats.rx_dropped; 2916 s->tx_dropped = dev->stats.tx_dropped; 2917 2918 return s; 2919 } 2920 2921 static int macsec_get_iflink(const struct net_device *dev) 2922 { 2923 return macsec_priv(dev)->real_dev->ifindex; 2924 } 2925 2926 2927 static int macsec_get_nest_level(struct net_device *dev) 2928 { 2929 return macsec_priv(dev)->nest_level; 2930 } 2931 2932 2933 static const struct net_device_ops macsec_netdev_ops = { 2934 .ndo_init = macsec_dev_init, 2935 .ndo_uninit = macsec_dev_uninit, 2936 .ndo_open = macsec_dev_open, 2937 .ndo_stop = macsec_dev_stop, 2938 .ndo_fix_features = macsec_fix_features, 2939 .ndo_change_mtu = macsec_change_mtu, 2940 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 2941 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 2942 .ndo_set_mac_address = macsec_set_mac_address, 2943 .ndo_start_xmit = macsec_start_xmit, 2944 .ndo_get_stats64 = macsec_get_stats64, 2945 .ndo_get_iflink = macsec_get_iflink, 2946 .ndo_get_lock_subclass = macsec_get_nest_level, 2947 }; 2948 2949 static const struct device_type macsec_type = { 2950 .name = "macsec", 2951 }; 2952 2953 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 2954 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 2955 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 2956 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 2957 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 2958 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 2959 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 2960 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 2961 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 2962 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 2963 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 2964 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 2965 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 2966 }; 2967 2968 static void macsec_free_netdev(struct net_device *dev) 2969 { 2970 struct macsec_dev *macsec = macsec_priv(dev); 2971 struct net_device *real_dev = macsec->real_dev; 2972 2973 free_percpu(macsec->stats); 2974 free_percpu(macsec->secy.tx_sc.stats); 2975 2976 dev_put(real_dev); 2977 free_netdev(dev); 2978 } 2979 2980 static void macsec_setup(struct net_device *dev) 2981 { 2982 ether_setup(dev); 2983 dev->priv_flags |= IFF_NO_QUEUE; 2984 dev->netdev_ops = &macsec_netdev_ops; 2985 dev->destructor = macsec_free_netdev; 2986 SET_NETDEV_DEVTYPE(dev, &macsec_type); 2987 2988 eth_zero_addr(dev->broadcast); 2989 } 2990 2991 static void macsec_changelink_common(struct net_device *dev, 2992 struct nlattr *data[]) 2993 { 2994 struct macsec_secy *secy; 2995 struct macsec_tx_sc *tx_sc; 2996 2997 secy = &macsec_priv(dev)->secy; 2998 tx_sc = &secy->tx_sc; 2999 3000 if (data[IFLA_MACSEC_ENCODING_SA]) { 3001 struct macsec_tx_sa *tx_sa; 3002 3003 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3004 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3005 3006 secy->operational = tx_sa && tx_sa->active; 3007 } 3008 3009 if (data[IFLA_MACSEC_WINDOW]) 3010 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3011 3012 if (data[IFLA_MACSEC_ENCRYPT]) 3013 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3014 3015 if (data[IFLA_MACSEC_PROTECT]) 3016 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3017 3018 if (data[IFLA_MACSEC_INC_SCI]) 3019 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3020 3021 if (data[IFLA_MACSEC_ES]) 3022 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3023 3024 if (data[IFLA_MACSEC_SCB]) 3025 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3026 3027 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3028 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3029 3030 if (data[IFLA_MACSEC_VALIDATION]) 3031 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3032 } 3033 3034 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3035 struct nlattr *data[]) 3036 { 3037 if (!data) 3038 return 0; 3039 3040 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3041 data[IFLA_MACSEC_ICV_LEN] || 3042 data[IFLA_MACSEC_SCI] || 3043 data[IFLA_MACSEC_PORT]) 3044 return -EINVAL; 3045 3046 macsec_changelink_common(dev, data); 3047 3048 return 0; 3049 } 3050 3051 static void macsec_del_dev(struct macsec_dev *macsec) 3052 { 3053 int i; 3054 3055 while (macsec->secy.rx_sc) { 3056 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3057 3058 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3059 free_rx_sc(rx_sc); 3060 } 3061 3062 for (i = 0; i < MACSEC_NUM_AN; i++) { 3063 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3064 3065 if (sa) { 3066 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3067 clear_tx_sa(sa); 3068 } 3069 } 3070 } 3071 3072 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3073 { 3074 struct macsec_dev *macsec = macsec_priv(dev); 3075 struct net_device *real_dev = macsec->real_dev; 3076 3077 unregister_netdevice_queue(dev, head); 3078 list_del_rcu(&macsec->secys); 3079 macsec_del_dev(macsec); 3080 netdev_upper_dev_unlink(real_dev, dev); 3081 3082 macsec_generation++; 3083 } 3084 3085 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3086 { 3087 struct macsec_dev *macsec = macsec_priv(dev); 3088 struct net_device *real_dev = macsec->real_dev; 3089 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3090 3091 macsec_common_dellink(dev, head); 3092 3093 if (list_empty(&rxd->secys)) { 3094 netdev_rx_handler_unregister(real_dev); 3095 kfree(rxd); 3096 } 3097 } 3098 3099 static int register_macsec_dev(struct net_device *real_dev, 3100 struct net_device *dev) 3101 { 3102 struct macsec_dev *macsec = macsec_priv(dev); 3103 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3104 3105 if (!rxd) { 3106 int err; 3107 3108 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3109 if (!rxd) 3110 return -ENOMEM; 3111 3112 INIT_LIST_HEAD(&rxd->secys); 3113 3114 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3115 rxd); 3116 if (err < 0) { 3117 kfree(rxd); 3118 return err; 3119 } 3120 } 3121 3122 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3123 return 0; 3124 } 3125 3126 static bool sci_exists(struct net_device *dev, sci_t sci) 3127 { 3128 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3129 struct macsec_dev *macsec; 3130 3131 list_for_each_entry(macsec, &rxd->secys, secys) { 3132 if (macsec->secy.sci == sci) 3133 return true; 3134 } 3135 3136 return false; 3137 } 3138 3139 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3140 { 3141 return make_sci(dev->dev_addr, port); 3142 } 3143 3144 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3145 { 3146 struct macsec_dev *macsec = macsec_priv(dev); 3147 struct macsec_secy *secy = &macsec->secy; 3148 3149 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3150 if (!macsec->stats) 3151 return -ENOMEM; 3152 3153 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3154 if (!secy->tx_sc.stats) { 3155 free_percpu(macsec->stats); 3156 return -ENOMEM; 3157 } 3158 3159 if (sci == MACSEC_UNDEF_SCI) 3160 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3161 3162 secy->netdev = dev; 3163 secy->operational = true; 3164 secy->key_len = DEFAULT_SAK_LEN; 3165 secy->icv_len = icv_len; 3166 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3167 secy->protect_frames = true; 3168 secy->replay_protect = false; 3169 3170 secy->sci = sci; 3171 secy->tx_sc.active = true; 3172 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3173 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3174 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3175 secy->tx_sc.end_station = false; 3176 secy->tx_sc.scb = false; 3177 3178 return 0; 3179 } 3180 3181 static int macsec_newlink(struct net *net, struct net_device *dev, 3182 struct nlattr *tb[], struct nlattr *data[]) 3183 { 3184 struct macsec_dev *macsec = macsec_priv(dev); 3185 struct net_device *real_dev; 3186 int err; 3187 sci_t sci; 3188 u8 icv_len = DEFAULT_ICV_LEN; 3189 rx_handler_func_t *rx_handler; 3190 3191 if (!tb[IFLA_LINK]) 3192 return -EINVAL; 3193 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 3194 if (!real_dev) 3195 return -ENODEV; 3196 3197 dev->priv_flags |= IFF_MACSEC; 3198 3199 macsec->real_dev = real_dev; 3200 3201 if (data && data[IFLA_MACSEC_ICV_LEN]) 3202 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3203 dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 3204 3205 rx_handler = rtnl_dereference(real_dev->rx_handler); 3206 if (rx_handler && rx_handler != macsec_handle_frame) 3207 return -EBUSY; 3208 3209 err = register_netdevice(dev); 3210 if (err < 0) 3211 return err; 3212 3213 dev_hold(real_dev); 3214 3215 macsec->nest_level = dev_get_nest_level(real_dev) + 1; 3216 netdev_lockdep_set_classes(dev); 3217 lockdep_set_class_and_subclass(&dev->addr_list_lock, 3218 &macsec_netdev_addr_lock_key, 3219 macsec_get_nest_level(dev)); 3220 3221 err = netdev_upper_dev_link(real_dev, dev); 3222 if (err < 0) 3223 goto unregister; 3224 3225 /* need to be already registered so that ->init has run and 3226 * the MAC addr is set 3227 */ 3228 if (data && data[IFLA_MACSEC_SCI]) 3229 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 3230 else if (data && data[IFLA_MACSEC_PORT]) 3231 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 3232 else 3233 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3234 3235 if (rx_handler && sci_exists(real_dev, sci)) { 3236 err = -EBUSY; 3237 goto unlink; 3238 } 3239 3240 err = macsec_add_dev(dev, sci, icv_len); 3241 if (err) 3242 goto unlink; 3243 3244 if (data) 3245 macsec_changelink_common(dev, data); 3246 3247 err = register_macsec_dev(real_dev, dev); 3248 if (err < 0) 3249 goto del_dev; 3250 3251 macsec_generation++; 3252 3253 return 0; 3254 3255 del_dev: 3256 macsec_del_dev(macsec); 3257 unlink: 3258 netdev_upper_dev_unlink(real_dev, dev); 3259 unregister: 3260 unregister_netdevice(dev); 3261 return err; 3262 } 3263 3264 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3265 { 3266 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3267 u8 icv_len = DEFAULT_ICV_LEN; 3268 int flag; 3269 bool es, scb, sci; 3270 3271 if (!data) 3272 return 0; 3273 3274 if (data[IFLA_MACSEC_CIPHER_SUITE]) 3275 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 3276 3277 if (data[IFLA_MACSEC_ICV_LEN]) { 3278 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3279 if (icv_len != DEFAULT_ICV_LEN) { 3280 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 3281 struct crypto_aead *dummy_tfm; 3282 3283 dummy_tfm = macsec_alloc_tfm(dummy_key, 3284 DEFAULT_SAK_LEN, 3285 icv_len); 3286 if (IS_ERR(dummy_tfm)) 3287 return PTR_ERR(dummy_tfm); 3288 crypto_free_aead(dummy_tfm); 3289 } 3290 } 3291 3292 switch (csid) { 3293 case MACSEC_DEFAULT_CIPHER_ID: 3294 case MACSEC_DEFAULT_CIPHER_ALT: 3295 if (icv_len < MACSEC_MIN_ICV_LEN || 3296 icv_len > MACSEC_STD_ICV_LEN) 3297 return -EINVAL; 3298 break; 3299 default: 3300 return -EINVAL; 3301 } 3302 3303 if (data[IFLA_MACSEC_ENCODING_SA]) { 3304 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 3305 return -EINVAL; 3306 } 3307 3308 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 3309 flag < IFLA_MACSEC_VALIDATION; 3310 flag++) { 3311 if (data[flag]) { 3312 if (nla_get_u8(data[flag]) > 1) 3313 return -EINVAL; 3314 } 3315 } 3316 3317 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 3318 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 3319 scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; 3320 3321 if ((sci && (scb || es)) || (scb && es)) 3322 return -EINVAL; 3323 3324 if (data[IFLA_MACSEC_VALIDATION] && 3325 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3326 return -EINVAL; 3327 3328 if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3329 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3330 !data[IFLA_MACSEC_WINDOW]) 3331 return -EINVAL; 3332 3333 return 0; 3334 } 3335 3336 static struct net *macsec_get_link_net(const struct net_device *dev) 3337 { 3338 return dev_net(macsec_priv(dev)->real_dev); 3339 } 3340 3341 static size_t macsec_get_size(const struct net_device *dev) 3342 { 3343 return 0 + 3344 nla_total_size_64bit(8) + /* SCI */ 3345 nla_total_size(1) + /* ICV_LEN */ 3346 nla_total_size_64bit(8) + /* CIPHER_SUITE */ 3347 nla_total_size(4) + /* WINDOW */ 3348 nla_total_size(1) + /* ENCODING_SA */ 3349 nla_total_size(1) + /* ENCRYPT */ 3350 nla_total_size(1) + /* PROTECT */ 3351 nla_total_size(1) + /* INC_SCI */ 3352 nla_total_size(1) + /* ES */ 3353 nla_total_size(1) + /* SCB */ 3354 nla_total_size(1) + /* REPLAY_PROTECT */ 3355 nla_total_size(1) + /* VALIDATION */ 3356 0; 3357 } 3358 3359 static int macsec_fill_info(struct sk_buff *skb, 3360 const struct net_device *dev) 3361 { 3362 struct macsec_secy *secy = &macsec_priv(dev)->secy; 3363 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3364 3365 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, 3366 IFLA_MACSEC_PAD) || 3367 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3368 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, 3369 MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) || 3370 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3371 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3372 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3373 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || 3374 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || 3375 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || 3376 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || 3377 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || 3378 0) 3379 goto nla_put_failure; 3380 3381 if (secy->replay_protect) { 3382 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) 3383 goto nla_put_failure; 3384 } 3385 3386 return 0; 3387 3388 nla_put_failure: 3389 return -EMSGSIZE; 3390 } 3391 3392 static struct rtnl_link_ops macsec_link_ops __read_mostly = { 3393 .kind = "macsec", 3394 .priv_size = sizeof(struct macsec_dev), 3395 .maxtype = IFLA_MACSEC_MAX, 3396 .policy = macsec_rtnl_policy, 3397 .setup = macsec_setup, 3398 .validate = macsec_validate_attr, 3399 .newlink = macsec_newlink, 3400 .changelink = macsec_changelink, 3401 .dellink = macsec_dellink, 3402 .get_size = macsec_get_size, 3403 .fill_info = macsec_fill_info, 3404 .get_link_net = macsec_get_link_net, 3405 }; 3406 3407 static bool is_macsec_master(struct net_device *dev) 3408 { 3409 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; 3410 } 3411 3412 static int macsec_notify(struct notifier_block *this, unsigned long event, 3413 void *ptr) 3414 { 3415 struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); 3416 LIST_HEAD(head); 3417 3418 if (!is_macsec_master(real_dev)) 3419 return NOTIFY_DONE; 3420 3421 switch (event) { 3422 case NETDEV_UNREGISTER: { 3423 struct macsec_dev *m, *n; 3424 struct macsec_rxh_data *rxd; 3425 3426 rxd = macsec_data_rtnl(real_dev); 3427 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3428 macsec_common_dellink(m->secy.netdev, &head); 3429 } 3430 3431 netdev_rx_handler_unregister(real_dev); 3432 kfree(rxd); 3433 3434 unregister_netdevice_many(&head); 3435 break; 3436 } 3437 case NETDEV_CHANGEMTU: { 3438 struct macsec_dev *m; 3439 struct macsec_rxh_data *rxd; 3440 3441 rxd = macsec_data_rtnl(real_dev); 3442 list_for_each_entry(m, &rxd->secys, secys) { 3443 struct net_device *dev = m->secy.netdev; 3444 unsigned int mtu = real_dev->mtu - (m->secy.icv_len + 3445 macsec_extra_len(true)); 3446 3447 if (dev->mtu > mtu) 3448 dev_set_mtu(dev, mtu); 3449 } 3450 } 3451 } 3452 3453 return NOTIFY_OK; 3454 } 3455 3456 static struct notifier_block macsec_notifier = { 3457 .notifier_call = macsec_notify, 3458 }; 3459 3460 static int __init macsec_init(void) 3461 { 3462 int err; 3463 3464 pr_info("MACsec IEEE 802.1AE\n"); 3465 err = register_netdevice_notifier(&macsec_notifier); 3466 if (err) 3467 return err; 3468 3469 err = rtnl_link_register(&macsec_link_ops); 3470 if (err) 3471 goto notifier; 3472 3473 err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops); 3474 if (err) 3475 goto rtnl; 3476 3477 return 0; 3478 3479 rtnl: 3480 rtnl_link_unregister(&macsec_link_ops); 3481 notifier: 3482 unregister_netdevice_notifier(&macsec_notifier); 3483 return err; 3484 } 3485 3486 static void __exit macsec_exit(void) 3487 { 3488 genl_unregister_family(&macsec_fam); 3489 rtnl_link_unregister(&macsec_link_ops); 3490 unregister_netdevice_notifier(&macsec_notifier); 3491 rcu_barrier(); 3492 } 3493 3494 module_init(macsec_init); 3495 module_exit(macsec_exit); 3496 3497 MODULE_ALIAS_RTNL_LINK("macsec"); 3498 3499 MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 3500 MODULE_LICENSE("GPL v2"); 3501