/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: currently in use
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: currently in use
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is active
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the ES bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	macsec_rxsc_put(sa->sc);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (tx_sc->send_sci ||
	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

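	/* Atomically consume one packet number. On wraparound the SA
	 * becomes unusable, and if frames are protected the SecY goes
	 * !operational until a fresh SA is installed.
	 */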
	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
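		/* Octet counters depend on the E bit: encrypted frames count
		 * towards InOctetsDecrypted, integrity-only frames towards
		 * InOctetsValidated.
		 */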
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

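	/* Counterpart of macsec_encrypt(): build a single AEAD request whose
	 * crypt/assoc split depends on the E bit, with the IV derived from
	 * the received SCI and packet number.
	 */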
	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS)
			macsec_rxsa_put(rx_sa);
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam = {
	.id = GENL_ID_GENERATE,
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.netnsok = true,
};

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}
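
	/* The RXSC is now looked up and held; the SA slot is selected by the
	 * AN attribute validated above. For reference, the usual netlink
	 * client for this command is iproute2, invoked along the lines of
	 * (illustrative values):
	 *
	 *   ip macsec add macsec0 rx sci 5254001234560001 \
	 *           sa 0 pn 1 on key 01 0123456789abcdef0123456789abcdef
	 */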

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
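	/* the SAs are already cleared; the SC itself is freed via call_rcu()
	 * once the last reference is dropped
	 */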
rtnl_unlock(); 1934 1935 return 0; 1936 } 1937 1938 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 1939 { 1940 struct nlattr **attrs = info->attrs; 1941 struct net_device *dev; 1942 struct macsec_secy *secy; 1943 struct macsec_tx_sc *tx_sc; 1944 struct macsec_tx_sa *tx_sa; 1945 u8 assoc_num; 1946 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1947 1948 if (!attrs[MACSEC_ATTR_IFINDEX]) 1949 return -EINVAL; 1950 1951 if (parse_sa_config(attrs, tb_sa)) 1952 return -EINVAL; 1953 1954 rtnl_lock(); 1955 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 1956 &dev, &secy, &tx_sc, &assoc_num); 1957 if (IS_ERR(tx_sa)) { 1958 rtnl_unlock(); 1959 return PTR_ERR(tx_sa); 1960 } 1961 1962 if (tx_sa->active) { 1963 rtnl_unlock(); 1964 return -EBUSY; 1965 } 1966 1967 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 1968 clear_tx_sa(tx_sa); 1969 1970 rtnl_unlock(); 1971 1972 return 0; 1973 } 1974 1975 static bool validate_upd_sa(struct nlattr **attrs) 1976 { 1977 if (!attrs[MACSEC_SA_ATTR_AN] || 1978 attrs[MACSEC_SA_ATTR_KEY] || 1979 attrs[MACSEC_SA_ATTR_KEYID]) 1980 return false; 1981 1982 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1983 return false; 1984 1985 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) 1986 return false; 1987 1988 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1989 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1990 return false; 1991 } 1992 1993 return true; 1994 } 1995 1996 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 1997 { 1998 struct nlattr **attrs = info->attrs; 1999 struct net_device *dev; 2000 struct macsec_secy *secy; 2001 struct macsec_tx_sc *tx_sc; 2002 struct macsec_tx_sa *tx_sa; 2003 u8 assoc_num; 2004 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2005 2006 if (!attrs[MACSEC_ATTR_IFINDEX]) 2007 return -EINVAL; 2008 2009 if (parse_sa_config(attrs, tb_sa)) 2010 return -EINVAL; 2011 2012 if (!validate_upd_sa(tb_sa)) 2013 return -EINVAL; 2014 2015 rtnl_lock(); 2016 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2017 &dev, &secy, &tx_sc, &assoc_num); 2018 if (IS_ERR(tx_sa)) { 2019 rtnl_unlock(); 2020 return PTR_ERR(tx_sa); 2021 } 2022 2023 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2024 spin_lock_bh(&tx_sa->lock); 2025 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2026 spin_unlock_bh(&tx_sa->lock); 2027 } 2028 2029 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2030 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2031 2032 if (assoc_num == tx_sc->encoding_sa) 2033 secy->operational = tx_sa->active; 2034 2035 rtnl_unlock(); 2036 2037 return 0; 2038 } 2039 2040 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2041 { 2042 struct nlattr **attrs = info->attrs; 2043 struct net_device *dev; 2044 struct macsec_secy *secy; 2045 struct macsec_rx_sc *rx_sc; 2046 struct macsec_rx_sa *rx_sa; 2047 u8 assoc_num; 2048 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2049 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2050 2051 if (!attrs[MACSEC_ATTR_IFINDEX]) 2052 return -EINVAL; 2053 2054 if (parse_rxsc_config(attrs, tb_rxsc)) 2055 return -EINVAL; 2056 2057 if (parse_sa_config(attrs, tb_sa)) 2058 return -EINVAL; 2059 2060 if (!validate_upd_sa(tb_sa)) 2061 return -EINVAL; 2062 2063 rtnl_lock(); 2064 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2065 &dev, &secy, &rx_sc, &assoc_num); 2066 if (IS_ERR(rx_sa)) { 2067 rtnl_unlock(); 2068 return PTR_ERR(rx_sa); 2069 } 2070 2071 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2072 spin_lock_bh(&rx_sa->lock); 2073 
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 2074 spin_unlock_bh(&rx_sa->lock); 2075 } 2076 2077 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2078 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2079 2080 rtnl_unlock(); 2081 return 0; 2082 } 2083 2084 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2085 { 2086 struct nlattr **attrs = info->attrs; 2087 struct net_device *dev; 2088 struct macsec_secy *secy; 2089 struct macsec_rx_sc *rx_sc; 2090 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2091 2092 if (!attrs[MACSEC_ATTR_IFINDEX]) 2093 return -EINVAL; 2094 2095 if (parse_rxsc_config(attrs, tb_rxsc)) 2096 return -EINVAL; 2097 2098 if (!validate_add_rxsc(tb_rxsc)) 2099 return -EINVAL; 2100 2101 rtnl_lock(); 2102 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2103 if (IS_ERR(rx_sc)) { 2104 rtnl_unlock(); 2105 return PTR_ERR(rx_sc); 2106 } 2107 2108 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2109 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2110 2111 if (rx_sc->active != new) 2112 secy->n_rx_sc += new ? 1 : -1; 2113 2114 rx_sc->active = new; 2115 } 2116 2117 rtnl_unlock(); 2118 2119 return 0; 2120 } 2121 2122 static int copy_tx_sa_stats(struct sk_buff *skb, 2123 struct macsec_tx_sa_stats __percpu *pstats) 2124 { 2125 struct macsec_tx_sa_stats sum = {0, }; 2126 int cpu; 2127 2128 for_each_possible_cpu(cpu) { 2129 const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2130 2131 sum.OutPktsProtected += stats->OutPktsProtected; 2132 sum.OutPktsEncrypted += stats->OutPktsEncrypted; 2133 } 2134 2135 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || 2136 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) 2137 return -EMSGSIZE; 2138 2139 return 0; 2140 } 2141 2142 static int copy_rx_sa_stats(struct sk_buff *skb, 2143 struct macsec_rx_sa_stats __percpu *pstats) 2144 { 2145 struct macsec_rx_sa_stats sum = {0, }; 2146 int cpu; 2147 2148 for_each_possible_cpu(cpu) { 2149 const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); 2150 2151 sum.InPktsOK += stats->InPktsOK; 2152 sum.InPktsInvalid += stats->InPktsInvalid; 2153 sum.InPktsNotValid += stats->InPktsNotValid; 2154 sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; 2155 sum.InPktsUnusedSA += stats->InPktsUnusedSA; 2156 } 2157 2158 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || 2159 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || 2160 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || 2161 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || 2162 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) 2163 return -EMSGSIZE; 2164 2165 return 0; 2166 } 2167 2168 static int copy_rx_sc_stats(struct sk_buff *skb, 2169 struct pcpu_rx_sc_stats __percpu *pstats) 2170 { 2171 struct macsec_rx_sc_stats sum = {0, }; 2172 int cpu; 2173 2174 for_each_possible_cpu(cpu) { 2175 const struct pcpu_rx_sc_stats *stats; 2176 struct macsec_rx_sc_stats tmp; 2177 unsigned int start; 2178 2179 stats = per_cpu_ptr(pstats, cpu); 2180 do { 2181 start = u64_stats_fetch_begin_irq(&stats->syncp); 2182 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2183 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2184 2185 sum.InOctetsValidated += tmp.InOctetsValidated; 2186 sum.InOctetsDecrypted += tmp.InOctetsDecrypted; 2187 sum.InPktsUnchecked += tmp.InPktsUnchecked; 2188 sum.InPktsDelayed += 
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
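
/* Device-level (SecY) counters: untagged, badly tagged, and otherwise
 * unusable frames that never reached a specific SC/SA, summed with the
 * same per-cpu snapshot scheme as the SC counters above.
 */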
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
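
/* Serialize one SecY into the dump: the SecY configuration nest, TXSC
 * and device statistics, the transmit SA list, and the list of receive
 * SCs, each carrying its own SA list.  Any failure unwinds the open
 * nests and cancels the whole message.
 */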
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */
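
/* Walk all netdevices in the namespace and dump every macsec device.
 * cb->args[0] remembers how many devices were already visited so a
 * multi-part dump can resume, and cb->seq is tied to macsec_generation
 * so userspace can detect that the device set changed mid-dump.
 */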
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
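
/* Transmit path.  With frame protection disabled (IEEE 802.1AE-2006
 * 10.5), frames are counted and passed through to the underlying
 * device untagged.  Otherwise macsec_encrypt() protects the frame and
 * may complete asynchronously in the crypto layer (-EINPROGRESS).
 */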
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
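
/* Bringing the macsec device up mirrors its addressing state onto the
 * underlying device: the unicast address is added to the real device's
 * filter and the allmulti/promiscuous counts are propagated.  Errors
 * are unwound in reverse order.
 */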
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
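
/* macsec devices are queueless (IFF_NO_QUEUE, set in macsec_setup());
 * address and RX-flag changes are propagated straight to the
 * underlying device through the ops below.
 */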
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;

	eth_zero_addr(dev->broadcast);
}

static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}
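
/* Tear down every RXSC and TXSA of a SecY.  Runs under RTNL: the
 * RCU-visible pointers are unlinked first, and freeing is deferred to
 * an RCU grace period by free_rx_sc()/clear_tx_sa().
 */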
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_generation++;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}

	macsec_del_dev(macsec);
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
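
/* Create a macsec device stacked on the IFLA_LINK device.  The SCI
 * comes from IFLA_MACSEC_SCI, is derived from IFLA_MACSEC_PORT, or
 * defaults to the MAC address with port MACSEC_PORT_ES; it must be
 * unique among the SecYs sharing the same underlying device.
 */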
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unregister;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unregister;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	dev_hold(real_dev);

	return 0;

del_dev:
	macsec_del_dev(macsec);
unregister:
	unregister_netdevice(dev);
	return err;
}
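
/* Validate link attributes before device creation: only the default
 * GCM-AES-128 cipher suite is accepted, the ICV length must be one the
 * cipher can produce, the ES/SCB/INC_SCI TCI flags are mutually
 * exclusive, and replay protection requires a window size.
 */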
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return 0 +
		nla_total_size_64bit(8) + /* SCI */
		nla_total_size(1) + /* ICV_LEN */
		nla_total_size_64bit(8) + /* CIPHER_SUITE */
		nla_total_size(4) + /* WINDOW */
		nla_total_size(1) + /* ENCODING_SA */
		nla_total_size(1) + /* ENCRYPT */
		nla_total_size(1) + /* PROTECT */
		nla_total_size(1) + /* INC_SCI */
		nla_total_size(1) + /* ES */
		nla_total_size(1) + /* SCB */
		nla_total_size(1) + /* REPLAY_PROTECT */
		nla_total_size(1) + /* VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
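
/* React to events on the underlying device: on NETDEV_UNREGISTER,
 * destroy every macsec device stacked on it; on NETDEV_CHANGEMTU,
 * shrink the macsec devices' MTU so a full SecTAG and ICV still fit
 * in the new underlying MTU.
 */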
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_dellink(m->secy.netdev, &head);
		}
		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");