// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: GRO cells state used on the receive path
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}
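/* Note: macsec_data_rcu() above is for the packet (softirq) path and
 * relies on the BH-flavored RCU read side; macsec_data_rtnl() below is
 * for control paths that already hold the RTNL lock. Using either from
 * the wrong context trips the lockdep checks built into
 * rcu_dereference_bh() and rtnl_dereference().
 */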
static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
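/* Worked example: a SecY on a device with MAC address 52:54:00:12:34:56
 * and the default end-station port number 1 (MACSEC_PORT_ES) gets the
 * 8-byte SCI 52-54-00-12-34-56-00-01, i.e. the MAC address followed by
 * the 16-bit port number in network byte order.
 */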
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
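/* Per IEEE 802.1AE-2006 9.7, the SL field encodes the number of octets
 * of secure data only when that number is between 1 and 47; for 48
 * octets (MIN_NON_SHORT_LEN) or more it stays 0, which is why
 * macsec_set_shortlen() above writes short_length only for small
 * frames.
 */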
/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
	}

	return macsec->real_dev->phydev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}
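/* The GCM nonce is simply the 8-byte SCI concatenated with the 4-byte
 * packet number, matching the 96-bit IV (GCM_AES_IV_LEN) that the
 * gcm(aes) transform expects; struct gcm_iv above mirrors this layout.
 */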
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
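/* tx_sa_update_pn() hands out the PN for the current frame and advances
 * next_pn. When next_pn wraps to 0 the 32-bit PN space is exhausted:
 * the SA is deactivated and, if protect_frames is set, the SecY stops
 * being operational until a key agreement entity (e.g. an MKA daemon)
 * installs a fresh SA.
 */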
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
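/* macsec_alloc_req() packs the AEAD request, the IV and the scatterlist
 * into a single allocation so the hot path pays for one GFP_ATOMIC
 * kmalloc() instead of three. Rough layout, with sg aligned up:
 *
 *   [ aead_request + crypto_aead_reqsize() | IV (12B) | sg[num_frags] ]
 */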
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
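/* Return convention for macsec_encrypt(): the (possibly reallocated)
 * skb on synchronous success, ERR_PTR(-EINPROGRESS) when the crypto
 * layer went asynchronous (macsec_encrypt_done() then finishes the TX
 * path), or another ERR_PTR() on failure, in which case the skb has
 * already been freed here.
 */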
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}
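/* Replay accounting example: with replay_window = 32 and
 * rx_sa->next_pn = 100, lowest_pn is 68. With replay protection on, a
 * frame with PN 67 or below is counted as InPktsLate and dropped; with
 * replay protection off it is still delivered, but counted as
 * InPktsDelayed.
 */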
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
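/* AAD split in macsec_decrypt(): for encrypted frames (TCI E bit) the
 * ethernet + MACsec headers are authenticated-only data and the payload
 * plus ICV form the ciphertext; for integrity-only frames everything up
 * to the ICV is AAD and only the ICV is passed as "ciphertext", so
 * gcm(aes) merely verifies the tag.
 */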
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (!macsec_is_offloaded(macsec) &&
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}

		if (netif_running(macsec->secy.netdev) &&
		    macsec_is_offloaded(macsec)) {
			ret = RX_HANDLER_EXACT;
			goto out;
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
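/* For untagged traffic, a SecY in strict validation mode (when not
 * offloaded) only bumps InPktsNoTag; any other SecY stacked on the real
 * device gets a clone delivered and counted as InPktsUntagged. A
 * running, offloaded SecY claims the frame with RX_HANDLER_EXACT;
 * otherwise RX_HANDLER_PASS also hands the original back to the stack
 * on the real device, i.e. the uncontrolled port.
 */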
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
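/* rx_handler contract: RX_HANDLER_CONSUMED means this handler took
 * ownership of the skb (delivered through gro_cells, handed to the
 * asynchronous decrypt callback, or freed); RX_HANDLER_PASS returns the
 * frame to the stack on the real device, i.e. the uncontrolled port.
 */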
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}
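/* The SAK is expanded into a gcm(aes) transform once, at SA creation
 * time, so per-packet work only has to set up a request. key_len
 * selects the cipher strength: a 16-byte SAK yields GCM-AES-128, a
 * 32-byte SAK GCM-AES-256 (see MACSEC_GCM_AES_*_SAK_LEN above).
 */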
static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
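/* RX SCs live on a singly linked RCU list hanging off the SecY:
 * create_rx_sc() links new channels at the head with
 * rcu_assign_pointer(), del_rx_sc() unlinks under RTNL, and the last
 * reference drop frees the SC via call_rcu() (see macsec_rxsc_put()).
 */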
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}
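/* Typical call sequence for one offloaded operation, e.g. adding an RX
 * SA through a PHY driver's macsec_ops:
 *
 *   ctx->prepare = true;  ops->mdo_add_rxsa(ctx);  // may fail, no state change
 *   ctx->prepare = false; ops->mdo_add_rxsa(ctx);  // commit, must not fail
 *
 * so drivers are expected to reserve resources in the prepare phase and
 * only touch hardware state in the commit phase.
 */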
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       MACSEC_KEYID_LEN);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	kfree(rx_sa);
	rtnl_unlock();
	return err;
}
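/* Note the ordering in macsec_add_rxsa(): the SA is fully initialized
 * and, when offloading, pushed to the device first; only then is it
 * published with rcu_assign_pointer(), so the RX datapath can never
 * observe a half-configured SA.
 */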
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       MACSEC_KEYID_LEN);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	kfree(tx_sa);
	rtnl_unlock();
	return err;
}
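/* Installing an active TX SA in the encoding_sa slot is what makes the
 * SecY operational again, i.e. able to protect frames; the cleanup path
 * restores the previous operational state if the offload step fails.
 */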
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
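/* An SA may only be deleted once it is inactive (both deletion paths
 * above return -EBUSY for an active SA), so userspace must deactivate
 * it first and the datapath cannot race with teardown.
 */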
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
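/* validate_upd_sa() deliberately rejects MACSEC_SA_ATTR_KEY and
 * MACSEC_SA_ATTR_KEYID: an update may change the PN and the active
 * flag, but never the key itself. Rekeying is done by installing a new
 * SA under a different association number instead.
 */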
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	u32 prev_pn = 0;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn;
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->n_rx_sc > 0)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}
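/* Toggling offload on a live setup is deliberately restrictive: only one
 * MACsec interface per underlying device may be offloaded at a time, the
 * interface must be down, and no RX SC or TX SA may be configured yet,
 * since migrating rules between software and hardware is not supported.
 */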
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	enum macsec_offload offload, prev_offload;
	int (*func)(struct macsec_context *ctx);
	struct nlattr **attrs = info->attrs;
	struct net_device *dev, *loop_dev;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	struct net *loop_net;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	/* Hold the RTNL across the device lookup and the netdev walk below,
	 * like every other doit handler in this file.
	 */
	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload) {
		ret = 0;
		goto out;
	}

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (offload == MACSEC_OFFLOAD_OFF)
		goto skip_limitation;

	/* Check the physical interface isn't offloading another interface
	 * first.
	 */
	for_each_net(loop_net) {
		for_each_netdev(loop_net, loop_dev) {
			struct macsec_dev *priv;

			if (!netif_is_macsec(loop_dev))
				continue;

			priv = macsec_priv(loop_dev);

			if (priv->real_dev == macsec->real_dev &&
			    priv->offload != MACSEC_OFFLOAD_OFF) {
				ret = -EBUSY;
				goto out;
			}
		}
	}

skip_limitation:
	/* Check if the net device is busy. */
	if (netif_running(dev)) {
		ret = -EBUSY;
		goto out;
	}

	prev_offload = macsec->offload;
	macsec->offload = offload;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec)) {
		ret = -EBUSY;
		goto rollback;
	}

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops) {
		ret = -EOPNOTSUPP;
		goto rollback;
	}

	if (prev_offload == MACSEC_OFFLOAD_OFF)
		func = ops->mdo_add_secy;
	else
		func = ops->mdo_del_secy;

	ctx.secy = &macsec->secy;
	ret = macsec_offload(func, &ctx);
	if (ret)
		goto rollback;

	rtnl_unlock();
	return 0;

rollback:
	macsec->offload = prev_offload;
out:
	rtnl_unlock();
	return ret;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
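/* The SC- and SecY-level counters below are u64s maintained under a
 * u64_stats_sync seqcount, so each per-CPU copy is snapshotted with a
 * fetch/retry loop before being summed; the per-SA counters above are
 * plain u32s and are read directly.
 */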
static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
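/* Dump one SecY as a self-contained MACSEC_CMD_GET_TXSC message: ifindex,
 * offload state and SecY config first, then TX SC and SecY statistics,
 * the list of TX SAs, and finally the RX SC list, with each RX SC nesting
 * its own statistics and RX SA list.
 */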
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};
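/* These ops back the "macsec" generic netlink family registered below.
 * For illustration only (device name, port and key values are made up),
 * the doit handlers above correspond to iproute2 commands such as:
 *
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit-hex-key>
 *	ip macsec add macsec0 rx port 1 address 00:11:22:33:44:55
 *	ip macsec show
 */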
static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};

static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops)
			macsec_offload(ops->mdo_dev_stop, &ctx);
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
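/* Shared by newlink and changelink: applies the IFLA_MACSEC_* attributes
 * to the SecY and TX SC. macsec_changelink() rejects
 * IFLA_MACSEC_CIPHER_SUITE (along with ICV_LEN, SCI and PORT), so the
 * cipher suite branch below is only reachable at link creation time.
 */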
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
	ret = macsec_changelink_common(dev, data);
	if (ret)
		return ret;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	/* MACsec offloading is off by default */
	macsec->offload = MACSEC_OFFLOAD_OFF;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
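/* Registration happens in dependency order (netdev notifier, rtnl link
 * ops, then the genl family) and is unwound in reverse on failure; exit
 * additionally waits for pending call_rcu() callbacks via rcu_barrier()
 * before the module text can go away.
 */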
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");