// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
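
/* The SA/SC get/put helpers below follow the usual RCU + refcount
 * pattern: *_get() takes a reference under the RCU-BH read side and
 * fails on inactive or dying entries, while *_put() drops the
 * reference and defers the actual free to an RCU callback once the
 * last user is gone.
 */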

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
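
/* Length accounting: the SecTAG is 6 octets (TCI/AN, SL, PN) plus an
 * optional 8-octet SCI. macsec_hdr_len() adds the 14-octet ethernet
 * header on top of that, while macsec_extra_len() instead adds the 2
 * octets of the original EtherType, which ends up inside the secure
 * data; the former is used for AAD lengths, the latter for frame growth.
 */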

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
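
/* Note: per IEEE 802.1AE (and uapi/linux/if_macsec.h), the TCI/AN
 * octet packs the V, ES, SC, SCB, E and C flags into its upper six
 * bits and the association number into the lowest two (MACSEC_AN_MASK),
 * which is why macsec_fill_sectag() can simply OR encoding_sa in.
 */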

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
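
/* Note: tx_sa->next_pn and tx_sa->next_pn_halves overlay the same
 * 64-bit counter (see pn_t in net/macsec.h), so tx_sa_update_pn()
 * above bumps the full counter for XPN but only the lower 32-bit half
 * otherwise; a counter that wraps to zero retires the SA.
 */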

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, len);
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
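
/* The request, IV and scatterlist handed out by macsec_alloc_req()
 * share one GFP_ATOMIC allocation, laid out as [aead_request + tfm
 * context][GCM IV][aligned scatterlist array], so aead_request_free()
 * in the completion paths releases all three at once.
 */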

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
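
/* Post-decryption bookkeeping: re-run the replay check against the
 * lowest acceptable PN, update the per-SC/SA counters per IEEE
 * 802.1AE-2006 10.6.5, and on success advance next_pn past the
 * received PN.
 */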

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		secy->netdev->stats.rx_dropped++;
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			secy->netdev->stats.rx_errors++;
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}
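
/* Completion callback for asynchronous decryption: it repeats the
 * tail of the synchronous RX path (post-decrypt checks, header/ICV
 * stripping, GRO hand-off) and then drops the SA/SC/netdev references
 * taken before the crypto request was queued.
 */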

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
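
/* handle_not_macsec() implements the uncontrolled-port side of RX:
 * untagged frames stay on the underlying device (RX_HANDLER_PASS)
 * and, depending on validateFrames and the offload state, copies are
 * also delivered to the MACsec ports stacked on top of it.
 */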

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (md_dst && md_dst->type == METADATA_MACSEC &&
			    (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
				continue;

			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				__netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
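
/* Main RX path for tagged frames: parse and validate the SecTAG, find
 * the receive SC by SCI and an SA by its AN, do an early replay check,
 * decrypt/validate, then hand the plain frame to the MACsec netdevice
 * through GRO. The nosci label handles frames whose SC is unknown.
 */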

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		secy->netdev->stats.rx_errors++;
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			secy->netdev->stats.rx_errors++;
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_errors++;
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
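
/* Each SA owns its own AEAD transform, keyed once when the SA is
 * created and with the configured ICV length set as the GCM authsize.
 */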

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
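
/* The RX SC list is RCU-managed: create_rx_sc() links new entries in
 * with rcu_assign_pointer(), del_rx_sc() above only unlinks, and the
 * memory is reclaimed through call_rcu() once macsec_rxsc_put() drops
 * the last reference.
 */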

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}
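
/* Note: sci_t and ssci_t are __bitwise types carrying the on-wire
 * (big-endian) value, so the nla_get/nla_put helpers above only
 * __force-cast to and from plain u64/u32 without any byte-swapping.
 */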

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
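
/* All of these netlink lookup helpers run with rtnl_lock() held by
 * their callers, which is what makes the rtnl_dereference() calls in
 * them safe.
 */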

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
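
/* The validate_add_*() helpers below only check attribute presence and
 * value ranges; key, PN and salt lengths are checked against the SecY
 * configuration later in the handlers, once the device is resolved.
 */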

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
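
/* RTNL-protected netlink handler: creates an RX SA, optionally seeds
 * its PN, and mirrors the SA (including a transient copy of the key,
 * wiped with memzero_explicit()) to the offloading device before
 * publishing it with rcu_assign_pointer().
 */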

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
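
/* Same pattern as macsec_add_rxsa(), with one extra wrinkle: installing
 * an active SA in the encoding_sa slot can flip the SecY to
 * operational, and that state is rolled back if offloading the SA
 * fails.
 */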

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
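
/* Deletion handlers: an SA must be deactivated before it can be
 * deleted (-EBUSY otherwise); the offload path is notified first, then
 * the entry is unpublished and freed once RCU readers are done with it.
 */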
macsec_rx_sa *rx_sa; 2111 u8 assoc_num; 2112 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2113 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2114 int ret; 2115 2116 if (!attrs[MACSEC_ATTR_IFINDEX]) 2117 return -EINVAL; 2118 2119 if (parse_sa_config(attrs, tb_sa)) 2120 return -EINVAL; 2121 2122 if (parse_rxsc_config(attrs, tb_rxsc)) 2123 return -EINVAL; 2124 2125 rtnl_lock(); 2126 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2127 &dev, &secy, &rx_sc, &assoc_num); 2128 if (IS_ERR(rx_sa)) { 2129 rtnl_unlock(); 2130 return PTR_ERR(rx_sa); 2131 } 2132 2133 if (rx_sa->active) { 2134 rtnl_unlock(); 2135 return -EBUSY; 2136 } 2137 2138 /* If h/w offloading is available, propagate to the device */ 2139 if (macsec_is_offloaded(netdev_priv(dev))) { 2140 const struct macsec_ops *ops; 2141 struct macsec_context ctx; 2142 2143 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2144 if (!ops) { 2145 ret = -EOPNOTSUPP; 2146 goto cleanup; 2147 } 2148 2149 ctx.sa.assoc_num = assoc_num; 2150 ctx.sa.rx_sa = rx_sa; 2151 ctx.secy = secy; 2152 2153 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2154 if (ret) 2155 goto cleanup; 2156 } 2157 2158 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2159 clear_rx_sa(rx_sa); 2160 2161 rtnl_unlock(); 2162 2163 return 0; 2164 2165 cleanup: 2166 rtnl_unlock(); 2167 return ret; 2168 } 2169 2170 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2171 { 2172 struct nlattr **attrs = info->attrs; 2173 struct net_device *dev; 2174 struct macsec_secy *secy; 2175 struct macsec_rx_sc *rx_sc; 2176 sci_t sci; 2177 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2178 int ret; 2179 2180 if (!attrs[MACSEC_ATTR_IFINDEX]) 2181 return -EINVAL; 2182 2183 if (parse_rxsc_config(attrs, tb_rxsc)) 2184 return -EINVAL; 2185 2186 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2187 return -EINVAL; 2188 2189 rtnl_lock(); 2190 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2191 if (IS_ERR(dev)) { 2192 rtnl_unlock(); 2193 return PTR_ERR(dev); 2194 } 2195 2196 secy = &macsec_priv(dev)->secy; 2197 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2198 2199 rx_sc = del_rx_sc(secy, sci); 2200 if (!rx_sc) { 2201 rtnl_unlock(); 2202 return -ENODEV; 2203 } 2204 2205 /* If h/w offloading is available, propagate to the device */ 2206 if (macsec_is_offloaded(netdev_priv(dev))) { 2207 const struct macsec_ops *ops; 2208 struct macsec_context ctx; 2209 2210 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2211 if (!ops) { 2212 ret = -EOPNOTSUPP; 2213 goto cleanup; 2214 } 2215 2216 ctx.rx_sc = rx_sc; 2217 ctx.secy = secy; 2218 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2219 if (ret) 2220 goto cleanup; 2221 } 2222 2223 free_rx_sc(rx_sc); 2224 rtnl_unlock(); 2225 2226 return 0; 2227 2228 cleanup: 2229 rtnl_unlock(); 2230 return ret; 2231 } 2232 2233 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2234 { 2235 struct nlattr **attrs = info->attrs; 2236 struct net_device *dev; 2237 struct macsec_secy *secy; 2238 struct macsec_tx_sc *tx_sc; 2239 struct macsec_tx_sa *tx_sa; 2240 u8 assoc_num; 2241 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2242 int ret; 2243 2244 if (!attrs[MACSEC_ATTR_IFINDEX]) 2245 return -EINVAL; 2246 2247 if (parse_sa_config(attrs, tb_sa)) 2248 return -EINVAL; 2249 2250 rtnl_lock(); 2251 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2252 &dev, &secy, &tx_sc, &assoc_num); 2253 if (IS_ERR(tx_sa)) { 2254 rtnl_unlock(); 2255 return PTR_ERR(tx_sa); 2256 } 2257 2258 if (tx_sa->active) { 2259 rtnl_unlock(); 2260 return -EBUSY; 
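/*
 * As on the RX side above, an SA that is still marked active cannot be
 * deleted: pulling the key out from under in-flight traffic would break
 * the secure channel mid-stream.  Userspace is expected to deactivate
 * the SA first (MACSEC_CMD_UPD_TXSA with MACSEC_SA_ATTR_ACTIVE = 0) and
 * only then issue the delete.  An illustrative iproute2 sequence
 * (device name and AN are examples only):
 *
 *   ip macsec set macsec0 tx sa 0 off
 *   ip macsec del macsec0 tx sa 0
 */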
2261 } 2262 2263 /* If h/w offloading is available, propagate to the device */ 2264 if (macsec_is_offloaded(netdev_priv(dev))) { 2265 const struct macsec_ops *ops; 2266 struct macsec_context ctx; 2267 2268 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2269 if (!ops) { 2270 ret = -EOPNOTSUPP; 2271 goto cleanup; 2272 } 2273 2274 ctx.sa.assoc_num = assoc_num; 2275 ctx.sa.tx_sa = tx_sa; 2276 ctx.secy = secy; 2277 2278 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2279 if (ret) 2280 goto cleanup; 2281 } 2282 2283 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2284 clear_tx_sa(tx_sa); 2285 2286 rtnl_unlock(); 2287 2288 return 0; 2289 2290 cleanup: 2291 rtnl_unlock(); 2292 return ret; 2293 } 2294 2295 static bool validate_upd_sa(struct nlattr **attrs) 2296 { 2297 if (!attrs[MACSEC_SA_ATTR_AN] || 2298 attrs[MACSEC_SA_ATTR_KEY] || 2299 attrs[MACSEC_SA_ATTR_KEYID] || 2300 attrs[MACSEC_SA_ATTR_SSCI] || 2301 attrs[MACSEC_SA_ATTR_SALT]) 2302 return false; 2303 2304 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2305 return false; 2306 2307 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 2308 return false; 2309 2310 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2311 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2312 return false; 2313 } 2314 2315 return true; 2316 } 2317 2318 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2319 { 2320 struct nlattr **attrs = info->attrs; 2321 struct net_device *dev; 2322 struct macsec_secy *secy; 2323 struct macsec_tx_sc *tx_sc; 2324 struct macsec_tx_sa *tx_sa; 2325 u8 assoc_num; 2326 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2327 bool was_operational, was_active; 2328 pn_t prev_pn; 2329 int ret = 0; 2330 2331 prev_pn.full64 = 0; 2332 2333 if (!attrs[MACSEC_ATTR_IFINDEX]) 2334 return -EINVAL; 2335 2336 if (parse_sa_config(attrs, tb_sa)) 2337 return -EINVAL; 2338 2339 if (!validate_upd_sa(tb_sa)) 2340 return -EINVAL; 2341 2342 rtnl_lock(); 2343 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2344 &dev, &secy, &tx_sc, &assoc_num); 2345 if (IS_ERR(tx_sa)) { 2346 rtnl_unlock(); 2347 return PTR_ERR(tx_sa); 2348 } 2349 2350 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2351 int pn_len; 2352 2353 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2354 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2355 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2356 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2357 rtnl_unlock(); 2358 return -EINVAL; 2359 } 2360 2361 spin_lock_bh(&tx_sa->lock); 2362 prev_pn = tx_sa->next_pn_halves; 2363 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2364 spin_unlock_bh(&tx_sa->lock); 2365 } 2366 2367 was_active = tx_sa->active; 2368 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2369 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2370 2371 was_operational = secy->operational; 2372 if (assoc_num == tx_sc->encoding_sa) 2373 secy->operational = tx_sa->active; 2374 2375 /* If h/w offloading is available, propagate to the device */ 2376 if (macsec_is_offloaded(netdev_priv(dev))) { 2377 const struct macsec_ops *ops; 2378 struct macsec_context ctx; 2379 2380 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2381 if (!ops) { 2382 ret = -EOPNOTSUPP; 2383 goto cleanup; 2384 } 2385 2386 ctx.sa.assoc_num = assoc_num; 2387 ctx.sa.tx_sa = tx_sa; 2388 ctx.secy = secy; 2389 2390 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2391 if (ret) 2392 goto cleanup; 2393 } 2394 2395 rtnl_unlock(); 2396 2397 return 0; 2398 2399 cleanup: 2400 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2401 spin_lock_bh(&tx_sa->lock); 2402 tx_sa->next_pn_halves = prev_pn; 2403 spin_unlock_bh(&tx_sa->lock); 2404 } 2405 tx_sa->active = was_active; 2406 secy->operational = was_operational; 2407 rtnl_unlock(); 2408 return ret; 2409 } 2410 2411 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2412 { 2413 struct nlattr **attrs = info->attrs; 2414 struct net_device *dev; 2415 struct macsec_secy *secy; 2416 struct macsec_rx_sc *rx_sc; 2417 struct macsec_rx_sa *rx_sa; 2418 u8 assoc_num; 2419 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2420 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2421 bool was_active; 2422 pn_t prev_pn; 2423 int ret = 0; 2424 2425 prev_pn.full64 = 0; 2426 2427 if (!attrs[MACSEC_ATTR_IFINDEX]) 2428 return -EINVAL; 2429 2430 if (parse_rxsc_config(attrs, tb_rxsc)) 2431 return -EINVAL; 2432 2433 if (parse_sa_config(attrs, tb_sa)) 2434 return -EINVAL; 2435 2436 if (!validate_upd_sa(tb_sa)) 2437 return -EINVAL; 2438 2439 rtnl_lock(); 2440 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2441 &dev, &secy, &rx_sc, &assoc_num); 2442 if (IS_ERR(rx_sa)) { 2443 rtnl_unlock(); 2444 return PTR_ERR(rx_sa); 2445 } 2446 2447 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2448 int pn_len; 2449 2450 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2451 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2452 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2453 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2454 rtnl_unlock(); 2455 return -EINVAL; 2456 } 2457 2458 spin_lock_bh(&rx_sa->lock); 2459 prev_pn = rx_sa->next_pn_halves; 2460 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2461 spin_unlock_bh(&rx_sa->lock); 2462 } 2463 2464 was_active = rx_sa->active; 2465 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2466 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2467 2468 /* If h/w offloading is available, propagate to the device */ 2469 if (macsec_is_offloaded(netdev_priv(dev))) { 2470 const struct macsec_ops *ops; 2471 struct macsec_context ctx; 2472 2473 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2474 if (!ops) { 2475 ret = -EOPNOTSUPP; 2476 goto cleanup; 2477 } 2478 2479 ctx.sa.assoc_num = assoc_num; 2480 ctx.sa.rx_sa = rx_sa; 2481 ctx.secy = secy; 2482 2483 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2484 if (ret) 2485 goto cleanup; 2486 } 2487 2488 rtnl_unlock(); 2489 return 0; 2490 2491 cleanup: 2492 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2493 spin_lock_bh(&rx_sa->lock); 2494 rx_sa->next_pn_halves = prev_pn; 2495 spin_unlock_bh(&rx_sa->lock); 2496 } 2497 rx_sa->active = was_active; 2498 rtnl_unlock(); 2499 return ret; 2500 } 2501 2502 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2503 { 2504 struct nlattr **attrs = info->attrs; 2505 struct net_device *dev; 2506 struct macsec_secy *secy; 2507 struct macsec_rx_sc *rx_sc; 2508 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2509 unsigned int prev_n_rx_sc; 2510 bool was_active; 2511 int ret; 2512 2513 if (!attrs[MACSEC_ATTR_IFINDEX]) 2514 return -EINVAL; 2515 2516 if (parse_rxsc_config(attrs, tb_rxsc)) 2517 return -EINVAL; 2518 2519 if (!validate_add_rxsc(tb_rxsc)) 2520 return -EINVAL; 2521 2522 rtnl_lock(); 2523 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2524 if (IS_ERR(rx_sc)) { 2525 rtnl_unlock(); 2526 return PTR_ERR(rx_sc); 2527 } 2528 2529 was_active = rx_sc->active; 2530 prev_n_rx_sc = secy->n_rx_sc; 2531 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2532 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2533 2534 if (rx_sc->active != new) 2535 secy->n_rx_sc += new ? 
1 : -1; 2536 2537 rx_sc->active = new; 2538 } 2539 2540 /* If h/w offloading is available, propagate to the device */ 2541 if (macsec_is_offloaded(netdev_priv(dev))) { 2542 const struct macsec_ops *ops; 2543 struct macsec_context ctx; 2544 2545 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2546 if (!ops) { 2547 ret = -EOPNOTSUPP; 2548 goto cleanup; 2549 } 2550 2551 ctx.rx_sc = rx_sc; 2552 ctx.secy = secy; 2553 2554 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); 2555 if (ret) 2556 goto cleanup; 2557 } 2558 2559 rtnl_unlock(); 2560 2561 return 0; 2562 2563 cleanup: 2564 secy->n_rx_sc = prev_n_rx_sc; 2565 rx_sc->active = was_active; 2566 rtnl_unlock(); 2567 return ret; 2568 } 2569 2570 static bool macsec_is_configured(struct macsec_dev *macsec) 2571 { 2572 struct macsec_secy *secy = &macsec->secy; 2573 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2574 int i; 2575 2576 if (secy->rx_sc) 2577 return true; 2578 2579 for (i = 0; i < MACSEC_NUM_AN; i++) 2580 if (tx_sc->sa[i]) 2581 return true; 2582 2583 return false; 2584 } 2585 2586 static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) 2587 { 2588 enum macsec_offload prev_offload; 2589 const struct macsec_ops *ops; 2590 struct macsec_context ctx; 2591 struct macsec_dev *macsec; 2592 int ret = 0; 2593 2594 macsec = macsec_priv(dev); 2595 2596 /* Check if the offloading mode is supported by the underlying layers */ 2597 if (offload != MACSEC_OFFLOAD_OFF && 2598 !macsec_check_offload(offload, macsec)) 2599 return -EOPNOTSUPP; 2600 2601 /* Check if the net device is busy. */ 2602 if (netif_running(dev)) 2603 return -EBUSY; 2604 2605 /* Check if the device already has rules configured: we do not support 2606 * rules migration. 2607 */ 2608 if (macsec_is_configured(macsec)) 2609 return -EBUSY; 2610 2611 prev_offload = macsec->offload; 2612 2613 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, 2614 macsec, &ctx); 2615 if (!ops) 2616 return -EOPNOTSUPP; 2617 2618 macsec->offload = offload; 2619 2620 ctx.secy = &macsec->secy; 2621 ret = offload == MACSEC_OFFLOAD_OFF ? 
macsec_offload(ops->mdo_del_secy, &ctx) 2622 : macsec_offload(ops->mdo_add_secy, &ctx); 2623 if (ret) 2624 macsec->offload = prev_offload; 2625 2626 return ret; 2627 } 2628 2629 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2630 { 2631 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2632 struct nlattr **attrs = info->attrs; 2633 enum macsec_offload offload; 2634 struct macsec_dev *macsec; 2635 struct net_device *dev; 2636 int ret = 0; 2637 2638 if (!attrs[MACSEC_ATTR_IFINDEX]) 2639 return -EINVAL; 2640 2641 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2642 return -EINVAL; 2643 2644 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2645 attrs[MACSEC_ATTR_OFFLOAD], 2646 macsec_genl_offload_policy, NULL)) 2647 return -EINVAL; 2648 2649 rtnl_lock(); 2650 2651 dev = get_dev_from_nl(genl_info_net(info), attrs); 2652 if (IS_ERR(dev)) { 2653 ret = PTR_ERR(dev); 2654 goto out; 2655 } 2656 macsec = macsec_priv(dev); 2657 2658 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { 2659 ret = -EINVAL; 2660 goto out; 2661 } 2662 2663 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2664 2665 if (macsec->offload != offload) 2666 ret = macsec_update_offload(dev, offload); 2667 out: 2668 rtnl_unlock(); 2669 return ret; 2670 } 2671 2672 static void get_tx_sa_stats(struct net_device *dev, int an, 2673 struct macsec_tx_sa *tx_sa, 2674 struct macsec_tx_sa_stats *sum) 2675 { 2676 struct macsec_dev *macsec = macsec_priv(dev); 2677 int cpu; 2678 2679 /* If h/w offloading is available, propagate to the device */ 2680 if (macsec_is_offloaded(macsec)) { 2681 const struct macsec_ops *ops; 2682 struct macsec_context ctx; 2683 2684 ops = macsec_get_ops(macsec, &ctx); 2685 if (ops) { 2686 ctx.sa.assoc_num = an; 2687 ctx.sa.tx_sa = tx_sa; 2688 ctx.stats.tx_sa_stats = sum; 2689 ctx.secy = &macsec_priv(dev)->secy; 2690 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2691 } 2692 return; 2693 } 2694 2695 for_each_possible_cpu(cpu) { 2696 const struct macsec_tx_sa_stats *stats = 2697 per_cpu_ptr(tx_sa->stats, cpu); 2698 2699 sum->OutPktsProtected += stats->OutPktsProtected; 2700 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2701 } 2702 } 2703 2704 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2705 { 2706 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2707 sum->OutPktsProtected) || 2708 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2709 sum->OutPktsEncrypted)) 2710 return -EMSGSIZE; 2711 2712 return 0; 2713 } 2714 2715 static void get_rx_sa_stats(struct net_device *dev, 2716 struct macsec_rx_sc *rx_sc, int an, 2717 struct macsec_rx_sa *rx_sa, 2718 struct macsec_rx_sa_stats *sum) 2719 { 2720 struct macsec_dev *macsec = macsec_priv(dev); 2721 int cpu; 2722 2723 /* If h/w offloading is available, propagate to the device */ 2724 if (macsec_is_offloaded(macsec)) { 2725 const struct macsec_ops *ops; 2726 struct macsec_context ctx; 2727 2728 ops = macsec_get_ops(macsec, &ctx); 2729 if (ops) { 2730 ctx.sa.assoc_num = an; 2731 ctx.sa.rx_sa = rx_sa; 2732 ctx.stats.rx_sa_stats = sum; 2733 ctx.secy = &macsec_priv(dev)->secy; 2734 ctx.rx_sc = rx_sc; 2735 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2736 } 2737 return; 2738 } 2739 2740 for_each_possible_cpu(cpu) { 2741 const struct macsec_rx_sa_stats *stats = 2742 per_cpu_ptr(rx_sa->stats, cpu); 2743 2744 sum->InPktsOK += stats->InPktsOK; 2745 sum->InPktsInvalid += stats->InPktsInvalid; 2746 sum->InPktsNotValid += stats->InPktsNotValid; 2747 sum->InPktsNotUsingSA += 
stats->InPktsNotUsingSA; 2748 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2749 } 2750 } 2751 2752 static int copy_rx_sa_stats(struct sk_buff *skb, 2753 struct macsec_rx_sa_stats *sum) 2754 { 2755 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2756 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2757 sum->InPktsInvalid) || 2758 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2759 sum->InPktsNotValid) || 2760 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2761 sum->InPktsNotUsingSA) || 2762 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2763 sum->InPktsUnusedSA)) 2764 return -EMSGSIZE; 2765 2766 return 0; 2767 } 2768 2769 static void get_rx_sc_stats(struct net_device *dev, 2770 struct macsec_rx_sc *rx_sc, 2771 struct macsec_rx_sc_stats *sum) 2772 { 2773 struct macsec_dev *macsec = macsec_priv(dev); 2774 int cpu; 2775 2776 /* If h/w offloading is available, propagate to the device */ 2777 if (macsec_is_offloaded(macsec)) { 2778 const struct macsec_ops *ops; 2779 struct macsec_context ctx; 2780 2781 ops = macsec_get_ops(macsec, &ctx); 2782 if (ops) { 2783 ctx.stats.rx_sc_stats = sum; 2784 ctx.secy = &macsec_priv(dev)->secy; 2785 ctx.rx_sc = rx_sc; 2786 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2787 } 2788 return; 2789 } 2790 2791 for_each_possible_cpu(cpu) { 2792 const struct pcpu_rx_sc_stats *stats; 2793 struct macsec_rx_sc_stats tmp; 2794 unsigned int start; 2795 2796 stats = per_cpu_ptr(rx_sc->stats, cpu); 2797 do { 2798 start = u64_stats_fetch_begin(&stats->syncp); 2799 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2800 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2801 2802 sum->InOctetsValidated += tmp.InOctetsValidated; 2803 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2804 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2805 sum->InPktsDelayed += tmp.InPktsDelayed; 2806 sum->InPktsOK += tmp.InPktsOK; 2807 sum->InPktsInvalid += tmp.InPktsInvalid; 2808 sum->InPktsLate += tmp.InPktsLate; 2809 sum->InPktsNotValid += tmp.InPktsNotValid; 2810 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2811 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2812 } 2813 } 2814 2815 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2816 { 2817 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2818 sum->InOctetsValidated, 2819 MACSEC_RXSC_STATS_ATTR_PAD) || 2820 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2821 sum->InOctetsDecrypted, 2822 MACSEC_RXSC_STATS_ATTR_PAD) || 2823 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2824 sum->InPktsUnchecked, 2825 MACSEC_RXSC_STATS_ATTR_PAD) || 2826 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2827 sum->InPktsDelayed, 2828 MACSEC_RXSC_STATS_ATTR_PAD) || 2829 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2830 sum->InPktsOK, 2831 MACSEC_RXSC_STATS_ATTR_PAD) || 2832 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2833 sum->InPktsInvalid, 2834 MACSEC_RXSC_STATS_ATTR_PAD) || 2835 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2836 sum->InPktsLate, 2837 MACSEC_RXSC_STATS_ATTR_PAD) || 2838 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2839 sum->InPktsNotValid, 2840 MACSEC_RXSC_STATS_ATTR_PAD) || 2841 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2842 sum->InPktsNotUsingSA, 2843 MACSEC_RXSC_STATS_ATTR_PAD) || 2844 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2845 sum->InPktsUnusedSA, 2846 
MACSEC_RXSC_STATS_ATTR_PAD)) 2847 return -EMSGSIZE; 2848 2849 return 0; 2850 } 2851 2852 static void get_tx_sc_stats(struct net_device *dev, 2853 struct macsec_tx_sc_stats *sum) 2854 { 2855 struct macsec_dev *macsec = macsec_priv(dev); 2856 int cpu; 2857 2858 /* If h/w offloading is available, propagate to the device */ 2859 if (macsec_is_offloaded(macsec)) { 2860 const struct macsec_ops *ops; 2861 struct macsec_context ctx; 2862 2863 ops = macsec_get_ops(macsec, &ctx); 2864 if (ops) { 2865 ctx.stats.tx_sc_stats = sum; 2866 ctx.secy = &macsec_priv(dev)->secy; 2867 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2868 } 2869 return; 2870 } 2871 2872 for_each_possible_cpu(cpu) { 2873 const struct pcpu_tx_sc_stats *stats; 2874 struct macsec_tx_sc_stats tmp; 2875 unsigned int start; 2876 2877 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2878 do { 2879 start = u64_stats_fetch_begin(&stats->syncp); 2880 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2881 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2882 2883 sum->OutPktsProtected += tmp.OutPktsProtected; 2884 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2885 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2886 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2887 } 2888 } 2889 2890 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2891 { 2892 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2893 sum->OutPktsProtected, 2894 MACSEC_TXSC_STATS_ATTR_PAD) || 2895 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2896 sum->OutPktsEncrypted, 2897 MACSEC_TXSC_STATS_ATTR_PAD) || 2898 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2899 sum->OutOctetsProtected, 2900 MACSEC_TXSC_STATS_ATTR_PAD) || 2901 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2902 sum->OutOctetsEncrypted, 2903 MACSEC_TXSC_STATS_ATTR_PAD)) 2904 return -EMSGSIZE; 2905 2906 return 0; 2907 } 2908 2909 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2910 { 2911 struct macsec_dev *macsec = macsec_priv(dev); 2912 int cpu; 2913 2914 /* If h/w offloading is available, propagate to the device */ 2915 if (macsec_is_offloaded(macsec)) { 2916 const struct macsec_ops *ops; 2917 struct macsec_context ctx; 2918 2919 ops = macsec_get_ops(macsec, &ctx); 2920 if (ops) { 2921 ctx.stats.dev_stats = sum; 2922 ctx.secy = &macsec_priv(dev)->secy; 2923 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2924 } 2925 return; 2926 } 2927 2928 for_each_possible_cpu(cpu) { 2929 const struct pcpu_secy_stats *stats; 2930 struct macsec_dev_stats tmp; 2931 unsigned int start; 2932 2933 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2934 do { 2935 start = u64_stats_fetch_begin(&stats->syncp); 2936 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2937 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2938 2939 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2940 sum->InPktsUntagged += tmp.InPktsUntagged; 2941 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2942 sum->InPktsNoTag += tmp.InPktsNoTag; 2943 sum->InPktsBadTag += tmp.InPktsBadTag; 2944 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2945 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2946 sum->InPktsOverrun += tmp.InPktsOverrun; 2947 } 2948 } 2949 2950 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2951 { 2952 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2953 sum->OutPktsUntagged, 2954 MACSEC_SECY_STATS_ATTR_PAD) || 2955 nla_put_u64_64bit(skb, 
MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2956 sum->InPktsUntagged, 2957 MACSEC_SECY_STATS_ATTR_PAD) || 2958 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2959 sum->OutPktsTooLong, 2960 MACSEC_SECY_STATS_ATTR_PAD) || 2961 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2962 sum->InPktsNoTag, 2963 MACSEC_SECY_STATS_ATTR_PAD) || 2964 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2965 sum->InPktsBadTag, 2966 MACSEC_SECY_STATS_ATTR_PAD) || 2967 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2968 sum->InPktsUnknownSCI, 2969 MACSEC_SECY_STATS_ATTR_PAD) || 2970 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2971 sum->InPktsNoSCI, 2972 MACSEC_SECY_STATS_ATTR_PAD) || 2973 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2974 sum->InPktsOverrun, 2975 MACSEC_SECY_STATS_ATTR_PAD)) 2976 return -EMSGSIZE; 2977 2978 return 0; 2979 } 2980 2981 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2982 { 2983 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2984 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2985 MACSEC_ATTR_SECY); 2986 u64 csid; 2987 2988 if (!secy_nest) 2989 return 1; 2990 2991 switch (secy->key_len) { 2992 case MACSEC_GCM_AES_128_SAK_LEN: 2993 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 2994 break; 2995 case MACSEC_GCM_AES_256_SAK_LEN: 2996 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 2997 break; 2998 default: 2999 goto cancel; 3000 } 3001 3002 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 3003 MACSEC_SECY_ATTR_PAD) || 3004 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 3005 csid, MACSEC_SECY_ATTR_PAD) || 3006 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 3007 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 3008 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 3009 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 3010 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 3011 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 3012 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 3013 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3014 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3015 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3016 goto cancel; 3017 3018 if (secy->replay_protect) { 3019 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3020 goto cancel; 3021 } 3022 3023 nla_nest_end(skb, secy_nest); 3024 return 0; 3025 3026 cancel: 3027 nla_nest_cancel(skb, secy_nest); 3028 return 1; 3029 } 3030 3031 static noinline_for_stack int 3032 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3033 struct sk_buff *skb, struct netlink_callback *cb) 3034 { 3035 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3036 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3037 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3038 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3039 struct macsec_dev *macsec = netdev_priv(dev); 3040 struct macsec_dev_stats dev_stats = {0, }; 3041 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3042 struct nlattr *txsa_list, *rxsc_list; 3043 struct macsec_rx_sc *rx_sc; 3044 struct nlattr *attr; 3045 void *hdr; 3046 int i, j; 3047 3048 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3049 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3050 if (!hdr) 3051 return -EMSGSIZE; 3052 3053 
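/*
 * The dump reply for one SecY is a tree of nested attributes.  A rough
 * sketch of the layout built below (derived from the nla_put calls that
 * follow, not a normative description of the uAPI):
 *
 *   MACSEC_ATTR_IFINDEX
 *   MACSEC_ATTR_OFFLOAD    { MACSEC_OFFLOAD_ATTR_TYPE }
 *   MACSEC_ATTR_SECY       { SCI, cipher suite, operational flags, ... }
 *   MACSEC_ATTR_TXSC_STATS { ... }
 *   MACSEC_ATTR_SECY_STATS { ... }
 *   MACSEC_ATTR_TXSA_LIST  { 1: { AN, PN, key id, stats }, 2: ..., ... }
 *   MACSEC_ATTR_RXSC_LIST  { 1: { SCI, active, stats,
 *                                 MACSEC_RXSC_ATTR_SA_LIST { ... } }, ... }
 *
 * genl_dump_check_consistent() pairs with the RTNL-protected
 * macsec_generation counter so userspace can detect a dump that raced
 * with a configuration change.
 */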
genl_dump_check_consistent(cb, hdr); 3054 3055 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3056 goto nla_put_failure; 3057 3058 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3059 if (!attr) 3060 goto nla_put_failure; 3061 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3062 goto nla_put_failure; 3063 nla_nest_end(skb, attr); 3064 3065 if (nla_put_secy(secy, skb)) 3066 goto nla_put_failure; 3067 3068 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3069 if (!attr) 3070 goto nla_put_failure; 3071 3072 get_tx_sc_stats(dev, &tx_sc_stats); 3073 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3074 nla_nest_cancel(skb, attr); 3075 goto nla_put_failure; 3076 } 3077 nla_nest_end(skb, attr); 3078 3079 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3080 if (!attr) 3081 goto nla_put_failure; 3082 get_secy_stats(dev, &dev_stats); 3083 if (copy_secy_stats(skb, &dev_stats)) { 3084 nla_nest_cancel(skb, attr); 3085 goto nla_put_failure; 3086 } 3087 nla_nest_end(skb, attr); 3088 3089 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3090 if (!txsa_list) 3091 goto nla_put_failure; 3092 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3093 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3094 struct nlattr *txsa_nest; 3095 u64 pn; 3096 int pn_len; 3097 3098 if (!tx_sa) 3099 continue; 3100 3101 txsa_nest = nla_nest_start_noflag(skb, j++); 3102 if (!txsa_nest) { 3103 nla_nest_cancel(skb, txsa_list); 3104 goto nla_put_failure; 3105 } 3106 3107 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3108 if (!attr) { 3109 nla_nest_cancel(skb, txsa_nest); 3110 nla_nest_cancel(skb, txsa_list); 3111 goto nla_put_failure; 3112 } 3113 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3114 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3115 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3116 nla_nest_cancel(skb, attr); 3117 nla_nest_cancel(skb, txsa_nest); 3118 nla_nest_cancel(skb, txsa_list); 3119 goto nla_put_failure; 3120 } 3121 nla_nest_end(skb, attr); 3122 3123 if (secy->xpn) { 3124 pn = tx_sa->next_pn; 3125 pn_len = MACSEC_XPN_PN_LEN; 3126 } else { 3127 pn = tx_sa->next_pn_halves.lower; 3128 pn_len = MACSEC_DEFAULT_PN_LEN; 3129 } 3130 3131 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3132 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3133 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3134 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3135 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3136 nla_nest_cancel(skb, txsa_nest); 3137 nla_nest_cancel(skb, txsa_list); 3138 goto nla_put_failure; 3139 } 3140 3141 nla_nest_end(skb, txsa_nest); 3142 } 3143 nla_nest_end(skb, txsa_list); 3144 3145 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3146 if (!rxsc_list) 3147 goto nla_put_failure; 3148 3149 j = 1; 3150 for_each_rxsc_rtnl(secy, rx_sc) { 3151 int k; 3152 struct nlattr *rxsa_list; 3153 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3154 3155 if (!rxsc_nest) { 3156 nla_nest_cancel(skb, rxsc_list); 3157 goto nla_put_failure; 3158 } 3159 3160 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3161 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3162 MACSEC_RXSC_ATTR_PAD)) { 3163 nla_nest_cancel(skb, rxsc_nest); 3164 nla_nest_cancel(skb, rxsc_list); 3165 goto nla_put_failure; 3166 } 3167 3168 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3169 if (!attr) { 3170 nla_nest_cancel(skb, rxsc_nest); 3171 nla_nest_cancel(skb, rxsc_list); 3172 goto 
nla_put_failure; 3173 } 3174 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3175 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3176 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3177 nla_nest_cancel(skb, attr); 3178 nla_nest_cancel(skb, rxsc_nest); 3179 nla_nest_cancel(skb, rxsc_list); 3180 goto nla_put_failure; 3181 } 3182 nla_nest_end(skb, attr); 3183 3184 rxsa_list = nla_nest_start_noflag(skb, 3185 MACSEC_RXSC_ATTR_SA_LIST); 3186 if (!rxsa_list) { 3187 nla_nest_cancel(skb, rxsc_nest); 3188 nla_nest_cancel(skb, rxsc_list); 3189 goto nla_put_failure; 3190 } 3191 3192 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3193 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3194 struct nlattr *rxsa_nest; 3195 u64 pn; 3196 int pn_len; 3197 3198 if (!rx_sa) 3199 continue; 3200 3201 rxsa_nest = nla_nest_start_noflag(skb, k++); 3202 if (!rxsa_nest) { 3203 nla_nest_cancel(skb, rxsa_list); 3204 nla_nest_cancel(skb, rxsc_nest); 3205 nla_nest_cancel(skb, rxsc_list); 3206 goto nla_put_failure; 3207 } 3208 3209 attr = nla_nest_start_noflag(skb, 3210 MACSEC_SA_ATTR_STATS); 3211 if (!attr) { 3212 nla_nest_cancel(skb, rxsa_list); 3213 nla_nest_cancel(skb, rxsc_nest); 3214 nla_nest_cancel(skb, rxsc_list); 3215 goto nla_put_failure; 3216 } 3217 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3218 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3219 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3220 nla_nest_cancel(skb, attr); 3221 nla_nest_cancel(skb, rxsa_list); 3222 nla_nest_cancel(skb, rxsc_nest); 3223 nla_nest_cancel(skb, rxsc_list); 3224 goto nla_put_failure; 3225 } 3226 nla_nest_end(skb, attr); 3227 3228 if (secy->xpn) { 3229 pn = rx_sa->next_pn; 3230 pn_len = MACSEC_XPN_PN_LEN; 3231 } else { 3232 pn = rx_sa->next_pn_halves.lower; 3233 pn_len = MACSEC_DEFAULT_PN_LEN; 3234 } 3235 3236 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3237 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3238 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3239 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3240 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3241 nla_nest_cancel(skb, rxsa_nest); 3242 nla_nest_cancel(skb, rxsc_nest); 3243 nla_nest_cancel(skb, rxsc_list); 3244 goto nla_put_failure; 3245 } 3246 nla_nest_end(skb, rxsa_nest); 3247 } 3248 3249 nla_nest_end(skb, rxsa_list); 3250 nla_nest_end(skb, rxsc_nest); 3251 } 3252 3253 nla_nest_end(skb, rxsc_list); 3254 3255 genlmsg_end(skb, hdr); 3256 3257 return 0; 3258 3259 nla_put_failure: 3260 genlmsg_cancel(skb, hdr); 3261 return -EMSGSIZE; 3262 } 3263 3264 static int macsec_generation = 1; /* protected by RTNL */ 3265 3266 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3267 { 3268 struct net *net = sock_net(skb->sk); 3269 struct net_device *dev; 3270 int dev_idx, d; 3271 3272 dev_idx = cb->args[0]; 3273 3274 d = 0; 3275 rtnl_lock(); 3276 3277 cb->seq = macsec_generation; 3278 3279 for_each_netdev(net, dev) { 3280 struct macsec_secy *secy; 3281 3282 if (d < dev_idx) 3283 goto next; 3284 3285 if (!netif_is_macsec(dev)) 3286 goto next; 3287 3288 secy = &macsec_priv(dev)->secy; 3289 if (dump_secy(secy, dev, skb, cb) < 0) 3290 goto done; 3291 next: 3292 d++; 3293 } 3294 3295 done: 3296 rtnl_unlock(); 3297 cb->args[0] = d; 3298 return skb->len; 3299 } 3300 3301 static const struct genl_small_ops macsec_genl_ops[] = { 3302 { 3303 .cmd = MACSEC_CMD_GET_TXSC, 3304 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3305 .dumpit = macsec_dump_txsc, 3306 }, 3307 { 3308 .cmd = 
MACSEC_CMD_ADD_RXSC, 3309 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3310 .doit = macsec_add_rxsc, 3311 .flags = GENL_ADMIN_PERM, 3312 }, 3313 { 3314 .cmd = MACSEC_CMD_DEL_RXSC, 3315 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3316 .doit = macsec_del_rxsc, 3317 .flags = GENL_ADMIN_PERM, 3318 }, 3319 { 3320 .cmd = MACSEC_CMD_UPD_RXSC, 3321 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3322 .doit = macsec_upd_rxsc, 3323 .flags = GENL_ADMIN_PERM, 3324 }, 3325 { 3326 .cmd = MACSEC_CMD_ADD_TXSA, 3327 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3328 .doit = macsec_add_txsa, 3329 .flags = GENL_ADMIN_PERM, 3330 }, 3331 { 3332 .cmd = MACSEC_CMD_DEL_TXSA, 3333 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3334 .doit = macsec_del_txsa, 3335 .flags = GENL_ADMIN_PERM, 3336 }, 3337 { 3338 .cmd = MACSEC_CMD_UPD_TXSA, 3339 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3340 .doit = macsec_upd_txsa, 3341 .flags = GENL_ADMIN_PERM, 3342 }, 3343 { 3344 .cmd = MACSEC_CMD_ADD_RXSA, 3345 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3346 .doit = macsec_add_rxsa, 3347 .flags = GENL_ADMIN_PERM, 3348 }, 3349 { 3350 .cmd = MACSEC_CMD_DEL_RXSA, 3351 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3352 .doit = macsec_del_rxsa, 3353 .flags = GENL_ADMIN_PERM, 3354 }, 3355 { 3356 .cmd = MACSEC_CMD_UPD_RXSA, 3357 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3358 .doit = macsec_upd_rxsa, 3359 .flags = GENL_ADMIN_PERM, 3360 }, 3361 { 3362 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3363 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3364 .doit = macsec_upd_offload, 3365 .flags = GENL_ADMIN_PERM, 3366 }, 3367 }; 3368 3369 static struct genl_family macsec_fam __ro_after_init = { 3370 .name = MACSEC_GENL_NAME, 3371 .hdrsize = 0, 3372 .version = MACSEC_GENL_VERSION, 3373 .maxattr = MACSEC_ATTR_MAX, 3374 .policy = macsec_genl_policy, 3375 .netnsok = true, 3376 .module = THIS_MODULE, 3377 .small_ops = macsec_genl_ops, 3378 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3379 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, 3380 }; 3381 3382 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3383 struct net_device *dev) 3384 { 3385 struct macsec_dev *macsec = netdev_priv(dev); 3386 struct macsec_secy *secy = &macsec->secy; 3387 struct pcpu_secy_stats *secy_stats; 3388 int ret, len; 3389 3390 if (macsec_is_offloaded(netdev_priv(dev))) { 3391 struct metadata_dst *md_dst = secy->tx_sc.md_dst; 3392 3393 skb_dst_drop(skb); 3394 dst_hold(&md_dst->dst); 3395 skb_dst_set(skb, &md_dst->dst); 3396 skb->dev = macsec->real_dev; 3397 return dev_queue_xmit(skb); 3398 } 3399 3400 /* 10.5 */ 3401 if (!secy->protect_frames) { 3402 secy_stats = this_cpu_ptr(macsec->stats); 3403 u64_stats_update_begin(&secy_stats->syncp); 3404 secy_stats->stats.OutPktsUntagged++; 3405 u64_stats_update_end(&secy_stats->syncp); 3406 skb->dev = macsec->real_dev; 3407 len = skb->len; 3408 ret = dev_queue_xmit(skb); 3409 count_tx(dev, ret, len); 3410 return ret; 3411 } 3412 3413 if (!secy->operational) { 3414 kfree_skb(skb); 3415 dev->stats.tx_dropped++; 3416 return NETDEV_TX_OK; 3417 } 3418 3419 len = skb->len; 3420 skb = macsec_encrypt(skb, dev); 3421 if (IS_ERR(skb)) { 3422 if (PTR_ERR(skb) != -EINPROGRESS) 3423 dev->stats.tx_dropped++; 3424 return NETDEV_TX_OK; 3425 } 3426 3427 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3428 3429 macsec_encrypt_finish(skb, dev); 
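/*
 * By this point the frame has been encrypted (or only integrity-
 * protected, depending on tx_sc.encrypt) in software and retargeted to
 * the underlying device.  The earlier exits from this function cover
 * the other cases: offloaded SecYs stamp the MACsec metadata_dst and
 * transmit immediately, !protect_frames skips the SecTAG entirely
 * (counted as OutPktsUntagged), and -EINPROGRESS from macsec_encrypt()
 * means the AEAD request completes asynchronously, with the crypto
 * completion callback performing the transmit instead of this path.
 */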
3430 ret = dev_queue_xmit(skb); 3431 count_tx(dev, ret, len); 3432 return ret; 3433 } 3434 3435 #define MACSEC_FEATURES \ 3436 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3437 3438 static int macsec_dev_init(struct net_device *dev) 3439 { 3440 struct macsec_dev *macsec = macsec_priv(dev); 3441 struct net_device *real_dev = macsec->real_dev; 3442 int err; 3443 3444 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 3445 if (!dev->tstats) 3446 return -ENOMEM; 3447 3448 err = gro_cells_init(&macsec->gro_cells, dev); 3449 if (err) { 3450 free_percpu(dev->tstats); 3451 return err; 3452 } 3453 3454 dev->features = real_dev->features & MACSEC_FEATURES; 3455 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 3456 3457 dev->needed_headroom = real_dev->needed_headroom + 3458 MACSEC_NEEDED_HEADROOM; 3459 dev->needed_tailroom = real_dev->needed_tailroom + 3460 MACSEC_NEEDED_TAILROOM; 3461 3462 if (is_zero_ether_addr(dev->dev_addr)) 3463 eth_hw_addr_inherit(dev, real_dev); 3464 if (is_zero_ether_addr(dev->broadcast)) 3465 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3466 3467 /* Get macsec's reference to real_dev */ 3468 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); 3469 3470 return 0; 3471 } 3472 3473 static void macsec_dev_uninit(struct net_device *dev) 3474 { 3475 struct macsec_dev *macsec = macsec_priv(dev); 3476 3477 gro_cells_destroy(&macsec->gro_cells); 3478 free_percpu(dev->tstats); 3479 } 3480 3481 static netdev_features_t macsec_fix_features(struct net_device *dev, 3482 netdev_features_t features) 3483 { 3484 struct macsec_dev *macsec = macsec_priv(dev); 3485 struct net_device *real_dev = macsec->real_dev; 3486 3487 features &= (real_dev->features & MACSEC_FEATURES) | 3488 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3489 features |= NETIF_F_LLTX; 3490 3491 return features; 3492 } 3493 3494 static int macsec_dev_open(struct net_device *dev) 3495 { 3496 struct macsec_dev *macsec = macsec_priv(dev); 3497 struct net_device *real_dev = macsec->real_dev; 3498 int err; 3499 3500 err = dev_uc_add(real_dev, dev->dev_addr); 3501 if (err < 0) 3502 return err; 3503 3504 if (dev->flags & IFF_ALLMULTI) { 3505 err = dev_set_allmulti(real_dev, 1); 3506 if (err < 0) 3507 goto del_unicast; 3508 } 3509 3510 if (dev->flags & IFF_PROMISC) { 3511 err = dev_set_promiscuity(real_dev, 1); 3512 if (err < 0) 3513 goto clear_allmulti; 3514 } 3515 3516 /* If h/w offloading is available, propagate to the device */ 3517 if (macsec_is_offloaded(macsec)) { 3518 const struct macsec_ops *ops; 3519 struct macsec_context ctx; 3520 3521 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3522 if (!ops) { 3523 err = -EOPNOTSUPP; 3524 goto clear_allmulti; 3525 } 3526 3527 ctx.secy = &macsec->secy; 3528 err = macsec_offload(ops->mdo_dev_open, &ctx); 3529 if (err) 3530 goto clear_allmulti; 3531 } 3532 3533 if (netif_carrier_ok(real_dev)) 3534 netif_carrier_on(dev); 3535 3536 return 0; 3537 clear_allmulti: 3538 if (dev->flags & IFF_ALLMULTI) 3539 dev_set_allmulti(real_dev, -1); 3540 del_unicast: 3541 dev_uc_del(real_dev, dev->dev_addr); 3542 netif_carrier_off(dev); 3543 return err; 3544 } 3545 3546 static int macsec_dev_stop(struct net_device *dev) 3547 { 3548 struct macsec_dev *macsec = macsec_priv(dev); 3549 struct net_device *real_dev = macsec->real_dev; 3550 3551 netif_carrier_off(dev); 3552 3553 /* If h/w offloading is available, propagate to the device */ 3554 if (macsec_is_offloaded(macsec)) { 3555 const struct macsec_ops *ops; 3556 struct macsec_context ctx; 3557 3558 ops = 
macsec_get_ops(macsec, &ctx); 3559 if (ops) { 3560 ctx.secy = &macsec->secy; 3561 macsec_offload(ops->mdo_dev_stop, &ctx); 3562 } 3563 } 3564 3565 dev_mc_unsync(real_dev, dev); 3566 dev_uc_unsync(real_dev, dev); 3567 3568 if (dev->flags & IFF_ALLMULTI) 3569 dev_set_allmulti(real_dev, -1); 3570 3571 if (dev->flags & IFF_PROMISC) 3572 dev_set_promiscuity(real_dev, -1); 3573 3574 dev_uc_del(real_dev, dev->dev_addr); 3575 3576 return 0; 3577 } 3578 3579 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3580 { 3581 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3582 3583 if (!(dev->flags & IFF_UP)) 3584 return; 3585 3586 if (change & IFF_ALLMULTI) 3587 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3588 3589 if (change & IFF_PROMISC) 3590 dev_set_promiscuity(real_dev, 3591 dev->flags & IFF_PROMISC ? 1 : -1); 3592 } 3593 3594 static void macsec_dev_set_rx_mode(struct net_device *dev) 3595 { 3596 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3597 3598 dev_mc_sync(real_dev, dev); 3599 dev_uc_sync(real_dev, dev); 3600 } 3601 3602 static int macsec_set_mac_address(struct net_device *dev, void *p) 3603 { 3604 struct macsec_dev *macsec = macsec_priv(dev); 3605 struct net_device *real_dev = macsec->real_dev; 3606 struct sockaddr *addr = p; 3607 int err; 3608 3609 if (!is_valid_ether_addr(addr->sa_data)) 3610 return -EADDRNOTAVAIL; 3611 3612 if (!(dev->flags & IFF_UP)) 3613 goto out; 3614 3615 err = dev_uc_add(real_dev, addr->sa_data); 3616 if (err < 0) 3617 return err; 3618 3619 dev_uc_del(real_dev, dev->dev_addr); 3620 3621 out: 3622 eth_hw_addr_set(dev, addr->sa_data); 3623 3624 /* If h/w offloading is available, propagate to the device */ 3625 if (macsec_is_offloaded(macsec)) { 3626 const struct macsec_ops *ops; 3627 struct macsec_context ctx; 3628 3629 ops = macsec_get_ops(macsec, &ctx); 3630 if (ops) { 3631 ctx.secy = &macsec->secy; 3632 macsec_offload(ops->mdo_upd_secy, &ctx); 3633 } 3634 } 3635 3636 return 0; 3637 } 3638 3639 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3640 { 3641 struct macsec_dev *macsec = macsec_priv(dev); 3642 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3643 3644 if (macsec->real_dev->mtu - extra < new_mtu) 3645 return -ERANGE; 3646 3647 dev->mtu = new_mtu; 3648 3649 return 0; 3650 } 3651 3652 static void macsec_get_stats64(struct net_device *dev, 3653 struct rtnl_link_stats64 *s) 3654 { 3655 if (!dev->tstats) 3656 return; 3657 3658 dev_fetch_sw_netstats(s, dev->tstats); 3659 3660 s->rx_dropped = dev->stats.rx_dropped; 3661 s->tx_dropped = dev->stats.tx_dropped; 3662 s->rx_errors = dev->stats.rx_errors; 3663 } 3664 3665 static int macsec_get_iflink(const struct net_device *dev) 3666 { 3667 return macsec_priv(dev)->real_dev->ifindex; 3668 } 3669 3670 static const struct net_device_ops macsec_netdev_ops = { 3671 .ndo_init = macsec_dev_init, 3672 .ndo_uninit = macsec_dev_uninit, 3673 .ndo_open = macsec_dev_open, 3674 .ndo_stop = macsec_dev_stop, 3675 .ndo_fix_features = macsec_fix_features, 3676 .ndo_change_mtu = macsec_change_mtu, 3677 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3678 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3679 .ndo_set_mac_address = macsec_set_mac_address, 3680 .ndo_start_xmit = macsec_start_xmit, 3681 .ndo_get_stats64 = macsec_get_stats64, 3682 .ndo_get_iflink = macsec_get_iflink, 3683 }; 3684 3685 static const struct device_type macsec_type = { 3686 .name = "macsec", 3687 }; 3688 3689 static const struct nla_policy 
macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3690 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3691 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3692 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3693 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3694 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3695 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3696 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3697 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3698 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3699 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3700 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3701 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3702 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3703 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, 3704 }; 3705 3706 static void macsec_free_netdev(struct net_device *dev) 3707 { 3708 struct macsec_dev *macsec = macsec_priv(dev); 3709 3710 if (macsec->secy.tx_sc.md_dst) 3711 metadata_dst_free(macsec->secy.tx_sc.md_dst); 3712 free_percpu(macsec->stats); 3713 free_percpu(macsec->secy.tx_sc.stats); 3714 3715 /* Get rid of the macsec's reference to real_dev */ 3716 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3717 } 3718 3719 static void macsec_setup(struct net_device *dev) 3720 { 3721 ether_setup(dev); 3722 dev->min_mtu = 0; 3723 dev->max_mtu = ETH_MAX_MTU; 3724 dev->priv_flags |= IFF_NO_QUEUE; 3725 dev->netdev_ops = &macsec_netdev_ops; 3726 dev->needs_free_netdev = true; 3727 dev->priv_destructor = macsec_free_netdev; 3728 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3729 3730 eth_zero_addr(dev->broadcast); 3731 } 3732 3733 static int macsec_changelink_common(struct net_device *dev, 3734 struct nlattr *data[]) 3735 { 3736 struct macsec_secy *secy; 3737 struct macsec_tx_sc *tx_sc; 3738 3739 secy = &macsec_priv(dev)->secy; 3740 tx_sc = &secy->tx_sc; 3741 3742 if (data[IFLA_MACSEC_ENCODING_SA]) { 3743 struct macsec_tx_sa *tx_sa; 3744 3745 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3746 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3747 3748 secy->operational = tx_sa && tx_sa->active; 3749 } 3750 3751 if (data[IFLA_MACSEC_ENCRYPT]) 3752 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3753 3754 if (data[IFLA_MACSEC_PROTECT]) 3755 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3756 3757 if (data[IFLA_MACSEC_INC_SCI]) 3758 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3759 3760 if (data[IFLA_MACSEC_ES]) 3761 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3762 3763 if (data[IFLA_MACSEC_SCB]) 3764 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3765 3766 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3767 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3768 3769 if (data[IFLA_MACSEC_VALIDATION]) 3770 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3771 3772 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3773 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3774 case MACSEC_CIPHER_ID_GCM_AES_128: 3775 case MACSEC_DEFAULT_CIPHER_ID: 3776 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3777 secy->xpn = false; 3778 break; 3779 case MACSEC_CIPHER_ID_GCM_AES_256: 3780 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3781 secy->xpn = false; 3782 break; 3783 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3784 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3785 secy->xpn = true; 3786 break; 3787 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3788 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3789 secy->xpn = true; 3790 break; 3791 default: 3792 return -EINVAL; 3793 } 3794 } 3795 3796 if 
(data[IFLA_MACSEC_WINDOW]) { 3797 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3798 3799 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3800 * for XPN cipher suites */ 3801 if (secy->xpn && 3802 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3803 return -EINVAL; 3804 } 3805 3806 return 0; 3807 } 3808 3809 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3810 struct nlattr *data[], 3811 struct netlink_ext_ack *extack) 3812 { 3813 struct macsec_dev *macsec = macsec_priv(dev); 3814 bool macsec_offload_state_change = false; 3815 enum macsec_offload offload; 3816 struct macsec_tx_sc tx_sc; 3817 struct macsec_secy secy; 3818 int ret; 3819 3820 if (!data) 3821 return 0; 3822 3823 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3824 data[IFLA_MACSEC_ICV_LEN] || 3825 data[IFLA_MACSEC_SCI] || 3826 data[IFLA_MACSEC_PORT]) 3827 return -EINVAL; 3828 3829 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3830 * propagation fails, to revert macsec_changelink_common. 3831 */ 3832 memcpy(&secy, &macsec->secy, sizeof(secy)); 3833 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3834 3835 ret = macsec_changelink_common(dev, data); 3836 if (ret) 3837 goto cleanup; 3838 3839 if (data[IFLA_MACSEC_OFFLOAD]) { 3840 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); 3841 if (macsec->offload != offload) { 3842 macsec_offload_state_change = true; 3843 ret = macsec_update_offload(dev, offload); 3844 if (ret) 3845 goto cleanup; 3846 } 3847 } 3848 3849 /* If h/w offloading is available, propagate to the device */ 3850 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { 3851 const struct macsec_ops *ops; 3852 struct macsec_context ctx; 3853 3854 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3855 if (!ops) { 3856 ret = -EOPNOTSUPP; 3857 goto cleanup; 3858 } 3859 3860 ctx.secy = &macsec->secy; 3861 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3862 if (ret) 3863 goto cleanup; 3864 } 3865 3866 return 0; 3867 3868 cleanup: 3869 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3870 memcpy(&macsec->secy, &secy, sizeof(secy)); 3871 3872 return ret; 3873 } 3874 3875 static void macsec_del_dev(struct macsec_dev *macsec) 3876 { 3877 int i; 3878 3879 while (macsec->secy.rx_sc) { 3880 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3881 3882 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3883 free_rx_sc(rx_sc); 3884 } 3885 3886 for (i = 0; i < MACSEC_NUM_AN; i++) { 3887 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3888 3889 if (sa) { 3890 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3891 clear_tx_sa(sa); 3892 } 3893 } 3894 } 3895 3896 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3897 { 3898 struct macsec_dev *macsec = macsec_priv(dev); 3899 struct net_device *real_dev = macsec->real_dev; 3900 3901 /* If h/w offloading is available, propagate to the device */ 3902 if (macsec_is_offloaded(macsec)) { 3903 const struct macsec_ops *ops; 3904 struct macsec_context ctx; 3905 3906 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3907 if (ops) { 3908 ctx.secy = &macsec->secy; 3909 macsec_offload(ops->mdo_del_secy, &ctx); 3910 } 3911 } 3912 3913 unregister_netdevice_queue(dev, head); 3914 list_del_rcu(&macsec->secys); 3915 macsec_del_dev(macsec); 3916 netdev_upper_dev_unlink(real_dev, dev); 3917 3918 macsec_generation++; 3919 } 3920 3921 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3922 { 3923 struct macsec_dev *macsec = macsec_priv(dev); 3924 
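/*
 * Note that macsec_dellink() only tears down shared rx_handler state:
 * every MACsec device stacked on the same real_dev shares one
 * netdev_rx_handler_register() call and one macsec_rxh_data list (set
 * up in register_macsec_dev() below), so the handler and the list are
 * freed only when the last SecY on the underlying device goes away.
 * The per-device teardown itself happens in macsec_common_dellink().
 */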
struct net_device *real_dev = macsec->real_dev; 3925 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3926 3927 macsec_common_dellink(dev, head); 3928 3929 if (list_empty(&rxd->secys)) { 3930 netdev_rx_handler_unregister(real_dev); 3931 kfree(rxd); 3932 } 3933 } 3934 3935 static int register_macsec_dev(struct net_device *real_dev, 3936 struct net_device *dev) 3937 { 3938 struct macsec_dev *macsec = macsec_priv(dev); 3939 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3940 3941 if (!rxd) { 3942 int err; 3943 3944 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3945 if (!rxd) 3946 return -ENOMEM; 3947 3948 INIT_LIST_HEAD(&rxd->secys); 3949 3950 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3951 rxd); 3952 if (err < 0) { 3953 kfree(rxd); 3954 return err; 3955 } 3956 } 3957 3958 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3959 return 0; 3960 } 3961 3962 static bool sci_exists(struct net_device *dev, sci_t sci) 3963 { 3964 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3965 struct macsec_dev *macsec; 3966 3967 list_for_each_entry(macsec, &rxd->secys, secys) { 3968 if (macsec->secy.sci == sci) 3969 return true; 3970 } 3971 3972 return false; 3973 } 3974 3975 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3976 { 3977 return make_sci(dev->dev_addr, port); 3978 } 3979 3980 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3981 { 3982 struct macsec_dev *macsec = macsec_priv(dev); 3983 struct macsec_secy *secy = &macsec->secy; 3984 3985 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3986 if (!macsec->stats) 3987 return -ENOMEM; 3988 3989 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3990 if (!secy->tx_sc.stats) { 3991 free_percpu(macsec->stats); 3992 return -ENOMEM; 3993 } 3994 3995 secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); 3996 if (!secy->tx_sc.md_dst) { 3997 free_percpu(secy->tx_sc.stats); 3998 free_percpu(macsec->stats); 3999 return -ENOMEM; 4000 } 4001 4002 if (sci == MACSEC_UNDEF_SCI) 4003 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4004 4005 secy->netdev = dev; 4006 secy->operational = true; 4007 secy->key_len = DEFAULT_SAK_LEN; 4008 secy->icv_len = icv_len; 4009 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 4010 secy->protect_frames = true; 4011 secy->replay_protect = false; 4012 secy->xpn = DEFAULT_XPN; 4013 4014 secy->sci = sci; 4015 secy->tx_sc.md_dst->u.macsec_info.sci = sci; 4016 secy->tx_sc.active = true; 4017 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 4018 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 4019 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 4020 secy->tx_sc.end_station = false; 4021 secy->tx_sc.scb = false; 4022 4023 return 0; 4024 } 4025 4026 static struct lock_class_key macsec_netdev_addr_lock_key; 4027 4028 static int macsec_newlink(struct net *net, struct net_device *dev, 4029 struct nlattr *tb[], struct nlattr *data[], 4030 struct netlink_ext_ack *extack) 4031 { 4032 struct macsec_dev *macsec = macsec_priv(dev); 4033 rx_handler_func_t *rx_handler; 4034 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4035 struct net_device *real_dev; 4036 int err, mtu; 4037 sci_t sci; 4038 4039 if (!tb[IFLA_LINK]) 4040 return -EINVAL; 4041 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4042 if (!real_dev) 4043 return -ENODEV; 4044 if (real_dev->type != ARPHRD_ETHER) 4045 return -EINVAL; 4046 4047 dev->priv_flags |= IFF_MACSEC; 4048 4049 macsec->real_dev = real_dev; 4050 4051 if (data && data[IFLA_MACSEC_OFFLOAD]) 4052 macsec->offload = 
nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4053 else 4054 /* MACsec offloading is off by default */ 4055 macsec->offload = MACSEC_OFFLOAD_OFF; 4056 4057 /* Check if the offloading mode is supported by the underlying layers */ 4058 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4059 !macsec_check_offload(macsec->offload, macsec)) 4060 return -EOPNOTSUPP; 4061 4062 /* send_sci must be set to true when transmit sci explicitly is set */ 4063 if ((data && data[IFLA_MACSEC_SCI]) && 4064 (data && data[IFLA_MACSEC_INC_SCI])) { 4065 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4066 4067 if (!send_sci) 4068 return -EINVAL; 4069 } 4070 4071 if (data && data[IFLA_MACSEC_ICV_LEN]) 4072 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4073 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4074 if (mtu < 0) 4075 dev->mtu = 0; 4076 else 4077 dev->mtu = mtu; 4078 4079 rx_handler = rtnl_dereference(real_dev->rx_handler); 4080 if (rx_handler && rx_handler != macsec_handle_frame) 4081 return -EBUSY; 4082 4083 err = register_netdevice(dev); 4084 if (err < 0) 4085 return err; 4086 4087 netdev_lockdep_set_classes(dev); 4088 lockdep_set_class(&dev->addr_list_lock, 4089 &macsec_netdev_addr_lock_key); 4090 4091 err = netdev_upper_dev_link(real_dev, dev, extack); 4092 if (err < 0) 4093 goto unregister; 4094 4095 /* need to be already registered so that ->init has run and 4096 * the MAC addr is set 4097 */ 4098 if (data && data[IFLA_MACSEC_SCI]) 4099 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4100 else if (data && data[IFLA_MACSEC_PORT]) 4101 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4102 else 4103 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4104 4105 if (rx_handler && sci_exists(real_dev, sci)) { 4106 err = -EBUSY; 4107 goto unlink; 4108 } 4109 4110 err = macsec_add_dev(dev, sci, icv_len); 4111 if (err) 4112 goto unlink; 4113 4114 if (data) { 4115 err = macsec_changelink_common(dev, data); 4116 if (err) 4117 goto del_dev; 4118 } 4119 4120 /* If h/w offloading is available, propagate to the device */ 4121 if (macsec_is_offloaded(macsec)) { 4122 const struct macsec_ops *ops; 4123 struct macsec_context ctx; 4124 4125 ops = macsec_get_ops(macsec, &ctx); 4126 if (ops) { 4127 ctx.secy = &macsec->secy; 4128 err = macsec_offload(ops->mdo_add_secy, &ctx); 4129 if (err) 4130 goto del_dev; 4131 } 4132 } 4133 4134 err = register_macsec_dev(real_dev, dev); 4135 if (err < 0) 4136 goto del_dev; 4137 4138 netif_stacked_transfer_operstate(real_dev, dev); 4139 linkwatch_fire_event(dev); 4140 4141 macsec_generation++; 4142 4143 return 0; 4144 4145 del_dev: 4146 macsec_del_dev(macsec); 4147 unlink: 4148 netdev_upper_dev_unlink(real_dev, dev); 4149 unregister: 4150 unregister_netdevice(dev); 4151 return err; 4152 } 4153 4154 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4155 struct netlink_ext_ack *extack) 4156 { 4157 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4158 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4159 int flag; 4160 bool es, scb, sci; 4161 4162 if (!data) 4163 return 0; 4164 4165 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4166 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4167 4168 if (data[IFLA_MACSEC_ICV_LEN]) { 4169 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4170 if (icv_len != MACSEC_DEFAULT_ICV_LEN) { 4171 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4172 struct crypto_aead *dummy_tfm; 4173 4174 dummy_tfm = macsec_alloc_tfm(dummy_key, 4175 DEFAULT_SAK_LEN, 4176 icv_len); 4177 if (IS_ERR(dummy_tfm)) 4178 return PTR_ERR(dummy_tfm); 4179 crypto_free_aead(dummy_tfm); 4180 } 4181 
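/*
 * The throwaway transform above is the actual validation of a
 * non-default ICV length: macsec_alloc_tfm() ends up in
 * crypto_aead_setauthsize(), and only the crypto layer knows which tag
 * sizes gcm(aes) accepts, so a probe tfm is allocated and immediately
 * freed.  This is what rejects, for instance (illustrative iproute2
 * invocation):
 *
 *   ip link add link eth0 macsec0 type macsec icvlen 9
 *
 * since 9 bytes is not a valid GCM authentication tag size even though
 * it passes the MACSEC_MIN_ICV_LEN/MACSEC_STD_ICV_LEN range check in
 * the switch below.
 */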
	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	/* every attribute between ENCODING_SA and VALIDATION is a boolean */
	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	/* the ES and SCB bits and an explicit SCI are mutually exclusive
	 * in the SecTAG
	 */
	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}
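/* Note: the space reserved via macsec_get_size() above must cover every
 * attribute that macsec_fill_info() below emits; an attribute missing
 * from the tally would make the nla_put*() calls run out of room and
 * fail with -EMSGSIZE when the link is dumped.
 */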
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* the cipher suite ID is not stored as such; reconstruct it from
	 * the key length and the XPN flag
	 */
	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
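/* Illustrative userspace usage (iproute2), not part of this file; the
 * device names and the key value are placeholders:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <32 hex digits>
 *   ip macsec add macsec0 rx port 1 address 00:11:22:33:44:55
 *
 * "type macsec" reaches macsec_link_ops above (autoloading via
 * MODULE_ALIAS_RTNL_LINK below), while the "ip macsec" subcommands talk
 * to the "macsec" generic netlink family registered in macsec_init().
 */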
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	/* wait for pending call_rcu() callbacks (SA/SC frees) to finish
	 * before the module text goes away
	 */
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
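/* A minimal sketch of the driver side of MACsec offload, showing what
 * macsec_get_ops()/macsec_offload() above delegate to. Illustrative
 * only and kept out of the build with #if 0; the foo_* names are
 * hypothetical.
 */
#if 0
static int foo_mdo_add_secy(struct macsec_context *ctx)
{
	/* program ctx->secy (SCI, cipher parameters, ...) into the NIC */
	return 0;
}

static const struct macsec_ops foo_macsec_ops = {
	.mdo_add_secy	= foo_mdo_add_secy,
	/* ... the remaining mdo_* callbacks ... */
};

/* in the NIC driver's probe path: advertise MAC-level offload so that
 * macsec_check_offload() accepts MACSEC_OFFLOAD_MAC for this device
 */
static void foo_setup_macsec(struct net_device *netdev)
{
	netdev->macsec_ops = &foo_macsec_ops;
	netdev->features |= NETIF_F_HW_MACSEC;
}
#endif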