1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * drivers/net/macsec.c - MACsec device 4 * 5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> 6 */ 7 8 #include <linux/types.h> 9 #include <linux/skbuff.h> 10 #include <linux/socket.h> 11 #include <linux/module.h> 12 #include <crypto/aead.h> 13 #include <linux/etherdevice.h> 14 #include <linux/netdevice.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/refcount.h> 17 #include <net/genetlink.h> 18 #include <net/sock.h> 19 #include <net/gro_cells.h> 20 #include <net/macsec.h> 21 #include <net/dst_metadata.h> 22 #include <linux/phy.h> 23 #include <linux/byteorder/generic.h> 24 #include <linux/if_arp.h> 25 26 #include <uapi/linux/if_macsec.h> 27 28 /* SecTAG length = macsec_eth_header without the optional SCI */ 29 #define MACSEC_TAG_LEN 6 30 31 struct macsec_eth_header { 32 struct ethhdr eth; 33 /* SecTAG */ 34 u8 tci_an; 35 #if defined(__LITTLE_ENDIAN_BITFIELD) 36 u8 short_length:6, 37 unused:2; 38 #elif defined(__BIG_ENDIAN_BITFIELD) 39 u8 unused:2, 40 short_length:6; 41 #else 42 #error "Please fix <asm/byteorder.h>" 43 #endif 44 __be32 packet_number; 45 u8 secure_channel_id[8]; /* optional */ 46 } __packed; 47 48 /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ 49 #define MIN_NON_SHORT_LEN 48 50 51 #define GCM_AES_IV_LEN 12 52 53 #define for_each_rxsc(secy, sc) \ 54 for (sc = rcu_dereference_bh(secy->rx_sc); \ 55 sc; \ 56 sc = rcu_dereference_bh(sc->next)) 57 #define for_each_rxsc_rtnl(secy, sc) \ 58 for (sc = rtnl_dereference(secy->rx_sc); \ 59 sc; \ 60 sc = rtnl_dereference(sc->next)) 61 62 #define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31))) 63 64 struct gcm_iv_xpn { 65 union { 66 u8 short_secure_channel_id[4]; 67 ssci_t ssci; 68 }; 69 __be64 pn; 70 } __packed; 71 72 struct gcm_iv { 73 union { 74 u8 secure_channel_id[8]; 75 sci_t sci; 76 }; 77 __be32 pn; 78 }; 79 80 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT 81 82 struct pcpu_secy_stats { 83 struct macsec_dev_stats stats; 84 struct u64_stats_sync syncp; 85 }; 86 87 /** 88 * struct macsec_dev - private data 89 * @secy: SecY config 90 * @real_dev: pointer to underlying netdevice 91 * @dev_tracker: refcount tracker for @real_dev reference 92 * @stats: MACsec device stats 93 * @secys: linked list of SecY's on the underlying device 94 * @gro_cells: pointer to the Generic Receive Offload cell 95 * @offload: status of offloading on the MACsec device 96 */ 97 struct macsec_dev { 98 struct macsec_secy secy; 99 struct net_device *real_dev; 100 netdevice_tracker dev_tracker; 101 struct pcpu_secy_stats __percpu *stats; 102 struct list_head secys; 103 struct gro_cells gro_cells; 104 enum macsec_offload offload; 105 }; 106 107 /** 108 * struct macsec_rxh_data - rx_handler private argument 109 * @secys: linked list of SecY's on this underlying device 110 */ 111 struct macsec_rxh_data { 112 struct list_head secys; 113 }; 114 115 static struct macsec_dev *macsec_priv(const struct net_device *dev) 116 { 117 return (struct macsec_dev *)netdev_priv(dev); 118 } 119 120 static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev) 121 { 122 return rcu_dereference_bh(dev->rx_handler_data); 123 } 124 125 static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev) 126 { 127 return rtnl_dereference(dev->rx_handler_data); 128 } 129 130 struct macsec_cb { 131 struct aead_request *req; 132 union { 133 struct macsec_tx_sa *tx_sa; 134 struct macsec_rx_sa *rx_sa; 135 }; 136 u8 assoc_num; 137 bool valid; 138 bool has_sci; 139 }; 140 141 static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) 142 { 143 struct macsec_rx_sa *sa = rcu_dereference_bh(ptr); 144 145 if (!sa || !sa->active) 146 return NULL; 147 148 if (!refcount_inc_not_zero(&sa->refcnt)) 149 return NULL; 150 151 return sa; 152 } 153 154 static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) 155 { 156 struct macsec_rx_sa *sa = NULL; 157 int an; 158 159 for (an = 0; an < MACSEC_NUM_AN; an++) { 160 sa = macsec_rxsa_get(rx_sc->sa[an]); 161 if (sa) 162 break; 163 } 164 return sa; 165 } 166 167 static void free_rx_sc_rcu(struct rcu_head *head) 168 { 169 struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); 170 171 free_percpu(rx_sc->stats); 172 kfree(rx_sc); 173 } 174 175 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) 176 { 177 return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL; 178 } 179 180 static void macsec_rxsc_put(struct macsec_rx_sc *sc) 181 { 182 if (refcount_dec_and_test(&sc->refcnt)) 183 call_rcu(&sc->rcu_head, free_rx_sc_rcu); 184 } 185 186 static void free_rxsa(struct rcu_head *head) 187 { 188 struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); 189 190 crypto_free_aead(sa->key.tfm); 191 free_percpu(sa->stats); 192 kfree(sa); 193 } 194 195 static void macsec_rxsa_put(struct macsec_rx_sa *sa) 196 { 197 if (refcount_dec_and_test(&sa->refcnt)) 198 call_rcu(&sa->rcu, free_rxsa); 199 } 200 201 static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) 202 { 203 struct macsec_tx_sa *sa = rcu_dereference_bh(ptr); 204 205 if (!sa || !sa->active) 206 return NULL; 207 208 if (!refcount_inc_not_zero(&sa->refcnt)) 209 return NULL; 210 211 return sa; 212 } 213 214 static void free_txsa(struct rcu_head *head) 215 { 216 struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); 217 218 crypto_free_aead(sa->key.tfm); 219 free_percpu(sa->stats); 220 kfree(sa); 221 } 222 223 static void macsec_txsa_put(struct macsec_tx_sa *sa) 224 { 225 if (refcount_dec_and_test(&sa->refcnt)) 226 call_rcu(&sa->rcu, free_txsa); 227 } 228 229 static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) 230 { 231 BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb)); 232 return (struct macsec_cb *)skb->cb; 233 } 234 235 #define MACSEC_PORT_SCB (0x0000) 236 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) 237 #define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff) 238 239 #define MACSEC_GCM_AES_128_SAK_LEN 16 240 #define MACSEC_GCM_AES_256_SAK_LEN 32 241 242 #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN 243 #define DEFAULT_XPN false 244 #define DEFAULT_SEND_SCI true 245 #define DEFAULT_ENCRYPT false 246 #define DEFAULT_ENCODING_SA 0 247 #define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) 248 249 static sci_t make_sci(const u8 *addr, __be16 port) 250 { 251 sci_t sci; 252 253 memcpy(&sci, addr, ETH_ALEN); 254 memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port)); 255 256 return sci; 257 } 258 259 static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) 260 { 261 sci_t sci; 262 263 if (sci_present) 264 memcpy(&sci, hdr->secure_channel_id, 265 sizeof(hdr->secure_channel_id)); 266 else 267 sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); 268 269 return sci; 270 } 271 272 static unsigned int macsec_sectag_len(bool sci_present) 273 { 274 return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); 275 } 276 277 static unsigned int macsec_hdr_len(bool sci_present) 278 { 279 return macsec_sectag_len(sci_present) + ETH_HLEN; 280 } 281 282 static unsigned int macsec_extra_len(bool sci_present) 283 { 284 return macsec_sectag_len(sci_present) + sizeof(__be16); 285 } 286 287 /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ 288 static void macsec_fill_sectag(struct macsec_eth_header *h, 289 const struct macsec_secy *secy, u32 pn, 290 bool sci_present) 291 { 292 const struct macsec_tx_sc *tx_sc = &secy->tx_sc; 293 294 memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); 295 h->eth.h_proto = htons(ETH_P_MACSEC); 296 297 if (sci_present) { 298 h->tci_an |= MACSEC_TCI_SC; 299 memcpy(&h->secure_channel_id, &secy->sci, 300 sizeof(h->secure_channel_id)); 301 } else { 302 if (tx_sc->end_station) 303 h->tci_an |= MACSEC_TCI_ES; 304 if (tx_sc->scb) 305 h->tci_an |= MACSEC_TCI_SCB; 306 } 307 308 h->packet_number = htonl(pn); 309 310 /* with GCM, C/E clear for !encrypt, both set for encrypt */ 311 if (tx_sc->encrypt) 312 h->tci_an |= MACSEC_TCI_CONFID; 313 else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) 314 h->tci_an |= MACSEC_TCI_C; 315 316 h->tci_an |= tx_sc->encoding_sa; 317 } 318 319 static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len) 320 { 321 if (data_len < MIN_NON_SHORT_LEN) 322 h->short_length = data_len; 323 } 324 325 /* Checks if a MACsec interface is being offloaded to an hardware engine */ 326 static bool macsec_is_offloaded(struct macsec_dev *macsec) 327 { 328 if (macsec->offload == MACSEC_OFFLOAD_MAC || 329 macsec->offload == MACSEC_OFFLOAD_PHY) 330 return true; 331 332 return false; 333 } 334 335 /* Checks if underlying layers implement MACsec offloading functions. */ 336 static bool macsec_check_offload(enum macsec_offload offload, 337 struct macsec_dev *macsec) 338 { 339 if (!macsec || !macsec->real_dev) 340 return false; 341 342 if (offload == MACSEC_OFFLOAD_PHY) 343 return macsec->real_dev->phydev && 344 macsec->real_dev->phydev->macsec_ops; 345 else if (offload == MACSEC_OFFLOAD_MAC) 346 return macsec->real_dev->features & NETIF_F_HW_MACSEC && 347 macsec->real_dev->macsec_ops; 348 349 return false; 350 } 351 352 static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload, 353 struct macsec_dev *macsec, 354 struct macsec_context *ctx) 355 { 356 if (ctx) { 357 memset(ctx, 0, sizeof(*ctx)); 358 ctx->offload = offload; 359 360 if (offload == MACSEC_OFFLOAD_PHY) 361 ctx->phydev = macsec->real_dev->phydev; 362 else if (offload == MACSEC_OFFLOAD_MAC) 363 ctx->netdev = macsec->real_dev; 364 } 365 366 if (offload == MACSEC_OFFLOAD_PHY) 367 return macsec->real_dev->phydev->macsec_ops; 368 else 369 return macsec->real_dev->macsec_ops; 370 } 371 372 /* Returns a pointer to the MACsec ops struct if any and updates the MACsec 373 * context device reference if provided. 374 */ 375 static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, 376 struct macsec_context *ctx) 377 { 378 if (!macsec_check_offload(macsec->offload, macsec)) 379 return NULL; 380 381 return __macsec_get_ops(macsec->offload, macsec, ctx); 382 } 383 384 /* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */ 385 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn) 386 { 387 struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; 388 int len = skb->len - 2 * ETH_ALEN; 389 int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len; 390 391 /* a) It comprises at least 17 octets */ 392 if (skb->len <= 16) 393 return false; 394 395 /* b) MACsec EtherType: already checked */ 396 397 /* c) V bit is clear */ 398 if (h->tci_an & MACSEC_TCI_VERSION) 399 return false; 400 401 /* d) ES or SCB => !SC */ 402 if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) && 403 (h->tci_an & MACSEC_TCI_SC)) 404 return false; 405 406 /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */ 407 if (h->unused) 408 return false; 409 410 /* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */ 411 if (!h->packet_number && !xpn) 412 return false; 413 414 /* length check, f) g) h) i) */ 415 if (h->short_length) 416 return len == extra_len + h->short_length; 417 return len >= extra_len + MIN_NON_SHORT_LEN; 418 } 419 420 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) 421 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN 422 423 static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn, 424 salt_t salt) 425 { 426 struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv; 427 428 gcm_iv->ssci = ssci ^ salt.ssci; 429 gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn; 430 } 431 432 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) 433 { 434 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; 435 436 gcm_iv->sci = sci; 437 gcm_iv->pn = htonl(pn); 438 } 439 440 static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) 441 { 442 return (struct macsec_eth_header *)skb_mac_header(skb); 443 } 444 445 static void __macsec_pn_wrapped(struct macsec_secy *secy, 446 struct macsec_tx_sa *tx_sa) 447 { 448 pr_debug("PN wrapped, transitioning to !oper\n"); 449 tx_sa->active = false; 450 if (secy->protect_frames) 451 secy->operational = false; 452 } 453 454 void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) 455 { 456 spin_lock_bh(&tx_sa->lock); 457 __macsec_pn_wrapped(secy, tx_sa); 458 spin_unlock_bh(&tx_sa->lock); 459 } 460 EXPORT_SYMBOL_GPL(macsec_pn_wrapped); 461 462 static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa, 463 struct macsec_secy *secy) 464 { 465 pn_t pn; 466 467 spin_lock_bh(&tx_sa->lock); 468 469 pn = tx_sa->next_pn_halves; 470 if (secy->xpn) 471 tx_sa->next_pn++; 472 else 473 tx_sa->next_pn_halves.lower++; 474 475 if (tx_sa->next_pn == 0) 476 __macsec_pn_wrapped(secy, tx_sa); 477 spin_unlock_bh(&tx_sa->lock); 478 479 return pn; 480 } 481 482 static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) 483 { 484 struct macsec_dev *macsec = netdev_priv(dev); 485 486 skb->dev = macsec->real_dev; 487 skb_reset_mac_header(skb); 488 skb->protocol = eth_hdr(skb)->h_proto; 489 } 490 491 static unsigned int macsec_msdu_len(struct sk_buff *skb) 492 { 493 struct macsec_dev *macsec = macsec_priv(skb->dev); 494 struct macsec_secy *secy = &macsec->secy; 495 bool sci_present = macsec_skb_cb(skb)->has_sci; 496 497 return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; 498 } 499 500 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, 501 struct macsec_tx_sa *tx_sa) 502 { 503 unsigned int msdu_len = macsec_msdu_len(skb); 504 struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); 505 506 u64_stats_update_begin(&txsc_stats->syncp); 507 if (tx_sc->encrypt) { 508 txsc_stats->stats.OutOctetsEncrypted += msdu_len; 509 txsc_stats->stats.OutPktsEncrypted++; 510 this_cpu_inc(tx_sa->stats->OutPktsEncrypted); 511 } else { 512 txsc_stats->stats.OutOctetsProtected += msdu_len; 513 txsc_stats->stats.OutPktsProtected++; 514 this_cpu_inc(tx_sa->stats->OutPktsProtected); 515 } 516 u64_stats_update_end(&txsc_stats->syncp); 517 } 518 519 static void count_tx(struct net_device *dev, int ret, int len) 520 { 521 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) 522 dev_sw_netstats_tx_add(dev, 1, len); 523 } 524 525 static void macsec_encrypt_done(void *data, int err) 526 { 527 struct sk_buff *skb = data; 528 struct net_device *dev = skb->dev; 529 struct macsec_dev *macsec = macsec_priv(dev); 530 struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; 531 int len, ret; 532 533 aead_request_free(macsec_skb_cb(skb)->req); 534 535 rcu_read_lock_bh(); 536 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 537 /* packet is encrypted/protected so tx_bytes must be calculated */ 538 len = macsec_msdu_len(skb) + 2 * ETH_ALEN; 539 macsec_encrypt_finish(skb, dev); 540 ret = dev_queue_xmit(skb); 541 count_tx(dev, ret, len); 542 rcu_read_unlock_bh(); 543 544 macsec_txsa_put(sa); 545 dev_put(dev); 546 } 547 548 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, 549 unsigned char **iv, 550 struct scatterlist **sg, 551 int num_frags) 552 { 553 size_t size, iv_offset, sg_offset; 554 struct aead_request *req; 555 void *tmp; 556 557 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm); 558 iv_offset = size; 559 size += GCM_AES_IV_LEN; 560 561 size = ALIGN(size, __alignof__(struct scatterlist)); 562 sg_offset = size; 563 size += sizeof(struct scatterlist) * num_frags; 564 565 tmp = kmalloc(size, GFP_ATOMIC); 566 if (!tmp) 567 return NULL; 568 569 *iv = (unsigned char *)(tmp + iv_offset); 570 *sg = (struct scatterlist *)(tmp + sg_offset); 571 req = tmp; 572 573 aead_request_set_tfm(req, tfm); 574 575 return req; 576 } 577 578 static struct sk_buff *macsec_encrypt(struct sk_buff *skb, 579 struct net_device *dev) 580 { 581 int ret; 582 struct scatterlist *sg; 583 struct sk_buff *trailer; 584 unsigned char *iv; 585 struct ethhdr *eth; 586 struct macsec_eth_header *hh; 587 size_t unprotected_len; 588 struct aead_request *req; 589 struct macsec_secy *secy; 590 struct macsec_tx_sc *tx_sc; 591 struct macsec_tx_sa *tx_sa; 592 struct macsec_dev *macsec = macsec_priv(dev); 593 bool sci_present; 594 pn_t pn; 595 596 secy = &macsec->secy; 597 tx_sc = &secy->tx_sc; 598 599 /* 10.5.1 TX SA assignment */ 600 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); 601 if (!tx_sa) { 602 secy->operational = false; 603 kfree_skb(skb); 604 return ERR_PTR(-EINVAL); 605 } 606 607 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 608 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 609 struct sk_buff *nskb = skb_copy_expand(skb, 610 MACSEC_NEEDED_HEADROOM, 611 MACSEC_NEEDED_TAILROOM, 612 GFP_ATOMIC); 613 if (likely(nskb)) { 614 consume_skb(skb); 615 skb = nskb; 616 } else { 617 macsec_txsa_put(tx_sa); 618 kfree_skb(skb); 619 return ERR_PTR(-ENOMEM); 620 } 621 } else { 622 skb = skb_unshare(skb, GFP_ATOMIC); 623 if (!skb) { 624 macsec_txsa_put(tx_sa); 625 return ERR_PTR(-ENOMEM); 626 } 627 } 628 629 unprotected_len = skb->len; 630 eth = eth_hdr(skb); 631 sci_present = macsec_send_sci(secy); 632 hh = skb_push(skb, macsec_extra_len(sci_present)); 633 memmove(hh, eth, 2 * ETH_ALEN); 634 635 pn = tx_sa_update_pn(tx_sa, secy); 636 if (pn.full64 == 0) { 637 macsec_txsa_put(tx_sa); 638 kfree_skb(skb); 639 return ERR_PTR(-ENOLINK); 640 } 641 macsec_fill_sectag(hh, secy, pn.lower, sci_present); 642 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 643 644 skb_put(skb, secy->icv_len); 645 646 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { 647 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 648 649 u64_stats_update_begin(&secy_stats->syncp); 650 secy_stats->stats.OutPktsTooLong++; 651 u64_stats_update_end(&secy_stats->syncp); 652 653 macsec_txsa_put(tx_sa); 654 kfree_skb(skb); 655 return ERR_PTR(-EINVAL); 656 } 657 658 ret = skb_cow_data(skb, 0, &trailer); 659 if (unlikely(ret < 0)) { 660 macsec_txsa_put(tx_sa); 661 kfree_skb(skb); 662 return ERR_PTR(ret); 663 } 664 665 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret); 666 if (!req) { 667 macsec_txsa_put(tx_sa); 668 kfree_skb(skb); 669 return ERR_PTR(-ENOMEM); 670 } 671 672 if (secy->xpn) 673 macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt); 674 else 675 macsec_fill_iv(iv, secy->sci, pn.lower); 676 677 sg_init_table(sg, ret); 678 ret = skb_to_sgvec(skb, sg, 0, skb->len); 679 if (unlikely(ret < 0)) { 680 aead_request_free(req); 681 macsec_txsa_put(tx_sa); 682 kfree_skb(skb); 683 return ERR_PTR(ret); 684 } 685 686 if (tx_sc->encrypt) { 687 int len = skb->len - macsec_hdr_len(sci_present) - 688 secy->icv_len; 689 aead_request_set_crypt(req, sg, sg, len, iv); 690 aead_request_set_ad(req, macsec_hdr_len(sci_present)); 691 } else { 692 aead_request_set_crypt(req, sg, sg, 0, iv); 693 aead_request_set_ad(req, skb->len - secy->icv_len); 694 } 695 696 macsec_skb_cb(skb)->req = req; 697 macsec_skb_cb(skb)->tx_sa = tx_sa; 698 macsec_skb_cb(skb)->has_sci = sci_present; 699 aead_request_set_callback(req, 0, macsec_encrypt_done, skb); 700 701 dev_hold(skb->dev); 702 ret = crypto_aead_encrypt(req); 703 if (ret == -EINPROGRESS) { 704 return ERR_PTR(ret); 705 } else if (ret != 0) { 706 dev_put(skb->dev); 707 kfree_skb(skb); 708 aead_request_free(req); 709 macsec_txsa_put(tx_sa); 710 return ERR_PTR(-EINVAL); 711 } 712 713 dev_put(skb->dev); 714 aead_request_free(req); 715 macsec_txsa_put(tx_sa); 716 717 return skb; 718 } 719 720 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) 721 { 722 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 723 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); 724 struct macsec_eth_header *hdr = macsec_ethhdr(skb); 725 u32 lowest_pn = 0; 726 727 spin_lock(&rx_sa->lock); 728 if (rx_sa->next_pn_halves.lower >= secy->replay_window) 729 lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window; 730 731 /* Now perform replay protection check again 732 * (see IEEE 802.1AE-2006 figure 10-5) 733 */ 734 if (secy->replay_protect && pn < lowest_pn && 735 (!secy->xpn || pn_same_half(pn, lowest_pn))) { 736 spin_unlock(&rx_sa->lock); 737 u64_stats_update_begin(&rxsc_stats->syncp); 738 rxsc_stats->stats.InPktsLate++; 739 u64_stats_update_end(&rxsc_stats->syncp); 740 DEV_STATS_INC(secy->netdev, rx_dropped); 741 return false; 742 } 743 744 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { 745 unsigned int msdu_len = macsec_msdu_len(skb); 746 u64_stats_update_begin(&rxsc_stats->syncp); 747 if (hdr->tci_an & MACSEC_TCI_E) 748 rxsc_stats->stats.InOctetsDecrypted += msdu_len; 749 else 750 rxsc_stats->stats.InOctetsValidated += msdu_len; 751 u64_stats_update_end(&rxsc_stats->syncp); 752 } 753 754 if (!macsec_skb_cb(skb)->valid) { 755 spin_unlock(&rx_sa->lock); 756 757 /* 10.6.5 */ 758 if (hdr->tci_an & MACSEC_TCI_C || 759 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 760 u64_stats_update_begin(&rxsc_stats->syncp); 761 rxsc_stats->stats.InPktsNotValid++; 762 u64_stats_update_end(&rxsc_stats->syncp); 763 this_cpu_inc(rx_sa->stats->InPktsNotValid); 764 DEV_STATS_INC(secy->netdev, rx_errors); 765 return false; 766 } 767 768 u64_stats_update_begin(&rxsc_stats->syncp); 769 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { 770 rxsc_stats->stats.InPktsInvalid++; 771 this_cpu_inc(rx_sa->stats->InPktsInvalid); 772 } else if (pn < lowest_pn) { 773 rxsc_stats->stats.InPktsDelayed++; 774 } else { 775 rxsc_stats->stats.InPktsUnchecked++; 776 } 777 u64_stats_update_end(&rxsc_stats->syncp); 778 } else { 779 u64_stats_update_begin(&rxsc_stats->syncp); 780 if (pn < lowest_pn) { 781 rxsc_stats->stats.InPktsDelayed++; 782 } else { 783 rxsc_stats->stats.InPktsOK++; 784 this_cpu_inc(rx_sa->stats->InPktsOK); 785 } 786 u64_stats_update_end(&rxsc_stats->syncp); 787 788 // Instead of "pn >=" - to support pn overflow in xpn 789 if (pn + 1 > rx_sa->next_pn_halves.lower) { 790 rx_sa->next_pn_halves.lower = pn + 1; 791 } else if (secy->xpn && 792 !pn_same_half(pn, rx_sa->next_pn_halves.lower)) { 793 rx_sa->next_pn_halves.upper++; 794 rx_sa->next_pn_halves.lower = pn + 1; 795 } 796 797 spin_unlock(&rx_sa->lock); 798 } 799 800 return true; 801 } 802 803 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) 804 { 805 skb->pkt_type = PACKET_HOST; 806 skb->protocol = eth_type_trans(skb, dev); 807 808 skb_reset_network_header(skb); 809 if (!skb_transport_header_was_set(skb)) 810 skb_reset_transport_header(skb); 811 skb_reset_mac_len(skb); 812 } 813 814 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) 815 { 816 skb->ip_summed = CHECKSUM_NONE; 817 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); 818 skb_pull(skb, hdr_len); 819 pskb_trim_unique(skb, skb->len - icv_len); 820 } 821 822 static void count_rx(struct net_device *dev, int len) 823 { 824 dev_sw_netstats_rx_add(dev, len); 825 } 826 827 static void macsec_decrypt_done(void *data, int err) 828 { 829 struct sk_buff *skb = data; 830 struct net_device *dev = skb->dev; 831 struct macsec_dev *macsec = macsec_priv(dev); 832 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 833 struct macsec_rx_sc *rx_sc = rx_sa->sc; 834 int len; 835 u32 pn; 836 837 aead_request_free(macsec_skb_cb(skb)->req); 838 839 if (!err) 840 macsec_skb_cb(skb)->valid = true; 841 842 rcu_read_lock_bh(); 843 pn = ntohl(macsec_ethhdr(skb)->packet_number); 844 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { 845 rcu_read_unlock_bh(); 846 kfree_skb(skb); 847 goto out; 848 } 849 850 macsec_finalize_skb(skb, macsec->secy.icv_len, 851 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 852 len = skb->len; 853 macsec_reset_skb(skb, macsec->secy.netdev); 854 855 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) 856 count_rx(dev, len); 857 858 rcu_read_unlock_bh(); 859 860 out: 861 macsec_rxsa_put(rx_sa); 862 macsec_rxsc_put(rx_sc); 863 dev_put(dev); 864 } 865 866 static struct sk_buff *macsec_decrypt(struct sk_buff *skb, 867 struct net_device *dev, 868 struct macsec_rx_sa *rx_sa, 869 sci_t sci, 870 struct macsec_secy *secy) 871 { 872 int ret; 873 struct scatterlist *sg; 874 struct sk_buff *trailer; 875 unsigned char *iv; 876 struct aead_request *req; 877 struct macsec_eth_header *hdr; 878 u32 hdr_pn; 879 u16 icv_len = secy->icv_len; 880 881 macsec_skb_cb(skb)->valid = false; 882 skb = skb_share_check(skb, GFP_ATOMIC); 883 if (!skb) 884 return ERR_PTR(-ENOMEM); 885 886 ret = skb_cow_data(skb, 0, &trailer); 887 if (unlikely(ret < 0)) { 888 kfree_skb(skb); 889 return ERR_PTR(ret); 890 } 891 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret); 892 if (!req) { 893 kfree_skb(skb); 894 return ERR_PTR(-ENOMEM); 895 } 896 897 hdr = (struct macsec_eth_header *)skb->data; 898 hdr_pn = ntohl(hdr->packet_number); 899 900 if (secy->xpn) { 901 pn_t recovered_pn = rx_sa->next_pn_halves; 902 903 recovered_pn.lower = hdr_pn; 904 if (hdr_pn < rx_sa->next_pn_halves.lower && 905 !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower)) 906 recovered_pn.upper++; 907 908 macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64, 909 rx_sa->key.salt); 910 } else { 911 macsec_fill_iv(iv, sci, hdr_pn); 912 } 913 914 sg_init_table(sg, ret); 915 ret = skb_to_sgvec(skb, sg, 0, skb->len); 916 if (unlikely(ret < 0)) { 917 aead_request_free(req); 918 kfree_skb(skb); 919 return ERR_PTR(ret); 920 } 921 922 if (hdr->tci_an & MACSEC_TCI_E) { 923 /* confidentiality: ethernet + macsec header 924 * authenticated, encrypted payload 925 */ 926 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); 927 928 aead_request_set_crypt(req, sg, sg, len, iv); 929 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); 930 skb = skb_unshare(skb, GFP_ATOMIC); 931 if (!skb) { 932 aead_request_free(req); 933 return ERR_PTR(-ENOMEM); 934 } 935 } else { 936 /* integrity only: all headers + data authenticated */ 937 aead_request_set_crypt(req, sg, sg, icv_len, iv); 938 aead_request_set_ad(req, skb->len - icv_len); 939 } 940 941 macsec_skb_cb(skb)->req = req; 942 skb->dev = dev; 943 aead_request_set_callback(req, 0, macsec_decrypt_done, skb); 944 945 dev_hold(dev); 946 ret = crypto_aead_decrypt(req); 947 if (ret == -EINPROGRESS) { 948 return ERR_PTR(ret); 949 } else if (ret != 0) { 950 /* decryption/authentication failed 951 * 10.6 if validateFrames is disabled, deliver anyway 952 */ 953 if (ret != -EBADMSG) { 954 kfree_skb(skb); 955 skb = ERR_PTR(ret); 956 } 957 } else { 958 macsec_skb_cb(skb)->valid = true; 959 } 960 dev_put(dev); 961 962 aead_request_free(req); 963 964 return skb; 965 } 966 967 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) 968 { 969 struct macsec_rx_sc *rx_sc; 970 971 for_each_rxsc(secy, rx_sc) { 972 if (rx_sc->sci == sci) 973 return rx_sc; 974 } 975 976 return NULL; 977 } 978 979 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) 980 { 981 struct macsec_rx_sc *rx_sc; 982 983 for_each_rxsc_rtnl(secy, rx_sc) { 984 if (rx_sc->sci == sci) 985 return rx_sc; 986 } 987 988 return NULL; 989 } 990 991 static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) 992 { 993 /* Deliver to the uncontrolled port by default */ 994 enum rx_handler_result ret = RX_HANDLER_PASS; 995 struct ethhdr *hdr = eth_hdr(skb); 996 struct metadata_dst *md_dst; 997 struct macsec_rxh_data *rxd; 998 struct macsec_dev *macsec; 999 1000 rcu_read_lock(); 1001 rxd = macsec_data_rcu(skb->dev); 1002 md_dst = skb_metadata_dst(skb); 1003 1004 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1005 struct sk_buff *nskb; 1006 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 1007 struct net_device *ndev = macsec->secy.netdev; 1008 1009 /* If h/w offloading is enabled, HW decodes frames and strips 1010 * the SecTAG, so we have to deduce which port to deliver to. 1011 */ 1012 if (macsec_is_offloaded(macsec) && netif_running(ndev)) { 1013 struct macsec_rx_sc *rx_sc = NULL; 1014 1015 if (md_dst && md_dst->type == METADATA_MACSEC) 1016 rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci); 1017 1018 if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc) 1019 continue; 1020 1021 if (ether_addr_equal_64bits(hdr->h_dest, 1022 ndev->dev_addr)) { 1023 /* exact match, divert skb to this port */ 1024 skb->dev = ndev; 1025 skb->pkt_type = PACKET_HOST; 1026 ret = RX_HANDLER_ANOTHER; 1027 goto out; 1028 } else if (is_multicast_ether_addr_64bits( 1029 hdr->h_dest)) { 1030 /* multicast frame, deliver on this port too */ 1031 nskb = skb_clone(skb, GFP_ATOMIC); 1032 if (!nskb) 1033 break; 1034 1035 nskb->dev = ndev; 1036 if (ether_addr_equal_64bits(hdr->h_dest, 1037 ndev->broadcast)) 1038 nskb->pkt_type = PACKET_BROADCAST; 1039 else 1040 nskb->pkt_type = PACKET_MULTICAST; 1041 1042 __netif_rx(nskb); 1043 } else if (rx_sc || ndev->flags & IFF_PROMISC) { 1044 skb->dev = ndev; 1045 skb->pkt_type = PACKET_HOST; 1046 ret = RX_HANDLER_ANOTHER; 1047 goto out; 1048 } 1049 1050 continue; 1051 } 1052 1053 /* 10.6 If the management control validateFrames is not 1054 * Strict, frames without a SecTAG are received, counted, and 1055 * delivered to the Controlled Port 1056 */ 1057 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1058 u64_stats_update_begin(&secy_stats->syncp); 1059 secy_stats->stats.InPktsNoTag++; 1060 u64_stats_update_end(&secy_stats->syncp); 1061 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1062 continue; 1063 } 1064 1065 /* deliver on this port */ 1066 nskb = skb_clone(skb, GFP_ATOMIC); 1067 if (!nskb) 1068 break; 1069 1070 nskb->dev = ndev; 1071 1072 if (__netif_rx(nskb) == NET_RX_SUCCESS) { 1073 u64_stats_update_begin(&secy_stats->syncp); 1074 secy_stats->stats.InPktsUntagged++; 1075 u64_stats_update_end(&secy_stats->syncp); 1076 } 1077 } 1078 1079 out: 1080 rcu_read_unlock(); 1081 return ret; 1082 } 1083 1084 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 1085 { 1086 struct sk_buff *skb = *pskb; 1087 struct net_device *dev = skb->dev; 1088 struct macsec_eth_header *hdr; 1089 struct macsec_secy *secy = NULL; 1090 struct macsec_rx_sc *rx_sc; 1091 struct macsec_rx_sa *rx_sa; 1092 struct macsec_rxh_data *rxd; 1093 struct macsec_dev *macsec; 1094 unsigned int len; 1095 sci_t sci; 1096 u32 hdr_pn; 1097 bool cbit; 1098 struct pcpu_rx_sc_stats *rxsc_stats; 1099 struct pcpu_secy_stats *secy_stats; 1100 bool pulled_sci; 1101 int ret; 1102 1103 if (skb_headroom(skb) < ETH_HLEN) 1104 goto drop_direct; 1105 1106 hdr = macsec_ethhdr(skb); 1107 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) 1108 return handle_not_macsec(skb); 1109 1110 skb = skb_unshare(skb, GFP_ATOMIC); 1111 *pskb = skb; 1112 if (!skb) 1113 return RX_HANDLER_CONSUMED; 1114 1115 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); 1116 if (!pulled_sci) { 1117 if (!pskb_may_pull(skb, macsec_extra_len(false))) 1118 goto drop_direct; 1119 } 1120 1121 hdr = macsec_ethhdr(skb); 1122 1123 /* Frames with a SecTAG that has the TCI E bit set but the C 1124 * bit clear are discarded, as this reserved encoding is used 1125 * to identify frames with a SecTAG that are not to be 1126 * delivered to the Controlled Port. 1127 */ 1128 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) 1129 return RX_HANDLER_PASS; 1130 1131 /* now, pull the extra length */ 1132 if (hdr->tci_an & MACSEC_TCI_SC) { 1133 if (!pulled_sci) 1134 goto drop_direct; 1135 } 1136 1137 /* ethernet header is part of crypto processing */ 1138 skb_push(skb, ETH_HLEN); 1139 1140 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); 1141 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; 1142 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); 1143 1144 rcu_read_lock(); 1145 rxd = macsec_data_rcu(skb->dev); 1146 1147 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1148 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); 1149 1150 sc = sc ? macsec_rxsc_get(sc) : NULL; 1151 1152 if (sc) { 1153 secy = &macsec->secy; 1154 rx_sc = sc; 1155 break; 1156 } 1157 } 1158 1159 if (!secy) 1160 goto nosci; 1161 1162 dev = secy->netdev; 1163 macsec = macsec_priv(dev); 1164 secy_stats = this_cpu_ptr(macsec->stats); 1165 rxsc_stats = this_cpu_ptr(rx_sc->stats); 1166 1167 if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) { 1168 u64_stats_update_begin(&secy_stats->syncp); 1169 secy_stats->stats.InPktsBadTag++; 1170 u64_stats_update_end(&secy_stats->syncp); 1171 DEV_STATS_INC(secy->netdev, rx_errors); 1172 goto drop_nosa; 1173 } 1174 1175 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); 1176 if (!rx_sa) { 1177 /* 10.6.1 if the SA is not in use */ 1178 1179 /* If validateFrames is Strict or the C bit in the 1180 * SecTAG is set, discard 1181 */ 1182 struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); 1183 if (hdr->tci_an & MACSEC_TCI_C || 1184 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 1185 u64_stats_update_begin(&rxsc_stats->syncp); 1186 rxsc_stats->stats.InPktsNotUsingSA++; 1187 u64_stats_update_end(&rxsc_stats->syncp); 1188 DEV_STATS_INC(secy->netdev, rx_errors); 1189 if (active_rx_sa) 1190 this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); 1191 goto drop_nosa; 1192 } 1193 1194 /* not Strict, the frame (with the SecTAG and ICV 1195 * removed) is delivered to the Controlled Port. 1196 */ 1197 u64_stats_update_begin(&rxsc_stats->syncp); 1198 rxsc_stats->stats.InPktsUnusedSA++; 1199 u64_stats_update_end(&rxsc_stats->syncp); 1200 if (active_rx_sa) 1201 this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); 1202 goto deliver; 1203 } 1204 1205 /* First, PN check to avoid decrypting obviously wrong packets */ 1206 hdr_pn = ntohl(hdr->packet_number); 1207 if (secy->replay_protect) { 1208 bool late; 1209 1210 spin_lock(&rx_sa->lock); 1211 late = rx_sa->next_pn_halves.lower >= secy->replay_window && 1212 hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window); 1213 1214 if (secy->xpn) 1215 late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn); 1216 spin_unlock(&rx_sa->lock); 1217 1218 if (late) { 1219 u64_stats_update_begin(&rxsc_stats->syncp); 1220 rxsc_stats->stats.InPktsLate++; 1221 u64_stats_update_end(&rxsc_stats->syncp); 1222 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1223 goto drop; 1224 } 1225 } 1226 1227 macsec_skb_cb(skb)->rx_sa = rx_sa; 1228 1229 /* Disabled && !changed text => skip validation */ 1230 if (hdr->tci_an & MACSEC_TCI_C || 1231 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1232 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1233 1234 if (IS_ERR(skb)) { 1235 /* the decrypt callback needs the reference */ 1236 if (PTR_ERR(skb) != -EINPROGRESS) { 1237 macsec_rxsa_put(rx_sa); 1238 macsec_rxsc_put(rx_sc); 1239 } 1240 rcu_read_unlock(); 1241 *pskb = NULL; 1242 return RX_HANDLER_CONSUMED; 1243 } 1244 1245 if (!macsec_post_decrypt(skb, secy, hdr_pn)) 1246 goto drop; 1247 1248 deliver: 1249 macsec_finalize_skb(skb, secy->icv_len, 1250 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1251 len = skb->len; 1252 macsec_reset_skb(skb, secy->netdev); 1253 1254 if (rx_sa) 1255 macsec_rxsa_put(rx_sa); 1256 macsec_rxsc_put(rx_sc); 1257 1258 skb_orphan(skb); 1259 ret = gro_cells_receive(&macsec->gro_cells, skb); 1260 if (ret == NET_RX_SUCCESS) 1261 count_rx(dev, len); 1262 else 1263 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1264 1265 rcu_read_unlock(); 1266 1267 *pskb = NULL; 1268 return RX_HANDLER_CONSUMED; 1269 1270 drop: 1271 macsec_rxsa_put(rx_sa); 1272 drop_nosa: 1273 macsec_rxsc_put(rx_sc); 1274 rcu_read_unlock(); 1275 drop_direct: 1276 kfree_skb(skb); 1277 *pskb = NULL; 1278 return RX_HANDLER_CONSUMED; 1279 1280 nosci: 1281 /* 10.6.1 if the SC is not found */ 1282 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1283 if (!cbit) 1284 macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN, 1285 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1286 1287 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1288 struct sk_buff *nskb; 1289 1290 secy_stats = this_cpu_ptr(macsec->stats); 1291 1292 /* If validateFrames is Strict or the C bit in the 1293 * SecTAG is set, discard 1294 */ 1295 if (cbit || 1296 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1297 u64_stats_update_begin(&secy_stats->syncp); 1298 secy_stats->stats.InPktsNoSCI++; 1299 u64_stats_update_end(&secy_stats->syncp); 1300 DEV_STATS_INC(macsec->secy.netdev, rx_errors); 1301 continue; 1302 } 1303 1304 /* not strict, the frame (with the SecTAG and ICV 1305 * removed) is delivered to the Controlled Port. 1306 */ 1307 nskb = skb_clone(skb, GFP_ATOMIC); 1308 if (!nskb) 1309 break; 1310 1311 macsec_reset_skb(nskb, macsec->secy.netdev); 1312 1313 ret = __netif_rx(nskb); 1314 if (ret == NET_RX_SUCCESS) { 1315 u64_stats_update_begin(&secy_stats->syncp); 1316 secy_stats->stats.InPktsUnknownSCI++; 1317 u64_stats_update_end(&secy_stats->syncp); 1318 } else { 1319 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1320 } 1321 } 1322 1323 rcu_read_unlock(); 1324 *pskb = skb; 1325 return RX_HANDLER_PASS; 1326 } 1327 1328 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1329 { 1330 struct crypto_aead *tfm; 1331 int ret; 1332 1333 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1334 1335 if (IS_ERR(tfm)) 1336 return tfm; 1337 1338 ret = crypto_aead_setkey(tfm, key, key_len); 1339 if (ret < 0) 1340 goto fail; 1341 1342 ret = crypto_aead_setauthsize(tfm, icv_len); 1343 if (ret < 0) 1344 goto fail; 1345 1346 return tfm; 1347 fail: 1348 crypto_free_aead(tfm); 1349 return ERR_PTR(ret); 1350 } 1351 1352 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1353 int icv_len) 1354 { 1355 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1356 if (!rx_sa->stats) 1357 return -ENOMEM; 1358 1359 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1360 if (IS_ERR(rx_sa->key.tfm)) { 1361 free_percpu(rx_sa->stats); 1362 return PTR_ERR(rx_sa->key.tfm); 1363 } 1364 1365 rx_sa->ssci = MACSEC_UNDEF_SSCI; 1366 rx_sa->active = false; 1367 rx_sa->next_pn = 1; 1368 refcount_set(&rx_sa->refcnt, 1); 1369 spin_lock_init(&rx_sa->lock); 1370 1371 return 0; 1372 } 1373 1374 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1375 { 1376 rx_sa->active = false; 1377 1378 macsec_rxsa_put(rx_sa); 1379 } 1380 1381 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1382 { 1383 int i; 1384 1385 for (i = 0; i < MACSEC_NUM_AN; i++) { 1386 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1387 1388 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1389 if (sa) 1390 clear_rx_sa(sa); 1391 } 1392 1393 macsec_rxsc_put(rx_sc); 1394 } 1395 1396 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1397 { 1398 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1399 1400 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1401 rx_sc; 1402 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1403 if (rx_sc->sci == sci) { 1404 if (rx_sc->active) 1405 secy->n_rx_sc--; 1406 rcu_assign_pointer(*rx_scp, rx_sc->next); 1407 return rx_sc; 1408 } 1409 } 1410 1411 return NULL; 1412 } 1413 1414 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, 1415 bool active) 1416 { 1417 struct macsec_rx_sc *rx_sc; 1418 struct macsec_dev *macsec; 1419 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1420 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1421 struct macsec_secy *secy; 1422 1423 list_for_each_entry(macsec, &rxd->secys, secys) { 1424 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1425 return ERR_PTR(-EEXIST); 1426 } 1427 1428 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1429 if (!rx_sc) 1430 return ERR_PTR(-ENOMEM); 1431 1432 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1433 if (!rx_sc->stats) { 1434 kfree(rx_sc); 1435 return ERR_PTR(-ENOMEM); 1436 } 1437 1438 rx_sc->sci = sci; 1439 rx_sc->active = active; 1440 refcount_set(&rx_sc->refcnt, 1); 1441 1442 secy = &macsec_priv(dev)->secy; 1443 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1444 rcu_assign_pointer(secy->rx_sc, rx_sc); 1445 1446 if (rx_sc->active) 1447 secy->n_rx_sc++; 1448 1449 return rx_sc; 1450 } 1451 1452 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, 1453 int icv_len) 1454 { 1455 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); 1456 if (!tx_sa->stats) 1457 return -ENOMEM; 1458 1459 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1460 if (IS_ERR(tx_sa->key.tfm)) { 1461 free_percpu(tx_sa->stats); 1462 return PTR_ERR(tx_sa->key.tfm); 1463 } 1464 1465 tx_sa->ssci = MACSEC_UNDEF_SSCI; 1466 tx_sa->active = false; 1467 refcount_set(&tx_sa->refcnt, 1); 1468 spin_lock_init(&tx_sa->lock); 1469 1470 return 0; 1471 } 1472 1473 static void clear_tx_sa(struct macsec_tx_sa *tx_sa) 1474 { 1475 tx_sa->active = false; 1476 1477 macsec_txsa_put(tx_sa); 1478 } 1479 1480 static struct genl_family macsec_fam; 1481 1482 static struct net_device *get_dev_from_nl(struct net *net, 1483 struct nlattr **attrs) 1484 { 1485 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); 1486 struct net_device *dev; 1487 1488 dev = __dev_get_by_index(net, ifindex); 1489 if (!dev) 1490 return ERR_PTR(-ENODEV); 1491 1492 if (!netif_is_macsec(dev)) 1493 return ERR_PTR(-ENODEV); 1494 1495 return dev; 1496 } 1497 1498 static enum macsec_offload nla_get_offload(const struct nlattr *nla) 1499 { 1500 return (__force enum macsec_offload)nla_get_u8(nla); 1501 } 1502 1503 static sci_t nla_get_sci(const struct nlattr *nla) 1504 { 1505 return (__force sci_t)nla_get_u64(nla); 1506 } 1507 1508 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, 1509 int padattr) 1510 { 1511 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); 1512 } 1513 1514 static ssci_t nla_get_ssci(const struct nlattr *nla) 1515 { 1516 return (__force ssci_t)nla_get_u32(nla); 1517 } 1518 1519 static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value) 1520 { 1521 return nla_put_u32(skb, attrtype, (__force u64)value); 1522 } 1523 1524 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, 1525 struct nlattr **attrs, 1526 struct nlattr **tb_sa, 1527 struct net_device **devp, 1528 struct macsec_secy **secyp, 1529 struct macsec_tx_sc **scp, 1530 u8 *assoc_num) 1531 { 1532 struct net_device *dev; 1533 struct macsec_secy *secy; 1534 struct macsec_tx_sc *tx_sc; 1535 struct macsec_tx_sa *tx_sa; 1536 1537 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1538 return ERR_PTR(-EINVAL); 1539 1540 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1541 1542 dev = get_dev_from_nl(net, attrs); 1543 if (IS_ERR(dev)) 1544 return ERR_CAST(dev); 1545 1546 if (*assoc_num >= MACSEC_NUM_AN) 1547 return ERR_PTR(-EINVAL); 1548 1549 secy = &macsec_priv(dev)->secy; 1550 tx_sc = &secy->tx_sc; 1551 1552 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); 1553 if (!tx_sa) 1554 return ERR_PTR(-ENODEV); 1555 1556 *devp = dev; 1557 *scp = tx_sc; 1558 *secyp = secy; 1559 return tx_sa; 1560 } 1561 1562 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, 1563 struct nlattr **attrs, 1564 struct nlattr **tb_rxsc, 1565 struct net_device **devp, 1566 struct macsec_secy **secyp) 1567 { 1568 struct net_device *dev; 1569 struct macsec_secy *secy; 1570 struct macsec_rx_sc *rx_sc; 1571 sci_t sci; 1572 1573 dev = get_dev_from_nl(net, attrs); 1574 if (IS_ERR(dev)) 1575 return ERR_CAST(dev); 1576 1577 secy = &macsec_priv(dev)->secy; 1578 1579 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1580 return ERR_PTR(-EINVAL); 1581 1582 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1583 rx_sc = find_rx_sc_rtnl(secy, sci); 1584 if (!rx_sc) 1585 return ERR_PTR(-ENODEV); 1586 1587 *secyp = secy; 1588 *devp = dev; 1589 1590 return rx_sc; 1591 } 1592 1593 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, 1594 struct nlattr **attrs, 1595 struct nlattr **tb_rxsc, 1596 struct nlattr **tb_sa, 1597 struct net_device **devp, 1598 struct macsec_secy **secyp, 1599 struct macsec_rx_sc **scp, 1600 u8 *assoc_num) 1601 { 1602 struct macsec_rx_sc *rx_sc; 1603 struct macsec_rx_sa *rx_sa; 1604 1605 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1606 return ERR_PTR(-EINVAL); 1607 1608 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1609 if (*assoc_num >= MACSEC_NUM_AN) 1610 return ERR_PTR(-EINVAL); 1611 1612 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); 1613 if (IS_ERR(rx_sc)) 1614 return ERR_CAST(rx_sc); 1615 1616 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); 1617 if (!rx_sa) 1618 return ERR_PTR(-ENODEV); 1619 1620 *scp = rx_sc; 1621 return rx_sa; 1622 } 1623 1624 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { 1625 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, 1626 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, 1627 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, 1628 [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED }, 1629 }; 1630 1631 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { 1632 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, 1633 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, 1634 }; 1635 1636 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { 1637 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1638 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1639 [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4), 1640 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, 1641 .len = MACSEC_KEYID_LEN, }, 1642 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1643 .len = MACSEC_MAX_KEY_LEN, }, 1644 [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 }, 1645 [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY, 1646 .len = MACSEC_SALT_LEN, }, 1647 }; 1648 1649 static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = { 1650 [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 }, 1651 }; 1652 1653 /* Offloads an operation to a device driver */ 1654 static int macsec_offload(int (* const func)(struct macsec_context *), 1655 struct macsec_context *ctx) 1656 { 1657 int ret; 1658 1659 if (unlikely(!func)) 1660 return 0; 1661 1662 if (ctx->offload == MACSEC_OFFLOAD_PHY) 1663 mutex_lock(&ctx->phydev->lock); 1664 1665 ret = (*func)(ctx); 1666 1667 if (ctx->offload == MACSEC_OFFLOAD_PHY) 1668 mutex_unlock(&ctx->phydev->lock); 1669 1670 return ret; 1671 } 1672 1673 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) 1674 { 1675 if (!attrs[MACSEC_ATTR_SA_CONFIG]) 1676 return -EINVAL; 1677 1678 if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) 1679 return -EINVAL; 1680 1681 return 0; 1682 } 1683 1684 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) 1685 { 1686 if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) 1687 return -EINVAL; 1688 1689 if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) 1690 return -EINVAL; 1691 1692 return 0; 1693 } 1694 1695 static bool validate_add_rxsa(struct nlattr **attrs) 1696 { 1697 if (!attrs[MACSEC_SA_ATTR_AN] || 1698 !attrs[MACSEC_SA_ATTR_KEY] || 1699 !attrs[MACSEC_SA_ATTR_KEYID]) 1700 return false; 1701 1702 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1703 return false; 1704 1705 if (attrs[MACSEC_SA_ATTR_PN] && 1706 nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 1707 return false; 1708 1709 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1710 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1711 return false; 1712 } 1713 1714 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1715 return false; 1716 1717 return true; 1718 } 1719 1720 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) 1721 { 1722 struct net_device *dev; 1723 struct nlattr **attrs = info->attrs; 1724 struct macsec_secy *secy; 1725 struct macsec_rx_sc *rx_sc; 1726 struct macsec_rx_sa *rx_sa; 1727 unsigned char assoc_num; 1728 int pn_len; 1729 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1730 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1731 int err; 1732 1733 if (!attrs[MACSEC_ATTR_IFINDEX]) 1734 return -EINVAL; 1735 1736 if (parse_sa_config(attrs, tb_sa)) 1737 return -EINVAL; 1738 1739 if (parse_rxsc_config(attrs, tb_rxsc)) 1740 return -EINVAL; 1741 1742 if (!validate_add_rxsa(tb_sa)) 1743 return -EINVAL; 1744 1745 rtnl_lock(); 1746 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 1747 if (IS_ERR(rx_sc)) { 1748 rtnl_unlock(); 1749 return PTR_ERR(rx_sc); 1750 } 1751 1752 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1753 1754 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1755 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", 1756 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1757 rtnl_unlock(); 1758 return -EINVAL; 1759 } 1760 1761 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 1762 if (tb_sa[MACSEC_SA_ATTR_PN] && 1763 nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 1764 pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", 1765 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 1766 rtnl_unlock(); 1767 return -EINVAL; 1768 } 1769 1770 if (secy->xpn) { 1771 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { 1772 rtnl_unlock(); 1773 return -EINVAL; 1774 } 1775 1776 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { 1777 pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", 1778 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), 1779 MACSEC_SALT_LEN); 1780 rtnl_unlock(); 1781 return -EINVAL; 1782 } 1783 } 1784 1785 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); 1786 if (rx_sa) { 1787 rtnl_unlock(); 1788 return -EBUSY; 1789 } 1790 1791 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1792 if (!rx_sa) { 1793 rtnl_unlock(); 1794 return -ENOMEM; 1795 } 1796 1797 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1798 secy->key_len, secy->icv_len); 1799 if (err < 0) { 1800 kfree(rx_sa); 1801 rtnl_unlock(); 1802 return err; 1803 } 1804 1805 if (tb_sa[MACSEC_SA_ATTR_PN]) { 1806 spin_lock_bh(&rx_sa->lock); 1807 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 1808 spin_unlock_bh(&rx_sa->lock); 1809 } 1810 1811 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1812 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1813 1814 rx_sa->sc = rx_sc; 1815 1816 if (secy->xpn) { 1817 rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); 1818 nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], 1819 MACSEC_SALT_LEN); 1820 } 1821 1822 /* If h/w offloading is available, propagate to the device */ 1823 if (macsec_is_offloaded(netdev_priv(dev))) { 1824 const struct macsec_ops *ops; 1825 struct macsec_context ctx; 1826 1827 ops = macsec_get_ops(netdev_priv(dev), &ctx); 1828 if (!ops) { 1829 err = -EOPNOTSUPP; 1830 goto cleanup; 1831 } 1832 1833 ctx.sa.assoc_num = assoc_num; 1834 ctx.sa.rx_sa = rx_sa; 1835 ctx.secy = secy; 1836 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1837 secy->key_len); 1838 1839 err = macsec_offload(ops->mdo_add_rxsa, &ctx); 1840 memzero_explicit(ctx.sa.key, secy->key_len); 1841 if (err) 1842 goto cleanup; 1843 } 1844 1845 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1846 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1847 1848 rtnl_unlock(); 1849 1850 return 0; 1851 1852 cleanup: 1853 macsec_rxsa_put(rx_sa); 1854 rtnl_unlock(); 1855 return err; 1856 } 1857 1858 static bool validate_add_rxsc(struct nlattr **attrs) 1859 { 1860 if (!attrs[MACSEC_RXSC_ATTR_SCI]) 1861 return false; 1862 1863 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { 1864 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) 1865 return false; 1866 } 1867 1868 return true; 1869 } 1870 1871 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) 1872 { 1873 struct net_device *dev; 1874 sci_t sci = MACSEC_UNDEF_SCI; 1875 struct nlattr **attrs = info->attrs; 1876 struct macsec_rx_sc *rx_sc; 1877 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1878 struct macsec_secy *secy; 1879 bool active = true; 1880 int ret; 1881 1882 if (!attrs[MACSEC_ATTR_IFINDEX]) 1883 return -EINVAL; 1884 1885 if (parse_rxsc_config(attrs, tb_rxsc)) 1886 return -EINVAL; 1887 1888 if (!validate_add_rxsc(tb_rxsc)) 1889 return -EINVAL; 1890 1891 rtnl_lock(); 1892 dev = get_dev_from_nl(genl_info_net(info), attrs); 1893 if (IS_ERR(dev)) { 1894 rtnl_unlock(); 1895 return PTR_ERR(dev); 1896 } 1897 1898 secy = &macsec_priv(dev)->secy; 1899 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1900 1901 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) 1902 active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 1903 1904 rx_sc = create_rx_sc(dev, sci, active); 1905 if (IS_ERR(rx_sc)) { 1906 rtnl_unlock(); 1907 return PTR_ERR(rx_sc); 1908 } 1909 1910 if (macsec_is_offloaded(netdev_priv(dev))) { 1911 const struct macsec_ops *ops; 1912 struct macsec_context ctx; 1913 1914 ops = macsec_get_ops(netdev_priv(dev), &ctx); 1915 if (!ops) { 1916 ret = -EOPNOTSUPP; 1917 goto cleanup; 1918 } 1919 1920 ctx.rx_sc = rx_sc; 1921 ctx.secy = secy; 1922 1923 ret = macsec_offload(ops->mdo_add_rxsc, &ctx); 1924 if (ret) 1925 goto cleanup; 1926 } 1927 1928 rtnl_unlock(); 1929 1930 return 0; 1931 1932 cleanup: 1933 del_rx_sc(secy, sci); 1934 free_rx_sc(rx_sc); 1935 rtnl_unlock(); 1936 return ret; 1937 } 1938 1939 static bool validate_add_txsa(struct nlattr **attrs) 1940 { 1941 if (!attrs[MACSEC_SA_ATTR_AN] || 1942 !attrs[MACSEC_SA_ATTR_PN] || 1943 !attrs[MACSEC_SA_ATTR_KEY] || 1944 !attrs[MACSEC_SA_ATTR_KEYID]) 1945 return false; 1946 1947 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1948 return false; 1949 1950 if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 1951 return false; 1952 1953 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1954 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1955 return false; 1956 } 1957 1958 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1959 return false; 1960 1961 return true; 1962 } 1963 1964 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) 1965 { 1966 struct net_device *dev; 1967 struct nlattr **attrs = info->attrs; 1968 struct macsec_secy *secy; 1969 struct macsec_tx_sc *tx_sc; 1970 struct macsec_tx_sa *tx_sa; 1971 unsigned char assoc_num; 1972 int pn_len; 1973 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1974 bool was_operational; 1975 int err; 1976 1977 if (!attrs[MACSEC_ATTR_IFINDEX]) 1978 return -EINVAL; 1979 1980 if (parse_sa_config(attrs, tb_sa)) 1981 return -EINVAL; 1982 1983 if (!validate_add_txsa(tb_sa)) 1984 return -EINVAL; 1985 1986 rtnl_lock(); 1987 dev = get_dev_from_nl(genl_info_net(info), attrs); 1988 if (IS_ERR(dev)) { 1989 rtnl_unlock(); 1990 return PTR_ERR(dev); 1991 } 1992 1993 secy = &macsec_priv(dev)->secy; 1994 tx_sc = &secy->tx_sc; 1995 1996 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1997 1998 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1999 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", 2000 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 2001 rtnl_unlock(); 2002 return -EINVAL; 2003 } 2004 2005 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2006 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2007 pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n", 2008 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2009 rtnl_unlock(); 2010 return -EINVAL; 2011 } 2012 2013 if (secy->xpn) { 2014 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { 2015 rtnl_unlock(); 2016 return -EINVAL; 2017 } 2018 2019 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { 2020 pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n", 2021 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), 2022 MACSEC_SALT_LEN); 2023 rtnl_unlock(); 2024 return -EINVAL; 2025 } 2026 } 2027 2028 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); 2029 if (tx_sa) { 2030 rtnl_unlock(); 2031 return -EBUSY; 2032 } 2033 2034 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 2035 if (!tx_sa) { 2036 rtnl_unlock(); 2037 return -ENOMEM; 2038 } 2039 2040 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 2041 secy->key_len, secy->icv_len); 2042 if (err < 0) { 2043 kfree(tx_sa); 2044 rtnl_unlock(); 2045 return err; 2046 } 2047 2048 spin_lock_bh(&tx_sa->lock); 2049 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2050 spin_unlock_bh(&tx_sa->lock); 2051 2052 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2053 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2054 2055 was_operational = secy->operational; 2056 if (assoc_num == tx_sc->encoding_sa && tx_sa->active) 2057 secy->operational = true; 2058 2059 if (secy->xpn) { 2060 tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); 2061 nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], 2062 MACSEC_SALT_LEN); 2063 } 2064 2065 /* If h/w offloading is available, propagate to the device */ 2066 if (macsec_is_offloaded(netdev_priv(dev))) { 2067 const struct macsec_ops *ops; 2068 struct macsec_context ctx; 2069 2070 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2071 if (!ops) { 2072 err = -EOPNOTSUPP; 2073 goto cleanup; 2074 } 2075 2076 ctx.sa.assoc_num = assoc_num; 2077 ctx.sa.tx_sa = tx_sa; 2078 ctx.secy = secy; 2079 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 2080 secy->key_len); 2081 2082 err = macsec_offload(ops->mdo_add_txsa, &ctx); 2083 memzero_explicit(ctx.sa.key, secy->key_len); 2084 if (err) 2085 goto cleanup; 2086 } 2087 2088 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 2089 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); 2090 2091 rtnl_unlock(); 2092 2093 return 0; 2094 2095 cleanup: 2096 secy->operational = was_operational; 2097 macsec_txsa_put(tx_sa); 2098 rtnl_unlock(); 2099 return err; 2100 } 2101 2102 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) 2103 { 2104 struct nlattr **attrs = info->attrs; 2105 struct net_device *dev; 2106 struct macsec_secy *secy; 2107 struct macsec_rx_sc *rx_sc; 2108 struct macsec_rx_sa *rx_sa; 2109 u8 assoc_num; 2110 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2111 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2112 int ret; 2113 2114 if (!attrs[MACSEC_ATTR_IFINDEX]) 2115 return -EINVAL; 2116 2117 if (parse_sa_config(attrs, tb_sa)) 2118 return -EINVAL; 2119 2120 if (parse_rxsc_config(attrs, tb_rxsc)) 2121 return -EINVAL; 2122 2123 rtnl_lock(); 2124 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2125 &dev, &secy, &rx_sc, &assoc_num); 2126 if (IS_ERR(rx_sa)) { 2127 rtnl_unlock(); 2128 return PTR_ERR(rx_sa); 2129 } 2130 2131 if (rx_sa->active) { 2132 rtnl_unlock(); 2133 return -EBUSY; 2134 } 2135 2136 /* If h/w offloading is available, propagate to the device */ 2137 if (macsec_is_offloaded(netdev_priv(dev))) { 2138 const struct macsec_ops *ops; 2139 struct macsec_context ctx; 2140 2141 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2142 if (!ops) { 2143 ret = -EOPNOTSUPP; 2144 goto cleanup; 2145 } 2146 2147 ctx.sa.assoc_num = assoc_num; 2148 ctx.sa.rx_sa = rx_sa; 2149 ctx.secy = secy; 2150 2151 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2152 if (ret) 2153 goto cleanup; 2154 } 2155 2156 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2157 clear_rx_sa(rx_sa); 2158 2159 rtnl_unlock(); 2160 2161 return 0; 2162 2163 cleanup: 2164 rtnl_unlock(); 2165 return ret; 2166 } 2167 2168 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2169 { 2170 struct nlattr **attrs = info->attrs; 2171 struct net_device *dev; 2172 struct macsec_secy *secy; 2173 struct macsec_rx_sc *rx_sc; 2174 sci_t sci; 2175 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2176 int ret; 2177 2178 if (!attrs[MACSEC_ATTR_IFINDEX]) 2179 return -EINVAL; 2180 2181 if (parse_rxsc_config(attrs, tb_rxsc)) 2182 return -EINVAL; 2183 2184 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2185 return -EINVAL; 2186 2187 rtnl_lock(); 2188 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2189 if (IS_ERR(dev)) { 2190 rtnl_unlock(); 2191 return PTR_ERR(dev); 2192 } 2193 2194 secy = &macsec_priv(dev)->secy; 2195 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2196 2197 rx_sc = del_rx_sc(secy, sci); 2198 if (!rx_sc) { 2199 rtnl_unlock(); 2200 return -ENODEV; 2201 } 2202 2203 /* If h/w offloading is available, propagate to the device */ 2204 if (macsec_is_offloaded(netdev_priv(dev))) { 2205 const struct macsec_ops *ops; 2206 struct macsec_context ctx; 2207 2208 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2209 if (!ops) { 2210 ret = -EOPNOTSUPP; 2211 goto cleanup; 2212 } 2213 2214 ctx.rx_sc = rx_sc; 2215 ctx.secy = secy; 2216 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2217 if (ret) 2218 goto cleanup; 2219 } 2220 2221 free_rx_sc(rx_sc); 2222 rtnl_unlock(); 2223 2224 return 0; 2225 2226 cleanup: 2227 rtnl_unlock(); 2228 return ret; 2229 } 2230 2231 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2232 { 2233 struct nlattr **attrs = info->attrs; 2234 struct net_device *dev; 2235 struct macsec_secy *secy; 2236 struct macsec_tx_sc *tx_sc; 2237 struct macsec_tx_sa *tx_sa; 2238 u8 assoc_num; 2239 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2240 int ret; 2241 2242 if (!attrs[MACSEC_ATTR_IFINDEX]) 2243 return -EINVAL; 2244 2245 if (parse_sa_config(attrs, tb_sa)) 2246 return -EINVAL; 2247 2248 rtnl_lock(); 2249 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2250 &dev, &secy, &tx_sc, &assoc_num); 2251 if (IS_ERR(tx_sa)) { 2252 rtnl_unlock(); 2253 return PTR_ERR(tx_sa); 2254 } 2255 2256 if (tx_sa->active) { 2257 rtnl_unlock(); 2258 return -EBUSY; 2259 } 2260 2261 /* If h/w offloading is available, propagate to the device */ 2262 if (macsec_is_offloaded(netdev_priv(dev))) { 2263 const struct macsec_ops *ops; 2264 struct macsec_context ctx; 2265 2266 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2267 if (!ops) { 2268 ret = -EOPNOTSUPP; 2269 goto cleanup; 2270 } 2271 2272 ctx.sa.assoc_num = assoc_num; 2273 ctx.sa.tx_sa = tx_sa; 2274 ctx.secy = secy; 2275 2276 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2277 if (ret) 2278 goto cleanup; 2279 } 2280 2281 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2282 clear_tx_sa(tx_sa); 2283 2284 rtnl_unlock(); 2285 2286 return 0; 2287 2288 cleanup: 2289 rtnl_unlock(); 2290 return ret; 2291 } 2292 2293 static bool validate_upd_sa(struct nlattr **attrs) 2294 { 2295 if (!attrs[MACSEC_SA_ATTR_AN] || 2296 attrs[MACSEC_SA_ATTR_KEY] || 2297 attrs[MACSEC_SA_ATTR_KEYID] || 2298 attrs[MACSEC_SA_ATTR_SSCI] || 2299 attrs[MACSEC_SA_ATTR_SALT]) 2300 return false; 2301 2302 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2303 return false; 2304 2305 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 2306 return false; 2307 2308 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2309 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2310 return false; 2311 } 2312 2313 return true; 2314 } 2315 2316 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2317 { 2318 struct nlattr **attrs = info->attrs; 2319 struct net_device *dev; 2320 struct macsec_secy *secy; 2321 struct macsec_tx_sc *tx_sc; 2322 struct macsec_tx_sa *tx_sa; 2323 u8 assoc_num; 2324 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2325 bool was_operational, was_active; 2326 pn_t prev_pn; 2327 int ret = 0; 2328 2329 prev_pn.full64 = 0; 2330 2331 if (!attrs[MACSEC_ATTR_IFINDEX]) 2332 return -EINVAL; 2333 2334 if (parse_sa_config(attrs, tb_sa)) 2335 return -EINVAL; 2336 2337 if (!validate_upd_sa(tb_sa)) 2338 return -EINVAL; 2339 2340 rtnl_lock(); 2341 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2342 &dev, &secy, &tx_sc, &assoc_num); 2343 if (IS_ERR(tx_sa)) { 2344 rtnl_unlock(); 2345 return PTR_ERR(tx_sa); 2346 } 2347 2348 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2349 int pn_len; 2350 2351 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2352 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2353 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2354 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2355 rtnl_unlock(); 2356 return -EINVAL; 2357 } 2358 2359 spin_lock_bh(&tx_sa->lock); 2360 prev_pn = tx_sa->next_pn_halves; 2361 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2362 spin_unlock_bh(&tx_sa->lock); 2363 } 2364 2365 was_active = tx_sa->active; 2366 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2367 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2368 2369 was_operational = secy->operational; 2370 if (assoc_num == tx_sc->encoding_sa) 2371 secy->operational = tx_sa->active; 2372 2373 /* If h/w offloading is available, propagate to the device */ 2374 if (macsec_is_offloaded(netdev_priv(dev))) { 2375 const struct macsec_ops *ops; 2376 struct macsec_context ctx; 2377 2378 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2379 if (!ops) { 2380 ret = -EOPNOTSUPP; 2381 goto cleanup; 2382 } 2383 2384 ctx.sa.assoc_num = assoc_num; 2385 ctx.sa.tx_sa = tx_sa; 2386 ctx.secy = secy; 2387 2388 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2389 if (ret) 2390 goto cleanup; 2391 } 2392 2393 rtnl_unlock(); 2394 2395 return 0; 2396 2397 cleanup: 2398 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2399 spin_lock_bh(&tx_sa->lock); 2400 tx_sa->next_pn_halves = prev_pn; 2401 spin_unlock_bh(&tx_sa->lock); 2402 } 2403 tx_sa->active = was_active; 2404 secy->operational = was_operational; 2405 rtnl_unlock(); 2406 return ret; 2407 } 2408 2409 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2410 { 2411 struct nlattr **attrs = info->attrs; 2412 struct net_device *dev; 2413 struct macsec_secy *secy; 2414 struct macsec_rx_sc *rx_sc; 2415 struct macsec_rx_sa *rx_sa; 2416 u8 assoc_num; 2417 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2418 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2419 bool was_active; 2420 pn_t prev_pn; 2421 int ret = 0; 2422 2423 prev_pn.full64 = 0; 2424 2425 if (!attrs[MACSEC_ATTR_IFINDEX]) 2426 return -EINVAL; 2427 2428 if (parse_rxsc_config(attrs, tb_rxsc)) 2429 return -EINVAL; 2430 2431 if (parse_sa_config(attrs, tb_sa)) 2432 return -EINVAL; 2433 2434 if (!validate_upd_sa(tb_sa)) 2435 return -EINVAL; 2436 2437 rtnl_lock(); 2438 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2439 &dev, &secy, &rx_sc, &assoc_num); 2440 if (IS_ERR(rx_sa)) { 2441 rtnl_unlock(); 2442 return PTR_ERR(rx_sa); 2443 } 2444 2445 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2446 int pn_len; 2447 2448 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2449 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2450 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2451 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2452 rtnl_unlock(); 2453 return -EINVAL; 2454 } 2455 2456 spin_lock_bh(&rx_sa->lock); 2457 prev_pn = rx_sa->next_pn_halves; 2458 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2459 spin_unlock_bh(&rx_sa->lock); 2460 } 2461 2462 was_active = rx_sa->active; 2463 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2464 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2465 2466 /* If h/w offloading is available, propagate to the device */ 2467 if (macsec_is_offloaded(netdev_priv(dev))) { 2468 const struct macsec_ops *ops; 2469 struct macsec_context ctx; 2470 2471 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2472 if (!ops) { 2473 ret = -EOPNOTSUPP; 2474 goto cleanup; 2475 } 2476 2477 ctx.sa.assoc_num = assoc_num; 2478 ctx.sa.rx_sa = rx_sa; 2479 ctx.secy = secy; 2480 2481 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2482 if (ret) 2483 goto cleanup; 2484 } 2485 2486 rtnl_unlock(); 2487 return 0; 2488 2489 cleanup: 2490 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2491 spin_lock_bh(&rx_sa->lock); 2492 rx_sa->next_pn_halves = prev_pn; 2493 spin_unlock_bh(&rx_sa->lock); 2494 } 2495 rx_sa->active = was_active; 2496 rtnl_unlock(); 2497 return ret; 2498 } 2499 2500 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2501 { 2502 struct nlattr **attrs = info->attrs; 2503 struct net_device *dev; 2504 struct macsec_secy *secy; 2505 struct macsec_rx_sc *rx_sc; 2506 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2507 unsigned int prev_n_rx_sc; 2508 bool was_active; 2509 int ret; 2510 2511 if (!attrs[MACSEC_ATTR_IFINDEX]) 2512 return -EINVAL; 2513 2514 if (parse_rxsc_config(attrs, tb_rxsc)) 2515 return -EINVAL; 2516 2517 if (!validate_add_rxsc(tb_rxsc)) 2518 return -EINVAL; 2519 2520 rtnl_lock(); 2521 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2522 if (IS_ERR(rx_sc)) { 2523 rtnl_unlock(); 2524 return PTR_ERR(rx_sc); 2525 } 2526 2527 was_active = rx_sc->active; 2528 prev_n_rx_sc = secy->n_rx_sc; 2529 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2530 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2531 2532 if (rx_sc->active != new) 2533 secy->n_rx_sc += new ? 1 : -1; 2534 2535 rx_sc->active = new; 2536 } 2537 2538 /* If h/w offloading is available, propagate to the device */ 2539 if (macsec_is_offloaded(netdev_priv(dev))) { 2540 const struct macsec_ops *ops; 2541 struct macsec_context ctx; 2542 2543 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2544 if (!ops) { 2545 ret = -EOPNOTSUPP; 2546 goto cleanup; 2547 } 2548 2549 ctx.rx_sc = rx_sc; 2550 ctx.secy = secy; 2551 2552 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); 2553 if (ret) 2554 goto cleanup; 2555 } 2556 2557 rtnl_unlock(); 2558 2559 return 0; 2560 2561 cleanup: 2562 secy->n_rx_sc = prev_n_rx_sc; 2563 rx_sc->active = was_active; 2564 rtnl_unlock(); 2565 return ret; 2566 } 2567 2568 static bool macsec_is_configured(struct macsec_dev *macsec) 2569 { 2570 struct macsec_secy *secy = &macsec->secy; 2571 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2572 int i; 2573 2574 if (secy->rx_sc) 2575 return true; 2576 2577 for (i = 0; i < MACSEC_NUM_AN; i++) 2578 if (tx_sc->sa[i]) 2579 return true; 2580 2581 return false; 2582 } 2583 2584 static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) 2585 { 2586 enum macsec_offload prev_offload; 2587 const struct macsec_ops *ops; 2588 struct macsec_context ctx; 2589 struct macsec_dev *macsec; 2590 int ret = 0; 2591 2592 macsec = macsec_priv(dev); 2593 2594 /* Check if the offloading mode is supported by the underlying layers */ 2595 if (offload != MACSEC_OFFLOAD_OFF && 2596 !macsec_check_offload(offload, macsec)) 2597 return -EOPNOTSUPP; 2598 2599 /* Check if the net device is busy. */ 2600 if (netif_running(dev)) 2601 return -EBUSY; 2602 2603 /* Check if the device already has rules configured: we do not support 2604 * rules migration. 2605 */ 2606 if (macsec_is_configured(macsec)) 2607 return -EBUSY; 2608 2609 prev_offload = macsec->offload; 2610 2611 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, 2612 macsec, &ctx); 2613 if (!ops) 2614 return -EOPNOTSUPP; 2615 2616 macsec->offload = offload; 2617 2618 ctx.secy = &macsec->secy; 2619 ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx) 2620 : macsec_offload(ops->mdo_add_secy, &ctx); 2621 if (ret) 2622 macsec->offload = prev_offload; 2623 2624 return ret; 2625 } 2626 2627 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2628 { 2629 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2630 struct nlattr **attrs = info->attrs; 2631 enum macsec_offload offload; 2632 struct macsec_dev *macsec; 2633 struct net_device *dev; 2634 int ret = 0; 2635 2636 if (!attrs[MACSEC_ATTR_IFINDEX]) 2637 return -EINVAL; 2638 2639 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2640 return -EINVAL; 2641 2642 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2643 attrs[MACSEC_ATTR_OFFLOAD], 2644 macsec_genl_offload_policy, NULL)) 2645 return -EINVAL; 2646 2647 rtnl_lock(); 2648 2649 dev = get_dev_from_nl(genl_info_net(info), attrs); 2650 if (IS_ERR(dev)) { 2651 ret = PTR_ERR(dev); 2652 goto out; 2653 } 2654 macsec = macsec_priv(dev); 2655 2656 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { 2657 ret = -EINVAL; 2658 goto out; 2659 } 2660 2661 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2662 2663 if (macsec->offload != offload) 2664 ret = macsec_update_offload(dev, offload); 2665 out: 2666 rtnl_unlock(); 2667 return ret; 2668 } 2669 2670 static void get_tx_sa_stats(struct net_device *dev, int an, 2671 struct macsec_tx_sa *tx_sa, 2672 struct macsec_tx_sa_stats *sum) 2673 { 2674 struct macsec_dev *macsec = macsec_priv(dev); 2675 int cpu; 2676 2677 /* If h/w offloading is available, propagate to the device */ 2678 if (macsec_is_offloaded(macsec)) { 2679 const struct macsec_ops *ops; 2680 struct macsec_context ctx; 2681 2682 ops = macsec_get_ops(macsec, &ctx); 2683 if (ops) { 2684 ctx.sa.assoc_num = an; 2685 ctx.sa.tx_sa = tx_sa; 2686 ctx.stats.tx_sa_stats = sum; 2687 ctx.secy = &macsec_priv(dev)->secy; 2688 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2689 } 2690 return; 2691 } 2692 2693 for_each_possible_cpu(cpu) { 2694 const struct macsec_tx_sa_stats *stats = 2695 per_cpu_ptr(tx_sa->stats, cpu); 2696 2697 sum->OutPktsProtected += stats->OutPktsProtected; 2698 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2699 } 2700 } 2701 2702 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2703 { 2704 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2705 sum->OutPktsProtected) || 2706 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2707 sum->OutPktsEncrypted)) 2708 return -EMSGSIZE; 2709 2710 return 0; 2711 } 2712 2713 static void get_rx_sa_stats(struct net_device *dev, 2714 struct macsec_rx_sc *rx_sc, int an, 2715 struct macsec_rx_sa *rx_sa, 2716 struct macsec_rx_sa_stats *sum) 2717 { 2718 struct macsec_dev *macsec = macsec_priv(dev); 2719 int cpu; 2720 2721 /* If h/w offloading is available, propagate to the device */ 2722 if (macsec_is_offloaded(macsec)) { 2723 const struct macsec_ops *ops; 2724 struct macsec_context ctx; 2725 2726 ops = macsec_get_ops(macsec, &ctx); 2727 if (ops) { 2728 ctx.sa.assoc_num = an; 2729 ctx.sa.rx_sa = rx_sa; 2730 ctx.stats.rx_sa_stats = sum; 2731 ctx.secy = &macsec_priv(dev)->secy; 2732 ctx.rx_sc = rx_sc; 2733 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2734 } 2735 return; 2736 } 2737 2738 for_each_possible_cpu(cpu) { 2739 const struct macsec_rx_sa_stats *stats = 2740 per_cpu_ptr(rx_sa->stats, cpu); 2741 2742 sum->InPktsOK += stats->InPktsOK; 2743 sum->InPktsInvalid += stats->InPktsInvalid; 2744 sum->InPktsNotValid += stats->InPktsNotValid; 2745 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA; 2746 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2747 } 2748 } 2749 2750 static int copy_rx_sa_stats(struct sk_buff *skb, 2751 struct macsec_rx_sa_stats *sum) 2752 { 2753 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2754 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2755 sum->InPktsInvalid) || 2756 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2757 sum->InPktsNotValid) || 2758 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2759 sum->InPktsNotUsingSA) || 2760 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2761 sum->InPktsUnusedSA)) 2762 return -EMSGSIZE; 2763 2764 return 0; 2765 } 2766 2767 static void get_rx_sc_stats(struct net_device *dev, 2768 struct macsec_rx_sc *rx_sc, 2769 struct macsec_rx_sc_stats *sum) 2770 { 2771 struct macsec_dev *macsec = macsec_priv(dev); 2772 int cpu; 2773 2774 /* If h/w offloading is available, propagate to the device */ 2775 if (macsec_is_offloaded(macsec)) { 2776 const struct macsec_ops *ops; 2777 struct macsec_context ctx; 2778 2779 ops = macsec_get_ops(macsec, &ctx); 2780 if (ops) { 2781 ctx.stats.rx_sc_stats = sum; 2782 ctx.secy = &macsec_priv(dev)->secy; 2783 ctx.rx_sc = rx_sc; 2784 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2785 } 2786 return; 2787 } 2788 2789 for_each_possible_cpu(cpu) { 2790 const struct pcpu_rx_sc_stats *stats; 2791 struct macsec_rx_sc_stats tmp; 2792 unsigned int start; 2793 2794 stats = per_cpu_ptr(rx_sc->stats, cpu); 2795 do { 2796 start = u64_stats_fetch_begin(&stats->syncp); 2797 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2798 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2799 2800 sum->InOctetsValidated += tmp.InOctetsValidated; 2801 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2802 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2803 sum->InPktsDelayed += tmp.InPktsDelayed; 2804 sum->InPktsOK += tmp.InPktsOK; 2805 sum->InPktsInvalid += tmp.InPktsInvalid; 2806 sum->InPktsLate += tmp.InPktsLate; 2807 sum->InPktsNotValid += tmp.InPktsNotValid; 2808 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2809 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2810 } 2811 } 2812 2813 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2814 { 2815 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2816 sum->InOctetsValidated, 2817 MACSEC_RXSC_STATS_ATTR_PAD) || 2818 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2819 sum->InOctetsDecrypted, 2820 MACSEC_RXSC_STATS_ATTR_PAD) || 2821 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2822 sum->InPktsUnchecked, 2823 MACSEC_RXSC_STATS_ATTR_PAD) || 2824 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2825 sum->InPktsDelayed, 2826 MACSEC_RXSC_STATS_ATTR_PAD) || 2827 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2828 sum->InPktsOK, 2829 MACSEC_RXSC_STATS_ATTR_PAD) || 2830 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2831 sum->InPktsInvalid, 2832 MACSEC_RXSC_STATS_ATTR_PAD) || 2833 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2834 sum->InPktsLate, 2835 MACSEC_RXSC_STATS_ATTR_PAD) || 2836 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2837 sum->InPktsNotValid, 2838 MACSEC_RXSC_STATS_ATTR_PAD) || 2839 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2840 sum->InPktsNotUsingSA, 2841 MACSEC_RXSC_STATS_ATTR_PAD) || 2842 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2843 sum->InPktsUnusedSA, 2844 MACSEC_RXSC_STATS_ATTR_PAD)) 2845 return -EMSGSIZE; 2846 2847 return 0; 2848 } 2849 2850 static void get_tx_sc_stats(struct net_device *dev, 2851 struct macsec_tx_sc_stats *sum) 2852 { 2853 struct macsec_dev *macsec = macsec_priv(dev); 2854 int cpu; 2855 2856 /* If h/w offloading is available, propagate to the device */ 2857 if (macsec_is_offloaded(macsec)) { 2858 const struct macsec_ops *ops; 2859 struct macsec_context ctx; 2860 2861 ops = macsec_get_ops(macsec, &ctx); 2862 if (ops) { 2863 ctx.stats.tx_sc_stats = sum; 2864 ctx.secy = &macsec_priv(dev)->secy; 2865 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2866 } 2867 return; 2868 } 2869 2870 for_each_possible_cpu(cpu) { 2871 const struct pcpu_tx_sc_stats *stats; 2872 struct macsec_tx_sc_stats tmp; 2873 unsigned int start; 2874 2875 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2876 do { 2877 start = u64_stats_fetch_begin(&stats->syncp); 2878 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2879 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2880 2881 sum->OutPktsProtected += tmp.OutPktsProtected; 2882 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2883 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2884 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2885 } 2886 } 2887 2888 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2889 { 2890 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2891 sum->OutPktsProtected, 2892 MACSEC_TXSC_STATS_ATTR_PAD) || 2893 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2894 sum->OutPktsEncrypted, 2895 MACSEC_TXSC_STATS_ATTR_PAD) || 2896 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2897 sum->OutOctetsProtected, 2898 MACSEC_TXSC_STATS_ATTR_PAD) || 2899 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2900 sum->OutOctetsEncrypted, 2901 MACSEC_TXSC_STATS_ATTR_PAD)) 2902 return -EMSGSIZE; 2903 2904 return 0; 2905 } 2906 2907 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2908 { 2909 struct macsec_dev *macsec = macsec_priv(dev); 2910 int cpu; 2911 2912 /* If h/w offloading is available, propagate to the device */ 2913 if (macsec_is_offloaded(macsec)) { 2914 const struct macsec_ops *ops; 2915 struct macsec_context ctx; 2916 2917 ops = macsec_get_ops(macsec, &ctx); 2918 if (ops) { 2919 ctx.stats.dev_stats = sum; 2920 ctx.secy = &macsec_priv(dev)->secy; 2921 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2922 } 2923 return; 2924 } 2925 2926 for_each_possible_cpu(cpu) { 2927 const struct pcpu_secy_stats *stats; 2928 struct macsec_dev_stats tmp; 2929 unsigned int start; 2930 2931 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2932 do { 2933 start = u64_stats_fetch_begin(&stats->syncp); 2934 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2935 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2936 2937 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2938 sum->InPktsUntagged += tmp.InPktsUntagged; 2939 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2940 sum->InPktsNoTag += tmp.InPktsNoTag; 2941 sum->InPktsBadTag += tmp.InPktsBadTag; 2942 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2943 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2944 sum->InPktsOverrun += tmp.InPktsOverrun; 2945 } 2946 } 2947 2948 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2949 { 2950 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2951 sum->OutPktsUntagged, 2952 MACSEC_SECY_STATS_ATTR_PAD) || 2953 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2954 sum->InPktsUntagged, 2955 MACSEC_SECY_STATS_ATTR_PAD) || 2956 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2957 sum->OutPktsTooLong, 2958 MACSEC_SECY_STATS_ATTR_PAD) || 2959 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2960 sum->InPktsNoTag, 2961 MACSEC_SECY_STATS_ATTR_PAD) || 2962 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2963 sum->InPktsBadTag, 2964 MACSEC_SECY_STATS_ATTR_PAD) || 2965 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2966 sum->InPktsUnknownSCI, 2967 MACSEC_SECY_STATS_ATTR_PAD) || 2968 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2969 sum->InPktsNoSCI, 2970 MACSEC_SECY_STATS_ATTR_PAD) || 2971 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2972 sum->InPktsOverrun, 2973 MACSEC_SECY_STATS_ATTR_PAD)) 2974 return -EMSGSIZE; 2975 2976 return 0; 2977 } 2978 2979 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2980 { 2981 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2982 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2983 MACSEC_ATTR_SECY); 2984 u64 csid; 2985 2986 if (!secy_nest) 2987 return 1; 2988 2989 switch (secy->key_len) { 2990 case MACSEC_GCM_AES_128_SAK_LEN: 2991 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 2992 break; 2993 case MACSEC_GCM_AES_256_SAK_LEN: 2994 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 2995 break; 2996 default: 2997 goto cancel; 2998 } 2999 3000 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 3001 MACSEC_SECY_ATTR_PAD) || 3002 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 3003 csid, MACSEC_SECY_ATTR_PAD) || 3004 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 3005 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 3006 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 3007 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 3008 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 3009 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 3010 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 3011 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3012 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3013 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3014 goto cancel; 3015 3016 if (secy->replay_protect) { 3017 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3018 goto cancel; 3019 } 3020 3021 nla_nest_end(skb, secy_nest); 3022 return 0; 3023 3024 cancel: 3025 nla_nest_cancel(skb, secy_nest); 3026 return 1; 3027 } 3028 3029 static noinline_for_stack int 3030 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3031 struct sk_buff *skb, struct netlink_callback *cb) 3032 { 3033 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3034 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3035 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3036 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3037 struct macsec_dev *macsec = netdev_priv(dev); 3038 struct macsec_dev_stats dev_stats = {0, }; 3039 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3040 struct nlattr *txsa_list, *rxsc_list; 3041 struct macsec_rx_sc *rx_sc; 3042 struct nlattr *attr; 3043 void *hdr; 3044 int i, j; 3045 3046 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3047 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3048 if (!hdr) 3049 return -EMSGSIZE; 3050 3051 genl_dump_check_consistent(cb, hdr); 3052 3053 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3054 goto nla_put_failure; 3055 3056 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3057 if (!attr) 3058 goto nla_put_failure; 3059 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3060 goto nla_put_failure; 3061 nla_nest_end(skb, attr); 3062 3063 if (nla_put_secy(secy, skb)) 3064 goto nla_put_failure; 3065 3066 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3067 if (!attr) 3068 goto nla_put_failure; 3069 3070 get_tx_sc_stats(dev, &tx_sc_stats); 3071 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3072 nla_nest_cancel(skb, attr); 3073 goto nla_put_failure; 3074 } 3075 nla_nest_end(skb, attr); 3076 3077 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3078 if (!attr) 3079 goto nla_put_failure; 3080 get_secy_stats(dev, &dev_stats); 3081 if (copy_secy_stats(skb, &dev_stats)) { 3082 nla_nest_cancel(skb, attr); 3083 goto nla_put_failure; 3084 } 3085 nla_nest_end(skb, attr); 3086 3087 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3088 if (!txsa_list) 3089 goto nla_put_failure; 3090 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3091 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3092 struct nlattr *txsa_nest; 3093 u64 pn; 3094 int pn_len; 3095 3096 if (!tx_sa) 3097 continue; 3098 3099 txsa_nest = nla_nest_start_noflag(skb, j++); 3100 if (!txsa_nest) { 3101 nla_nest_cancel(skb, txsa_list); 3102 goto nla_put_failure; 3103 } 3104 3105 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3106 if (!attr) { 3107 nla_nest_cancel(skb, txsa_nest); 3108 nla_nest_cancel(skb, txsa_list); 3109 goto nla_put_failure; 3110 } 3111 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3112 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3113 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3114 nla_nest_cancel(skb, attr); 3115 nla_nest_cancel(skb, txsa_nest); 3116 nla_nest_cancel(skb, txsa_list); 3117 goto nla_put_failure; 3118 } 3119 nla_nest_end(skb, attr); 3120 3121 if (secy->xpn) { 3122 pn = tx_sa->next_pn; 3123 pn_len = MACSEC_XPN_PN_LEN; 3124 } else { 3125 pn = tx_sa->next_pn_halves.lower; 3126 pn_len = MACSEC_DEFAULT_PN_LEN; 3127 } 3128 3129 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3130 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3131 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3132 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3133 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3134 nla_nest_cancel(skb, txsa_nest); 3135 nla_nest_cancel(skb, txsa_list); 3136 goto nla_put_failure; 3137 } 3138 3139 nla_nest_end(skb, txsa_nest); 3140 } 3141 nla_nest_end(skb, txsa_list); 3142 3143 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3144 if (!rxsc_list) 3145 goto nla_put_failure; 3146 3147 j = 1; 3148 for_each_rxsc_rtnl(secy, rx_sc) { 3149 int k; 3150 struct nlattr *rxsa_list; 3151 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3152 3153 if (!rxsc_nest) { 3154 nla_nest_cancel(skb, rxsc_list); 3155 goto nla_put_failure; 3156 } 3157 3158 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3159 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3160 MACSEC_RXSC_ATTR_PAD)) { 3161 nla_nest_cancel(skb, rxsc_nest); 3162 nla_nest_cancel(skb, rxsc_list); 3163 goto nla_put_failure; 3164 } 3165 3166 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3167 if (!attr) { 3168 nla_nest_cancel(skb, rxsc_nest); 3169 nla_nest_cancel(skb, rxsc_list); 3170 goto nla_put_failure; 3171 } 3172 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3173 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3174 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3175 nla_nest_cancel(skb, attr); 3176 nla_nest_cancel(skb, rxsc_nest); 3177 nla_nest_cancel(skb, rxsc_list); 3178 goto nla_put_failure; 3179 } 3180 nla_nest_end(skb, attr); 3181 3182 rxsa_list = nla_nest_start_noflag(skb, 3183 MACSEC_RXSC_ATTR_SA_LIST); 3184 if (!rxsa_list) { 3185 nla_nest_cancel(skb, rxsc_nest); 3186 nla_nest_cancel(skb, rxsc_list); 3187 goto nla_put_failure; 3188 } 3189 3190 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3191 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3192 struct nlattr *rxsa_nest; 3193 u64 pn; 3194 int pn_len; 3195 3196 if (!rx_sa) 3197 continue; 3198 3199 rxsa_nest = nla_nest_start_noflag(skb, k++); 3200 if (!rxsa_nest) { 3201 nla_nest_cancel(skb, rxsa_list); 3202 nla_nest_cancel(skb, rxsc_nest); 3203 nla_nest_cancel(skb, rxsc_list); 3204 goto nla_put_failure; 3205 } 3206 3207 attr = nla_nest_start_noflag(skb, 3208 MACSEC_SA_ATTR_STATS); 3209 if (!attr) { 3210 nla_nest_cancel(skb, rxsa_list); 3211 nla_nest_cancel(skb, rxsc_nest); 3212 nla_nest_cancel(skb, rxsc_list); 3213 goto nla_put_failure; 3214 } 3215 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3216 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3217 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3218 nla_nest_cancel(skb, attr); 3219 nla_nest_cancel(skb, rxsa_list); 3220 nla_nest_cancel(skb, rxsc_nest); 3221 nla_nest_cancel(skb, rxsc_list); 3222 goto nla_put_failure; 3223 } 3224 nla_nest_end(skb, attr); 3225 3226 if (secy->xpn) { 3227 pn = rx_sa->next_pn; 3228 pn_len = MACSEC_XPN_PN_LEN; 3229 } else { 3230 pn = rx_sa->next_pn_halves.lower; 3231 pn_len = MACSEC_DEFAULT_PN_LEN; 3232 } 3233 3234 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3235 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3236 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3237 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3238 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3239 nla_nest_cancel(skb, rxsa_nest); 3240 nla_nest_cancel(skb, rxsc_nest); 3241 nla_nest_cancel(skb, rxsc_list); 3242 goto nla_put_failure; 3243 } 3244 nla_nest_end(skb, rxsa_nest); 3245 } 3246 3247 nla_nest_end(skb, rxsa_list); 3248 nla_nest_end(skb, rxsc_nest); 3249 } 3250 3251 nla_nest_end(skb, rxsc_list); 3252 3253 genlmsg_end(skb, hdr); 3254 3255 return 0; 3256 3257 nla_put_failure: 3258 genlmsg_cancel(skb, hdr); 3259 return -EMSGSIZE; 3260 } 3261 3262 static int macsec_generation = 1; /* protected by RTNL */ 3263 3264 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3265 { 3266 struct net *net = sock_net(skb->sk); 3267 struct net_device *dev; 3268 int dev_idx, d; 3269 3270 dev_idx = cb->args[0]; 3271 3272 d = 0; 3273 rtnl_lock(); 3274 3275 cb->seq = macsec_generation; 3276 3277 for_each_netdev(net, dev) { 3278 struct macsec_secy *secy; 3279 3280 if (d < dev_idx) 3281 goto next; 3282 3283 if (!netif_is_macsec(dev)) 3284 goto next; 3285 3286 secy = &macsec_priv(dev)->secy; 3287 if (dump_secy(secy, dev, skb, cb) < 0) 3288 goto done; 3289 next: 3290 d++; 3291 } 3292 3293 done: 3294 rtnl_unlock(); 3295 cb->args[0] = d; 3296 return skb->len; 3297 } 3298 3299 static const struct genl_small_ops macsec_genl_ops[] = { 3300 { 3301 .cmd = MACSEC_CMD_GET_TXSC, 3302 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3303 .dumpit = macsec_dump_txsc, 3304 }, 3305 { 3306 .cmd = MACSEC_CMD_ADD_RXSC, 3307 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3308 .doit = macsec_add_rxsc, 3309 .flags = GENL_ADMIN_PERM, 3310 }, 3311 { 3312 .cmd = MACSEC_CMD_DEL_RXSC, 3313 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3314 .doit = macsec_del_rxsc, 3315 .flags = GENL_ADMIN_PERM, 3316 }, 3317 { 3318 .cmd = MACSEC_CMD_UPD_RXSC, 3319 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3320 .doit = macsec_upd_rxsc, 3321 .flags = GENL_ADMIN_PERM, 3322 }, 3323 { 3324 .cmd = MACSEC_CMD_ADD_TXSA, 3325 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3326 .doit = macsec_add_txsa, 3327 .flags = GENL_ADMIN_PERM, 3328 }, 3329 { 3330 .cmd = MACSEC_CMD_DEL_TXSA, 3331 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3332 .doit = macsec_del_txsa, 3333 .flags = GENL_ADMIN_PERM, 3334 }, 3335 { 3336 .cmd = MACSEC_CMD_UPD_TXSA, 3337 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3338 .doit = macsec_upd_txsa, 3339 .flags = GENL_ADMIN_PERM, 3340 }, 3341 { 3342 .cmd = MACSEC_CMD_ADD_RXSA, 3343 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3344 .doit = macsec_add_rxsa, 3345 .flags = GENL_ADMIN_PERM, 3346 }, 3347 { 3348 .cmd = MACSEC_CMD_DEL_RXSA, 3349 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3350 .doit = macsec_del_rxsa, 3351 .flags = GENL_ADMIN_PERM, 3352 }, 3353 { 3354 .cmd = MACSEC_CMD_UPD_RXSA, 3355 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3356 .doit = macsec_upd_rxsa, 3357 .flags = GENL_ADMIN_PERM, 3358 }, 3359 { 3360 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3361 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3362 .doit = macsec_upd_offload, 3363 .flags = GENL_ADMIN_PERM, 3364 }, 3365 }; 3366 3367 static struct genl_family macsec_fam __ro_after_init = { 3368 .name = MACSEC_GENL_NAME, 3369 .hdrsize = 0, 3370 .version = MACSEC_GENL_VERSION, 3371 .maxattr = MACSEC_ATTR_MAX, 3372 .policy = macsec_genl_policy, 3373 .netnsok = true, 3374 .module = THIS_MODULE, 3375 .small_ops = macsec_genl_ops, 3376 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3377 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, 3378 }; 3379 3380 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3381 struct net_device *dev) 3382 { 3383 struct macsec_dev *macsec = netdev_priv(dev); 3384 struct macsec_secy *secy = &macsec->secy; 3385 struct pcpu_secy_stats *secy_stats; 3386 int ret, len; 3387 3388 if (macsec_is_offloaded(netdev_priv(dev))) { 3389 struct metadata_dst *md_dst = secy->tx_sc.md_dst; 3390 3391 skb_dst_drop(skb); 3392 dst_hold(&md_dst->dst); 3393 skb_dst_set(skb, &md_dst->dst); 3394 skb->dev = macsec->real_dev; 3395 return dev_queue_xmit(skb); 3396 } 3397 3398 /* 10.5 */ 3399 if (!secy->protect_frames) { 3400 secy_stats = this_cpu_ptr(macsec->stats); 3401 u64_stats_update_begin(&secy_stats->syncp); 3402 secy_stats->stats.OutPktsUntagged++; 3403 u64_stats_update_end(&secy_stats->syncp); 3404 skb->dev = macsec->real_dev; 3405 len = skb->len; 3406 ret = dev_queue_xmit(skb); 3407 count_tx(dev, ret, len); 3408 return ret; 3409 } 3410 3411 if (!secy->operational) { 3412 kfree_skb(skb); 3413 DEV_STATS_INC(dev, tx_dropped); 3414 return NETDEV_TX_OK; 3415 } 3416 3417 len = skb->len; 3418 skb = macsec_encrypt(skb, dev); 3419 if (IS_ERR(skb)) { 3420 if (PTR_ERR(skb) != -EINPROGRESS) 3421 DEV_STATS_INC(dev, tx_dropped); 3422 return NETDEV_TX_OK; 3423 } 3424 3425 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3426 3427 macsec_encrypt_finish(skb, dev); 3428 ret = dev_queue_xmit(skb); 3429 count_tx(dev, ret, len); 3430 return ret; 3431 } 3432 3433 #define MACSEC_FEATURES \ 3434 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3435 3436 static int macsec_dev_init(struct net_device *dev) 3437 { 3438 struct macsec_dev *macsec = macsec_priv(dev); 3439 struct net_device *real_dev = macsec->real_dev; 3440 int err; 3441 3442 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 3443 if (!dev->tstats) 3444 return -ENOMEM; 3445 3446 err = gro_cells_init(&macsec->gro_cells, dev); 3447 if (err) { 3448 free_percpu(dev->tstats); 3449 return err; 3450 } 3451 3452 dev->features = real_dev->features & MACSEC_FEATURES; 3453 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 3454 3455 dev->needed_headroom = real_dev->needed_headroom + 3456 MACSEC_NEEDED_HEADROOM; 3457 dev->needed_tailroom = real_dev->needed_tailroom + 3458 MACSEC_NEEDED_TAILROOM; 3459 3460 if (is_zero_ether_addr(dev->dev_addr)) 3461 eth_hw_addr_inherit(dev, real_dev); 3462 if (is_zero_ether_addr(dev->broadcast)) 3463 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3464 3465 /* Get macsec's reference to real_dev */ 3466 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); 3467 3468 return 0; 3469 } 3470 3471 static void macsec_dev_uninit(struct net_device *dev) 3472 { 3473 struct macsec_dev *macsec = macsec_priv(dev); 3474 3475 gro_cells_destroy(&macsec->gro_cells); 3476 free_percpu(dev->tstats); 3477 } 3478 3479 static netdev_features_t macsec_fix_features(struct net_device *dev, 3480 netdev_features_t features) 3481 { 3482 struct macsec_dev *macsec = macsec_priv(dev); 3483 struct net_device *real_dev = macsec->real_dev; 3484 3485 features &= (real_dev->features & MACSEC_FEATURES) | 3486 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3487 features |= NETIF_F_LLTX; 3488 3489 return features; 3490 } 3491 3492 static int macsec_dev_open(struct net_device *dev) 3493 { 3494 struct macsec_dev *macsec = macsec_priv(dev); 3495 struct net_device *real_dev = macsec->real_dev; 3496 int err; 3497 3498 err = dev_uc_add(real_dev, dev->dev_addr); 3499 if (err < 0) 3500 return err; 3501 3502 if (dev->flags & IFF_ALLMULTI) { 3503 err = dev_set_allmulti(real_dev, 1); 3504 if (err < 0) 3505 goto del_unicast; 3506 } 3507 3508 if (dev->flags & IFF_PROMISC) { 3509 err = dev_set_promiscuity(real_dev, 1); 3510 if (err < 0) 3511 goto clear_allmulti; 3512 } 3513 3514 /* If h/w offloading is available, propagate to the device */ 3515 if (macsec_is_offloaded(macsec)) { 3516 const struct macsec_ops *ops; 3517 struct macsec_context ctx; 3518 3519 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3520 if (!ops) { 3521 err = -EOPNOTSUPP; 3522 goto clear_allmulti; 3523 } 3524 3525 ctx.secy = &macsec->secy; 3526 err = macsec_offload(ops->mdo_dev_open, &ctx); 3527 if (err) 3528 goto clear_allmulti; 3529 } 3530 3531 if (netif_carrier_ok(real_dev)) 3532 netif_carrier_on(dev); 3533 3534 return 0; 3535 clear_allmulti: 3536 if (dev->flags & IFF_ALLMULTI) 3537 dev_set_allmulti(real_dev, -1); 3538 del_unicast: 3539 dev_uc_del(real_dev, dev->dev_addr); 3540 netif_carrier_off(dev); 3541 return err; 3542 } 3543 3544 static int macsec_dev_stop(struct net_device *dev) 3545 { 3546 struct macsec_dev *macsec = macsec_priv(dev); 3547 struct net_device *real_dev = macsec->real_dev; 3548 3549 netif_carrier_off(dev); 3550 3551 /* If h/w offloading is available, propagate to the device */ 3552 if (macsec_is_offloaded(macsec)) { 3553 const struct macsec_ops *ops; 3554 struct macsec_context ctx; 3555 3556 ops = macsec_get_ops(macsec, &ctx); 3557 if (ops) { 3558 ctx.secy = &macsec->secy; 3559 macsec_offload(ops->mdo_dev_stop, &ctx); 3560 } 3561 } 3562 3563 dev_mc_unsync(real_dev, dev); 3564 dev_uc_unsync(real_dev, dev); 3565 3566 if (dev->flags & IFF_ALLMULTI) 3567 dev_set_allmulti(real_dev, -1); 3568 3569 if (dev->flags & IFF_PROMISC) 3570 dev_set_promiscuity(real_dev, -1); 3571 3572 dev_uc_del(real_dev, dev->dev_addr); 3573 3574 return 0; 3575 } 3576 3577 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3578 { 3579 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3580 3581 if (!(dev->flags & IFF_UP)) 3582 return; 3583 3584 if (change & IFF_ALLMULTI) 3585 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3586 3587 if (change & IFF_PROMISC) 3588 dev_set_promiscuity(real_dev, 3589 dev->flags & IFF_PROMISC ? 1 : -1); 3590 } 3591 3592 static void macsec_dev_set_rx_mode(struct net_device *dev) 3593 { 3594 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3595 3596 dev_mc_sync(real_dev, dev); 3597 dev_uc_sync(real_dev, dev); 3598 } 3599 3600 static int macsec_set_mac_address(struct net_device *dev, void *p) 3601 { 3602 struct macsec_dev *macsec = macsec_priv(dev); 3603 struct net_device *real_dev = macsec->real_dev; 3604 struct sockaddr *addr = p; 3605 int err; 3606 3607 if (!is_valid_ether_addr(addr->sa_data)) 3608 return -EADDRNOTAVAIL; 3609 3610 if (!(dev->flags & IFF_UP)) 3611 goto out; 3612 3613 err = dev_uc_add(real_dev, addr->sa_data); 3614 if (err < 0) 3615 return err; 3616 3617 dev_uc_del(real_dev, dev->dev_addr); 3618 3619 out: 3620 eth_hw_addr_set(dev, addr->sa_data); 3621 3622 /* If h/w offloading is available, propagate to the device */ 3623 if (macsec_is_offloaded(macsec)) { 3624 const struct macsec_ops *ops; 3625 struct macsec_context ctx; 3626 3627 ops = macsec_get_ops(macsec, &ctx); 3628 if (ops) { 3629 ctx.secy = &macsec->secy; 3630 macsec_offload(ops->mdo_upd_secy, &ctx); 3631 } 3632 } 3633 3634 return 0; 3635 } 3636 3637 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3638 { 3639 struct macsec_dev *macsec = macsec_priv(dev); 3640 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3641 3642 if (macsec->real_dev->mtu - extra < new_mtu) 3643 return -ERANGE; 3644 3645 dev->mtu = new_mtu; 3646 3647 return 0; 3648 } 3649 3650 static void macsec_get_stats64(struct net_device *dev, 3651 struct rtnl_link_stats64 *s) 3652 { 3653 if (!dev->tstats) 3654 return; 3655 3656 dev_fetch_sw_netstats(s, dev->tstats); 3657 3658 s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); 3659 s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); 3660 s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); 3661 } 3662 3663 static int macsec_get_iflink(const struct net_device *dev) 3664 { 3665 return macsec_priv(dev)->real_dev->ifindex; 3666 } 3667 3668 static const struct net_device_ops macsec_netdev_ops = { 3669 .ndo_init = macsec_dev_init, 3670 .ndo_uninit = macsec_dev_uninit, 3671 .ndo_open = macsec_dev_open, 3672 .ndo_stop = macsec_dev_stop, 3673 .ndo_fix_features = macsec_fix_features, 3674 .ndo_change_mtu = macsec_change_mtu, 3675 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3676 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3677 .ndo_set_mac_address = macsec_set_mac_address, 3678 .ndo_start_xmit = macsec_start_xmit, 3679 .ndo_get_stats64 = macsec_get_stats64, 3680 .ndo_get_iflink = macsec_get_iflink, 3681 }; 3682 3683 static const struct device_type macsec_type = { 3684 .name = "macsec", 3685 }; 3686 3687 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3688 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3689 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3690 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3691 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3692 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3693 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3694 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3695 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3696 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3697 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3698 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3699 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3700 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3701 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, 3702 }; 3703 3704 static void macsec_free_netdev(struct net_device *dev) 3705 { 3706 struct macsec_dev *macsec = macsec_priv(dev); 3707 3708 if (macsec->secy.tx_sc.md_dst) 3709 metadata_dst_free(macsec->secy.tx_sc.md_dst); 3710 free_percpu(macsec->stats); 3711 free_percpu(macsec->secy.tx_sc.stats); 3712 3713 /* Get rid of the macsec's reference to real_dev */ 3714 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3715 } 3716 3717 static void macsec_setup(struct net_device *dev) 3718 { 3719 ether_setup(dev); 3720 dev->min_mtu = 0; 3721 dev->max_mtu = ETH_MAX_MTU; 3722 dev->priv_flags |= IFF_NO_QUEUE; 3723 dev->netdev_ops = &macsec_netdev_ops; 3724 dev->needs_free_netdev = true; 3725 dev->priv_destructor = macsec_free_netdev; 3726 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3727 3728 eth_zero_addr(dev->broadcast); 3729 } 3730 3731 static int macsec_changelink_common(struct net_device *dev, 3732 struct nlattr *data[]) 3733 { 3734 struct macsec_secy *secy; 3735 struct macsec_tx_sc *tx_sc; 3736 3737 secy = &macsec_priv(dev)->secy; 3738 tx_sc = &secy->tx_sc; 3739 3740 if (data[IFLA_MACSEC_ENCODING_SA]) { 3741 struct macsec_tx_sa *tx_sa; 3742 3743 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3744 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3745 3746 secy->operational = tx_sa && tx_sa->active; 3747 } 3748 3749 if (data[IFLA_MACSEC_ENCRYPT]) 3750 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3751 3752 if (data[IFLA_MACSEC_PROTECT]) 3753 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3754 3755 if (data[IFLA_MACSEC_INC_SCI]) 3756 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3757 3758 if (data[IFLA_MACSEC_ES]) 3759 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3760 3761 if (data[IFLA_MACSEC_SCB]) 3762 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3763 3764 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3765 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3766 3767 if (data[IFLA_MACSEC_VALIDATION]) 3768 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3769 3770 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3771 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3772 case MACSEC_CIPHER_ID_GCM_AES_128: 3773 case MACSEC_DEFAULT_CIPHER_ID: 3774 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3775 secy->xpn = false; 3776 break; 3777 case MACSEC_CIPHER_ID_GCM_AES_256: 3778 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3779 secy->xpn = false; 3780 break; 3781 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3782 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3783 secy->xpn = true; 3784 break; 3785 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3786 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3787 secy->xpn = true; 3788 break; 3789 default: 3790 return -EINVAL; 3791 } 3792 } 3793 3794 if (data[IFLA_MACSEC_WINDOW]) { 3795 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3796 3797 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3798 * for XPN cipher suites */ 3799 if (secy->xpn && 3800 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3801 return -EINVAL; 3802 } 3803 3804 return 0; 3805 } 3806 3807 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3808 struct nlattr *data[], 3809 struct netlink_ext_ack *extack) 3810 { 3811 struct macsec_dev *macsec = macsec_priv(dev); 3812 bool macsec_offload_state_change = false; 3813 enum macsec_offload offload; 3814 struct macsec_tx_sc tx_sc; 3815 struct macsec_secy secy; 3816 int ret; 3817 3818 if (!data) 3819 return 0; 3820 3821 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3822 data[IFLA_MACSEC_ICV_LEN] || 3823 data[IFLA_MACSEC_SCI] || 3824 data[IFLA_MACSEC_PORT]) 3825 return -EINVAL; 3826 3827 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3828 * propagation fails, to revert macsec_changelink_common. 3829 */ 3830 memcpy(&secy, &macsec->secy, sizeof(secy)); 3831 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3832 3833 ret = macsec_changelink_common(dev, data); 3834 if (ret) 3835 goto cleanup; 3836 3837 if (data[IFLA_MACSEC_OFFLOAD]) { 3838 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); 3839 if (macsec->offload != offload) { 3840 macsec_offload_state_change = true; 3841 ret = macsec_update_offload(dev, offload); 3842 if (ret) 3843 goto cleanup; 3844 } 3845 } 3846 3847 /* If h/w offloading is available, propagate to the device */ 3848 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { 3849 const struct macsec_ops *ops; 3850 struct macsec_context ctx; 3851 3852 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3853 if (!ops) { 3854 ret = -EOPNOTSUPP; 3855 goto cleanup; 3856 } 3857 3858 ctx.secy = &macsec->secy; 3859 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3860 if (ret) 3861 goto cleanup; 3862 } 3863 3864 return 0; 3865 3866 cleanup: 3867 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3868 memcpy(&macsec->secy, &secy, sizeof(secy)); 3869 3870 return ret; 3871 } 3872 3873 static void macsec_del_dev(struct macsec_dev *macsec) 3874 { 3875 int i; 3876 3877 while (macsec->secy.rx_sc) { 3878 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3879 3880 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3881 free_rx_sc(rx_sc); 3882 } 3883 3884 for (i = 0; i < MACSEC_NUM_AN; i++) { 3885 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3886 3887 if (sa) { 3888 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3889 clear_tx_sa(sa); 3890 } 3891 } 3892 } 3893 3894 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3895 { 3896 struct macsec_dev *macsec = macsec_priv(dev); 3897 struct net_device *real_dev = macsec->real_dev; 3898 3899 /* If h/w offloading is available, propagate to the device */ 3900 if (macsec_is_offloaded(macsec)) { 3901 const struct macsec_ops *ops; 3902 struct macsec_context ctx; 3903 3904 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3905 if (ops) { 3906 ctx.secy = &macsec->secy; 3907 macsec_offload(ops->mdo_del_secy, &ctx); 3908 } 3909 } 3910 3911 unregister_netdevice_queue(dev, head); 3912 list_del_rcu(&macsec->secys); 3913 macsec_del_dev(macsec); 3914 netdev_upper_dev_unlink(real_dev, dev); 3915 3916 macsec_generation++; 3917 } 3918 3919 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3920 { 3921 struct macsec_dev *macsec = macsec_priv(dev); 3922 struct net_device *real_dev = macsec->real_dev; 3923 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3924 3925 macsec_common_dellink(dev, head); 3926 3927 if (list_empty(&rxd->secys)) { 3928 netdev_rx_handler_unregister(real_dev); 3929 kfree(rxd); 3930 } 3931 } 3932 3933 static int register_macsec_dev(struct net_device *real_dev, 3934 struct net_device *dev) 3935 { 3936 struct macsec_dev *macsec = macsec_priv(dev); 3937 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3938 3939 if (!rxd) { 3940 int err; 3941 3942 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3943 if (!rxd) 3944 return -ENOMEM; 3945 3946 INIT_LIST_HEAD(&rxd->secys); 3947 3948 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3949 rxd); 3950 if (err < 0) { 3951 kfree(rxd); 3952 return err; 3953 } 3954 } 3955 3956 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3957 return 0; 3958 } 3959 3960 static bool sci_exists(struct net_device *dev, sci_t sci) 3961 { 3962 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3963 struct macsec_dev *macsec; 3964 3965 list_for_each_entry(macsec, &rxd->secys, secys) { 3966 if (macsec->secy.sci == sci) 3967 return true; 3968 } 3969 3970 return false; 3971 } 3972 3973 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3974 { 3975 return make_sci(dev->dev_addr, port); 3976 } 3977 3978 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3979 { 3980 struct macsec_dev *macsec = macsec_priv(dev); 3981 struct macsec_secy *secy = &macsec->secy; 3982 3983 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3984 if (!macsec->stats) 3985 return -ENOMEM; 3986 3987 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3988 if (!secy->tx_sc.stats) 3989 return -ENOMEM; 3990 3991 secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); 3992 if (!secy->tx_sc.md_dst) 3993 /* macsec and secy percpu stats will be freed when unregistering 3994 * net_device in macsec_free_netdev() 3995 */ 3996 return -ENOMEM; 3997 3998 if (sci == MACSEC_UNDEF_SCI) 3999 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4000 4001 secy->netdev = dev; 4002 secy->operational = true; 4003 secy->key_len = DEFAULT_SAK_LEN; 4004 secy->icv_len = icv_len; 4005 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 4006 secy->protect_frames = true; 4007 secy->replay_protect = false; 4008 secy->xpn = DEFAULT_XPN; 4009 4010 secy->sci = sci; 4011 secy->tx_sc.md_dst->u.macsec_info.sci = sci; 4012 secy->tx_sc.active = true; 4013 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 4014 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 4015 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 4016 secy->tx_sc.end_station = false; 4017 secy->tx_sc.scb = false; 4018 4019 return 0; 4020 } 4021 4022 static struct lock_class_key macsec_netdev_addr_lock_key; 4023 4024 static int macsec_newlink(struct net *net, struct net_device *dev, 4025 struct nlattr *tb[], struct nlattr *data[], 4026 struct netlink_ext_ack *extack) 4027 { 4028 struct macsec_dev *macsec = macsec_priv(dev); 4029 rx_handler_func_t *rx_handler; 4030 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4031 struct net_device *real_dev; 4032 int err, mtu; 4033 sci_t sci; 4034 4035 if (!tb[IFLA_LINK]) 4036 return -EINVAL; 4037 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4038 if (!real_dev) 4039 return -ENODEV; 4040 if (real_dev->type != ARPHRD_ETHER) 4041 return -EINVAL; 4042 4043 dev->priv_flags |= IFF_MACSEC; 4044 4045 macsec->real_dev = real_dev; 4046 4047 if (data && data[IFLA_MACSEC_OFFLOAD]) 4048 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4049 else 4050 /* MACsec offloading is off by default */ 4051 macsec->offload = MACSEC_OFFLOAD_OFF; 4052 4053 /* Check if the offloading mode is supported by the underlying layers */ 4054 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4055 !macsec_check_offload(macsec->offload, macsec)) 4056 return -EOPNOTSUPP; 4057 4058 /* send_sci must be set to true when transmit sci explicitly is set */ 4059 if ((data && data[IFLA_MACSEC_SCI]) && 4060 (data && data[IFLA_MACSEC_INC_SCI])) { 4061 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4062 4063 if (!send_sci) 4064 return -EINVAL; 4065 } 4066 4067 if (data && data[IFLA_MACSEC_ICV_LEN]) 4068 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4069 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4070 if (mtu < 0) 4071 dev->mtu = 0; 4072 else 4073 dev->mtu = mtu; 4074 4075 rx_handler = rtnl_dereference(real_dev->rx_handler); 4076 if (rx_handler && rx_handler != macsec_handle_frame) 4077 return -EBUSY; 4078 4079 err = register_netdevice(dev); 4080 if (err < 0) 4081 return err; 4082 4083 netdev_lockdep_set_classes(dev); 4084 lockdep_set_class(&dev->addr_list_lock, 4085 &macsec_netdev_addr_lock_key); 4086 4087 err = netdev_upper_dev_link(real_dev, dev, extack); 4088 if (err < 0) 4089 goto unregister; 4090 4091 /* need to be already registered so that ->init has run and 4092 * the MAC addr is set 4093 */ 4094 if (data && data[IFLA_MACSEC_SCI]) 4095 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4096 else if (data && data[IFLA_MACSEC_PORT]) 4097 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4098 else 4099 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4100 4101 if (rx_handler && sci_exists(real_dev, sci)) { 4102 err = -EBUSY; 4103 goto unlink; 4104 } 4105 4106 err = macsec_add_dev(dev, sci, icv_len); 4107 if (err) 4108 goto unlink; 4109 4110 if (data) { 4111 err = macsec_changelink_common(dev, data); 4112 if (err) 4113 goto del_dev; 4114 } 4115 4116 /* If h/w offloading is available, propagate to the device */ 4117 if (macsec_is_offloaded(macsec)) { 4118 const struct macsec_ops *ops; 4119 struct macsec_context ctx; 4120 4121 ops = macsec_get_ops(macsec, &ctx); 4122 if (ops) { 4123 ctx.secy = &macsec->secy; 4124 err = macsec_offload(ops->mdo_add_secy, &ctx); 4125 if (err) 4126 goto del_dev; 4127 } 4128 } 4129 4130 err = register_macsec_dev(real_dev, dev); 4131 if (err < 0) 4132 goto del_dev; 4133 4134 netif_stacked_transfer_operstate(real_dev, dev); 4135 linkwatch_fire_event(dev); 4136 4137 macsec_generation++; 4138 4139 return 0; 4140 4141 del_dev: 4142 macsec_del_dev(macsec); 4143 unlink: 4144 netdev_upper_dev_unlink(real_dev, dev); 4145 unregister: 4146 unregister_netdevice(dev); 4147 return err; 4148 } 4149 4150 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4151 struct netlink_ext_ack *extack) 4152 { 4153 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4154 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4155 int flag; 4156 bool es, scb, sci; 4157 4158 if (!data) 4159 return 0; 4160 4161 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4162 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4163 4164 if (data[IFLA_MACSEC_ICV_LEN]) { 4165 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4166 if (icv_len != MACSEC_DEFAULT_ICV_LEN) { 4167 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4168 struct crypto_aead *dummy_tfm; 4169 4170 dummy_tfm = macsec_alloc_tfm(dummy_key, 4171 DEFAULT_SAK_LEN, 4172 icv_len); 4173 if (IS_ERR(dummy_tfm)) 4174 return PTR_ERR(dummy_tfm); 4175 crypto_free_aead(dummy_tfm); 4176 } 4177 } 4178 4179 switch (csid) { 4180 case MACSEC_CIPHER_ID_GCM_AES_128: 4181 case MACSEC_CIPHER_ID_GCM_AES_256: 4182 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 4183 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 4184 case MACSEC_DEFAULT_CIPHER_ID: 4185 if (icv_len < MACSEC_MIN_ICV_LEN || 4186 icv_len > MACSEC_STD_ICV_LEN) 4187 return -EINVAL; 4188 break; 4189 default: 4190 return -EINVAL; 4191 } 4192 4193 if (data[IFLA_MACSEC_ENCODING_SA]) { 4194 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 4195 return -EINVAL; 4196 } 4197 4198 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 4199 flag < IFLA_MACSEC_VALIDATION; 4200 flag++) { 4201 if (data[flag]) { 4202 if (nla_get_u8(data[flag]) > 1) 4203 return -EINVAL; 4204 } 4205 } 4206 4207 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 4208 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 4209 scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; 4210 4211 if ((sci && (scb || es)) || (scb && es)) 4212 return -EINVAL; 4213 4214 if (data[IFLA_MACSEC_VALIDATION] && 4215 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 4216 return -EINVAL; 4217 4218 if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 4219 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 4220 !data[IFLA_MACSEC_WINDOW]) 4221 return -EINVAL; 4222 4223 return 0; 4224 } 4225 4226 static struct net *macsec_get_link_net(const struct net_device *dev) 4227 { 4228 return dev_net(macsec_priv(dev)->real_dev); 4229 } 4230 4231 struct net_device *macsec_get_real_dev(const struct net_device *dev) 4232 { 4233 return macsec_priv(dev)->real_dev; 4234 } 4235 EXPORT_SYMBOL_GPL(macsec_get_real_dev); 4236 4237 bool macsec_netdev_is_offloaded(struct net_device *dev) 4238 { 4239 return macsec_is_offloaded(macsec_priv(dev)); 4240 } 4241 EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded); 4242 4243 static size_t macsec_get_size(const struct net_device *dev) 4244 { 4245 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ 4246 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */ 4247 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */ 4248 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */ 4249 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */ 4250 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */ 4251 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */ 4252 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */ 4253 nla_total_size(1) + /* IFLA_MACSEC_ES */ 4254 nla_total_size(1) + /* IFLA_MACSEC_SCB */ 4255 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ 4256 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ 4257 nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */ 4258 0; 4259 } 4260 4261 static int macsec_fill_info(struct sk_buff *skb, 4262 const struct net_device *dev) 4263 { 4264 struct macsec_tx_sc *tx_sc; 4265 struct macsec_dev *macsec; 4266 struct macsec_secy *secy; 4267 u64 csid; 4268 4269 macsec = macsec_priv(dev); 4270 secy = &macsec->secy; 4271 tx_sc = &secy->tx_sc; 4272 4273 switch (secy->key_len) { 4274 case MACSEC_GCM_AES_128_SAK_LEN: 4275 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 4276 break; 4277 case MACSEC_GCM_AES_256_SAK_LEN: 4278 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 4279 break; 4280 default: 4281 goto nla_put_failure; 4282 } 4283 4284 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, 4285 IFLA_MACSEC_PAD) || 4286 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 4287 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, 4288 csid, IFLA_MACSEC_PAD) || 4289 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 4290 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 4291 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 4292 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || 4293 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || 4294 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || 4295 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || 4296 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || 4297 nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || 4298 0) 4299 goto nla_put_failure; 4300 4301 if (secy->replay_protect) { 4302 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) 4303 goto nla_put_failure; 4304 } 4305 4306 return 0; 4307 4308 nla_put_failure: 4309 return -EMSGSIZE; 4310 } 4311 4312 static struct rtnl_link_ops macsec_link_ops __read_mostly = { 4313 .kind = "macsec", 4314 .priv_size = sizeof(struct macsec_dev), 4315 .maxtype = IFLA_MACSEC_MAX, 4316 .policy = macsec_rtnl_policy, 4317 .setup = macsec_setup, 4318 .validate = macsec_validate_attr, 4319 .newlink = macsec_newlink, 4320 .changelink = macsec_changelink, 4321 .dellink = macsec_dellink, 4322 .get_size = macsec_get_size, 4323 .fill_info = macsec_fill_info, 4324 .get_link_net = macsec_get_link_net, 4325 }; 4326 4327 static bool is_macsec_master(struct net_device *dev) 4328 { 4329 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; 4330 } 4331 4332 static int macsec_notify(struct notifier_block *this, unsigned long event, 4333 void *ptr) 4334 { 4335 struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); 4336 LIST_HEAD(head); 4337 4338 if (!is_macsec_master(real_dev)) 4339 return NOTIFY_DONE; 4340 4341 switch (event) { 4342 case NETDEV_DOWN: 4343 case NETDEV_UP: 4344 case NETDEV_CHANGE: { 4345 struct macsec_dev *m, *n; 4346 struct macsec_rxh_data *rxd; 4347 4348 rxd = macsec_data_rtnl(real_dev); 4349 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 4350 struct net_device *dev = m->secy.netdev; 4351 4352 netif_stacked_transfer_operstate(real_dev, dev); 4353 } 4354 break; 4355 } 4356 case NETDEV_UNREGISTER: { 4357 struct macsec_dev *m, *n; 4358 struct macsec_rxh_data *rxd; 4359 4360 rxd = macsec_data_rtnl(real_dev); 4361 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 4362 macsec_common_dellink(m->secy.netdev, &head); 4363 } 4364 4365 netdev_rx_handler_unregister(real_dev); 4366 kfree(rxd); 4367 4368 unregister_netdevice_many(&head); 4369 break; 4370 } 4371 case NETDEV_CHANGEMTU: { 4372 struct macsec_dev *m; 4373 struct macsec_rxh_data *rxd; 4374 4375 rxd = macsec_data_rtnl(real_dev); 4376 list_for_each_entry(m, &rxd->secys, secys) { 4377 struct net_device *dev = m->secy.netdev; 4378 unsigned int mtu = real_dev->mtu - (m->secy.icv_len + 4379 macsec_extra_len(true)); 4380 4381 if (dev->mtu > mtu) 4382 dev_set_mtu(dev, mtu); 4383 } 4384 } 4385 } 4386 4387 return NOTIFY_OK; 4388 } 4389 4390 static struct notifier_block macsec_notifier = { 4391 .notifier_call = macsec_notify, 4392 }; 4393 4394 static int __init macsec_init(void) 4395 { 4396 int err; 4397 4398 pr_info("MACsec IEEE 802.1AE\n"); 4399 err = register_netdevice_notifier(&macsec_notifier); 4400 if (err) 4401 return err; 4402 4403 err = rtnl_link_register(&macsec_link_ops); 4404 if (err) 4405 goto notifier; 4406 4407 err = genl_register_family(&macsec_fam); 4408 if (err) 4409 goto rtnl; 4410 4411 return 0; 4412 4413 rtnl: 4414 rtnl_link_unregister(&macsec_link_ops); 4415 notifier: 4416 unregister_netdevice_notifier(&macsec_notifier); 4417 return err; 4418 } 4419 4420 static void __exit macsec_exit(void) 4421 { 4422 genl_unregister_family(&macsec_fam); 4423 rtnl_link_unregister(&macsec_link_ops); 4424 unregister_netdevice_notifier(&macsec_notifier); 4425 rcu_barrier(); 4426 } 4427 4428 module_init(macsec_init); 4429 module_exit(macsec_exit); 4430 4431 MODULE_ALIAS_RTNL_LINK("macsec"); 4432 MODULE_ALIAS_GENL_FAMILY("macsec"); 4433 4434 MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 4435 MODULE_LICENSE("GPL v2"); 4436