// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
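
/* macsec_cb overlays skb->cb for the lifetime of a packet inside this
 * driver (macsec_skb_cb() below checks the size with BUILD_BUG_ON). It
 * carries the AEAD request and the SA used for the transform so that the
 * asynchronous crypto completion handlers can find them again; the SAs
 * are refcounted for exactly this reason, since completion may run after
 * the RCU read-side section that looked the SA up has ended.
 */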

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
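
/* A worked example of the length helpers above, following directly from
 * the definitions: without an SCI the SecTAG is MACSEC_TAG_LEN = 6
 * octets (TCI/AN, SL, 32-bit PN), with an SCI it is 6 + 8 = 14.
 * macsec_extra_len() adds sizeof(__be16) = 2 for the MACsec EtherType
 * inserted in front of the SecTAG (the original EtherType stays inside
 * the protected payload), so a frame grows by 8 or 16 octets at the
 * front plus the ICV at the tail.
 */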

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}
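
/* A concrete reading of the length checks in macsec_validate_skb()
 * below: when short_length is non-zero it encodes the exact secure data
 * length, so with an 8-octet SCI, a 16-octet ICV and short_length = 20,
 * skb->len minus the two MAC addresses must be exactly
 * macsec_extra_len(true) + icv_len + 20 = 16 + 16 + 20 = 52 octets.
 * When short_length is zero, the secure data must be at least
 * MIN_NON_SHORT_LEN = 48 octets long.
 */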

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}
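
/* macsec_msdu_len() recovers the MSDU (user data) length of a protected
 * frame: everything except the MAC addresses, the MACsec EtherType, the
 * SecTAG and the ICV. This is the quantity the OutOctets/InOctets
 * counters below are defined over, which is why the tx_bytes accounting
 * in macsec_encrypt_done() adds 2 * ETH_ALEN back on top of it.
 */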

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
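
/* TX data path. macsec_encrypt() below: take a reference on the encoding
 * SA, make sure the skb is private and has MACSEC_NEEDED_HEADROOM (16
 * bytes) of headroom plus room for the ICV at the tail, push the SecTAG,
 * assign the next PN under the SA lock, then run GCM-AES over it. With
 * confidentiality, the ethernet + MACsec header are the associated data
 * and the payload is encrypted; integrity-only authenticates the whole
 * frame. crypto_aead_encrypt() may complete asynchronously, in which
 * case -EINPROGRESS is returned here and macsec_encrypt_done() above
 * finishes the transmit.
 */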

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
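
/* RX post-decrypt bookkeeping. The replay check is re-done here because
 * the PN window may have moved while the (possibly asynchronous)
 * decryption was in flight. Worked example: with replay protection on,
 * replay_window = 32 and next_pn = 100, lowest_pn is 100 - 32 = 68, so
 * a frame with pn < 68 is counted as InPktsLate and dropped (for XPN,
 * only if pn and lowest_pn sit in the same 2^31 half, since the header
 * carries just the low 32 bits of the PN).
 */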

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
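
/* In macsec_decrypt() below, an XPN receiver has to reconstruct the full
 * 64-bit PN from the 32 bits carried in the SecTAG: it reuses its own
 * next_pn_halves.upper, and bumps it by one when the header PN is
 * numerically smaller than the expected lower half but lies in the other
 * 2^31 half, meaning the 32-bit counter wrapped. Example: with
 * next_pn_halves = { .upper = 7, .lower = 0xfffffff0 } and a header PN
 * of 0x00000005, the recovered PN is (8ULL << 32) | 0x00000005.
 */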

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
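
/* handle_not_macsec() below implements the "uncontrolled port" side of
 * the SecY: frames arriving on the real device without a MACsec
 * EtherType are passed up unchanged by default, and additionally cloned
 * to every non-strict MACsec device stacked on this link (counted as
 * InPktsUntagged). Under strict validation they are only counted
 * (InPktsNoTag) and dropped for that SecY. Offloaded devices are
 * special-cased, since the SecTAG was already stripped in hardware.
 */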

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			struct macsec_rx_sc *rx_sc = NULL;

			if (md_dst && md_dst->type == METADATA_MACSEC)
				rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);

			if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
				continue;

			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				__netif_rx(nskb);
			} else if (rx_sc || ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
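
/* macsec_handle_frame() is the rx_handler attached to the real device.
 * For a SecTAG'd frame it walks every SecY stacked on this link to find
 * the receive SC matching the frame's SCI (explicit, or derived from the
 * source MAC and the end-station port when the SC bit is absent), picks
 * the RX SA from the AN bits, does a cheap pre-decrypt replay check,
 * decrypts/validates, and delivers the clear frame to the controlled
 * port through GRO. Frames for an unknown SCI fall through to the nosci
 * handling at the end.
 */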

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
1196 */ 1197 u64_stats_update_begin(&rxsc_stats->syncp); 1198 rxsc_stats->stats.InPktsUnusedSA++; 1199 u64_stats_update_end(&rxsc_stats->syncp); 1200 if (active_rx_sa) 1201 this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); 1202 goto deliver; 1203 } 1204 1205 /* First, PN check to avoid decrypting obviously wrong packets */ 1206 hdr_pn = ntohl(hdr->packet_number); 1207 if (secy->replay_protect) { 1208 bool late; 1209 1210 spin_lock(&rx_sa->lock); 1211 late = rx_sa->next_pn_halves.lower >= secy->replay_window && 1212 hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window); 1213 1214 if (secy->xpn) 1215 late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn); 1216 spin_unlock(&rx_sa->lock); 1217 1218 if (late) { 1219 u64_stats_update_begin(&rxsc_stats->syncp); 1220 rxsc_stats->stats.InPktsLate++; 1221 u64_stats_update_end(&rxsc_stats->syncp); 1222 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1223 goto drop; 1224 } 1225 } 1226 1227 macsec_skb_cb(skb)->rx_sa = rx_sa; 1228 1229 /* Disabled && !changed text => skip validation */ 1230 if (hdr->tci_an & MACSEC_TCI_C || 1231 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1232 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1233 1234 if (IS_ERR(skb)) { 1235 /* the decrypt callback needs the reference */ 1236 if (PTR_ERR(skb) != -EINPROGRESS) { 1237 macsec_rxsa_put(rx_sa); 1238 macsec_rxsc_put(rx_sc); 1239 } 1240 rcu_read_unlock(); 1241 *pskb = NULL; 1242 return RX_HANDLER_CONSUMED; 1243 } 1244 1245 if (!macsec_post_decrypt(skb, secy, hdr_pn)) 1246 goto drop; 1247 1248 deliver: 1249 macsec_finalize_skb(skb, secy->icv_len, 1250 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1251 len = skb->len; 1252 macsec_reset_skb(skb, secy->netdev); 1253 1254 if (rx_sa) 1255 macsec_rxsa_put(rx_sa); 1256 macsec_rxsc_put(rx_sc); 1257 1258 skb_orphan(skb); 1259 ret = gro_cells_receive(&macsec->gro_cells, skb); 1260 if (ret == NET_RX_SUCCESS) 1261 count_rx(dev, len); 1262 else 1263 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1264 1265 rcu_read_unlock(); 1266 1267 *pskb = NULL; 1268 return RX_HANDLER_CONSUMED; 1269 1270 drop: 1271 macsec_rxsa_put(rx_sa); 1272 drop_nosa: 1273 macsec_rxsc_put(rx_sc); 1274 rcu_read_unlock(); 1275 drop_direct: 1276 kfree_skb(skb); 1277 *pskb = NULL; 1278 return RX_HANDLER_CONSUMED; 1279 1280 nosci: 1281 /* 10.6.1 if the SC is not found */ 1282 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1283 if (!cbit) 1284 macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN, 1285 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1286 1287 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1288 struct sk_buff *nskb; 1289 1290 secy_stats = this_cpu_ptr(macsec->stats); 1291 1292 /* If validateFrames is Strict or the C bit in the 1293 * SecTAG is set, discard 1294 */ 1295 if (cbit || 1296 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1297 u64_stats_update_begin(&secy_stats->syncp); 1298 secy_stats->stats.InPktsNoSCI++; 1299 u64_stats_update_end(&secy_stats->syncp); 1300 DEV_STATS_INC(macsec->secy.netdev, rx_errors); 1301 continue; 1302 } 1303 1304 /* not strict, the frame (with the SecTAG and ICV 1305 * removed) is delivered to the Controlled Port. 
1306 */ 1307 nskb = skb_clone(skb, GFP_ATOMIC); 1308 if (!nskb) 1309 break; 1310 1311 macsec_reset_skb(nskb, macsec->secy.netdev); 1312 1313 ret = __netif_rx(nskb); 1314 if (ret == NET_RX_SUCCESS) { 1315 u64_stats_update_begin(&secy_stats->syncp); 1316 secy_stats->stats.InPktsUnknownSCI++; 1317 u64_stats_update_end(&secy_stats->syncp); 1318 } else { 1319 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); 1320 } 1321 } 1322 1323 rcu_read_unlock(); 1324 *pskb = skb; 1325 return RX_HANDLER_PASS; 1326 } 1327 1328 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1329 { 1330 struct crypto_aead *tfm; 1331 int ret; 1332 1333 /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ 1334 tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); 1335 1336 if (IS_ERR(tfm)) 1337 return tfm; 1338 1339 ret = crypto_aead_setkey(tfm, key, key_len); 1340 if (ret < 0) 1341 goto fail; 1342 1343 ret = crypto_aead_setauthsize(tfm, icv_len); 1344 if (ret < 0) 1345 goto fail; 1346 1347 return tfm; 1348 fail: 1349 crypto_free_aead(tfm); 1350 return ERR_PTR(ret); 1351 } 1352 1353 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1354 int icv_len) 1355 { 1356 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1357 if (!rx_sa->stats) 1358 return -ENOMEM; 1359 1360 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1361 if (IS_ERR(rx_sa->key.tfm)) { 1362 free_percpu(rx_sa->stats); 1363 return PTR_ERR(rx_sa->key.tfm); 1364 } 1365 1366 rx_sa->ssci = MACSEC_UNDEF_SSCI; 1367 rx_sa->active = false; 1368 rx_sa->next_pn = 1; 1369 refcount_set(&rx_sa->refcnt, 1); 1370 spin_lock_init(&rx_sa->lock); 1371 1372 return 0; 1373 } 1374 1375 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1376 { 1377 rx_sa->active = false; 1378 1379 macsec_rxsa_put(rx_sa); 1380 } 1381 1382 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1383 { 1384 int i; 1385 1386 for (i = 0; i < MACSEC_NUM_AN; i++) { 1387 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1388 1389 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1390 if (sa) 1391 clear_rx_sa(sa); 1392 } 1393 1394 macsec_rxsc_put(rx_sc); 1395 } 1396 1397 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1398 { 1399 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1400 1401 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1402 rx_sc; 1403 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1404 if (rx_sc->sci == sci) { 1405 if (rx_sc->active) 1406 secy->n_rx_sc--; 1407 rcu_assign_pointer(*rx_scp, rx_sc->next); 1408 return rx_sc; 1409 } 1410 } 1411 1412 return NULL; 1413 } 1414 1415 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, 1416 bool active) 1417 { 1418 struct macsec_rx_sc *rx_sc; 1419 struct macsec_dev *macsec; 1420 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1421 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1422 struct macsec_secy *secy; 1423 1424 list_for_each_entry(macsec, &rxd->secys, secys) { 1425 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1426 return ERR_PTR(-EEXIST); 1427 } 1428 1429 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1430 if (!rx_sc) 1431 return ERR_PTR(-ENOMEM); 1432 1433 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1434 if (!rx_sc->stats) { 1435 kfree(rx_sc); 1436 return ERR_PTR(-ENOMEM); 1437 } 1438 1439 rx_sc->sci = sci; 1440 rx_sc->active = active; 1441 refcount_set(&rx_sc->refcnt, 1); 1442 1443 secy = &macsec_priv(dev)->secy; 1444 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1445 

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
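
/* Genetlink attribute layout, as encoded by the policies below: the
 * top-level message carries MACSEC_ATTR_IFINDEX plus nested
 * MACSEC_ATTR_RXSC_CONFIG (SCI, active flag) and/or
 * MACSEC_ATTR_SA_CONFIG (AN, PN, key id, key, and for XPN the SSCI and
 * salt) containers. The get_*_from_nl() helpers above consume these
 * already-parsed tables; all of them run under RTNL, taken by the genl
 * handlers, which is what makes rtnl_dereference() safe here.
 */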

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
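
/* macsec_add_rxsa() below enforces the PN attribute length against the
 * cipher mode: MACSEC_SA_ATTR_PN must be 8 bytes (MACSEC_XPN_PN_LEN)
 * for XPN suites and 4 bytes (MACSEC_DEFAULT_PN_LEN) otherwise, and XPN
 * additionally requires MACSEC_SA_ATTR_SSCI and a MACSEC_SALT_LEN salt.
 * The PN itself is optional on the RX side; init_rx_sa() starts the
 * window at next_pn = 1 when none is given.
 */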

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}
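
/* The configuration handlers here all follow the same offload pattern
 * seen above: software state is built first, then, if the device is
 * offloaded, the operation is mirrored to the driver through
 * macsec_get_ops()/macsec_offload(); the key is copied into the context
 * only for the duration of the call and wiped with memzero_explicit().
 * If the driver rejects the operation, the software state is unwound
 * and the error is returned to userspace.
 */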

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
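
/* Unlike the RX case, validate_add_txsa() above makes MACSEC_SA_ATTR_PN
 * mandatory and non-zero: the transmit side has no sensible default
 * starting PN. Note also in macsec_add_txsa() below that installing an
 * active SA at the encoding_sa slot flips secy->operational back to
 * true, which re-enables transmission on the controlled port.
 */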

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
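
/* Deletion handlers: an SA must be deactivated before it can be deleted;
 * macsec_del_rxsa() and macsec_del_txsa() below return -EBUSY while the
 * SA is still marked active. RX SCs are unlinked from the SecY first
 * (del_rx_sc) and only then torn down, so concurrent RX lookups either
 * see the whole SC or none of it.
 */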
macsec_rx_sa *rx_sa; 2110 u8 assoc_num; 2111 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2112 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2113 int ret; 2114 2115 if (!attrs[MACSEC_ATTR_IFINDEX]) 2116 return -EINVAL; 2117 2118 if (parse_sa_config(attrs, tb_sa)) 2119 return -EINVAL; 2120 2121 if (parse_rxsc_config(attrs, tb_rxsc)) 2122 return -EINVAL; 2123 2124 rtnl_lock(); 2125 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2126 &dev, &secy, &rx_sc, &assoc_num); 2127 if (IS_ERR(rx_sa)) { 2128 rtnl_unlock(); 2129 return PTR_ERR(rx_sa); 2130 } 2131 2132 if (rx_sa->active) { 2133 rtnl_unlock(); 2134 return -EBUSY; 2135 } 2136 2137 /* If h/w offloading is available, propagate to the device */ 2138 if (macsec_is_offloaded(netdev_priv(dev))) { 2139 const struct macsec_ops *ops; 2140 struct macsec_context ctx; 2141 2142 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2143 if (!ops) { 2144 ret = -EOPNOTSUPP; 2145 goto cleanup; 2146 } 2147 2148 ctx.sa.assoc_num = assoc_num; 2149 ctx.sa.rx_sa = rx_sa; 2150 ctx.secy = secy; 2151 2152 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2153 if (ret) 2154 goto cleanup; 2155 } 2156 2157 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2158 clear_rx_sa(rx_sa); 2159 2160 rtnl_unlock(); 2161 2162 return 0; 2163 2164 cleanup: 2165 rtnl_unlock(); 2166 return ret; 2167 } 2168 2169 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2170 { 2171 struct nlattr **attrs = info->attrs; 2172 struct net_device *dev; 2173 struct macsec_secy *secy; 2174 struct macsec_rx_sc *rx_sc; 2175 sci_t sci; 2176 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2177 int ret; 2178 2179 if (!attrs[MACSEC_ATTR_IFINDEX]) 2180 return -EINVAL; 2181 2182 if (parse_rxsc_config(attrs, tb_rxsc)) 2183 return -EINVAL; 2184 2185 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2186 return -EINVAL; 2187 2188 rtnl_lock(); 2189 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2190 if (IS_ERR(dev)) { 2191 rtnl_unlock(); 2192 return PTR_ERR(dev); 2193 } 2194 2195 secy = &macsec_priv(dev)->secy; 2196 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2197 2198 rx_sc = del_rx_sc(secy, sci); 2199 if (!rx_sc) { 2200 rtnl_unlock(); 2201 return -ENODEV; 2202 } 2203 2204 /* If h/w offloading is available, propagate to the device */ 2205 if (macsec_is_offloaded(netdev_priv(dev))) { 2206 const struct macsec_ops *ops; 2207 struct macsec_context ctx; 2208 2209 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2210 if (!ops) { 2211 ret = -EOPNOTSUPP; 2212 goto cleanup; 2213 } 2214 2215 ctx.rx_sc = rx_sc; 2216 ctx.secy = secy; 2217 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2218 if (ret) 2219 goto cleanup; 2220 } 2221 2222 free_rx_sc(rx_sc); 2223 rtnl_unlock(); 2224 2225 return 0; 2226 2227 cleanup: 2228 rtnl_unlock(); 2229 return ret; 2230 } 2231 2232 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2233 { 2234 struct nlattr **attrs = info->attrs; 2235 struct net_device *dev; 2236 struct macsec_secy *secy; 2237 struct macsec_tx_sc *tx_sc; 2238 struct macsec_tx_sa *tx_sa; 2239 u8 assoc_num; 2240 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2241 int ret; 2242 2243 if (!attrs[MACSEC_ATTR_IFINDEX]) 2244 return -EINVAL; 2245 2246 if (parse_sa_config(attrs, tb_sa)) 2247 return -EINVAL; 2248 2249 rtnl_lock(); 2250 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2251 &dev, &secy, &tx_sc, &assoc_num); 2252 if (IS_ERR(tx_sa)) { 2253 rtnl_unlock(); 2254 return PTR_ERR(tx_sa); 2255 } 2256 2257 if (tx_sa->active) { 2258 rtnl_unlock(); 2259 return -EBUSY; 
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

/* An update may toggle an SA's active flag or rewrite its packet number,
 * but it must not carry key material: a new key means a new SA, so KEY,
 * KEYID, SSCI and SALT are only accepted by the add commands.
 */
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		/* XPN cipher suites use 64-bit packet numbers, the other
		 * suites 32-bit ones.
		 */
		pn_len = secy->xpn ?
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2353 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2354 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2355 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2356 rtnl_unlock(); 2357 return -EINVAL; 2358 } 2359 2360 spin_lock_bh(&tx_sa->lock); 2361 prev_pn = tx_sa->next_pn_halves; 2362 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2363 spin_unlock_bh(&tx_sa->lock); 2364 } 2365 2366 was_active = tx_sa->active; 2367 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2368 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2369 2370 was_operational = secy->operational; 2371 if (assoc_num == tx_sc->encoding_sa) 2372 secy->operational = tx_sa->active; 2373 2374 /* If h/w offloading is available, propagate to the device */ 2375 if (macsec_is_offloaded(netdev_priv(dev))) { 2376 const struct macsec_ops *ops; 2377 struct macsec_context ctx; 2378 2379 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2380 if (!ops) { 2381 ret = -EOPNOTSUPP; 2382 goto cleanup; 2383 } 2384 2385 ctx.sa.assoc_num = assoc_num; 2386 ctx.sa.tx_sa = tx_sa; 2387 ctx.secy = secy; 2388 2389 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2390 if (ret) 2391 goto cleanup; 2392 } 2393 2394 rtnl_unlock(); 2395 2396 return 0; 2397 2398 cleanup: 2399 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2400 spin_lock_bh(&tx_sa->lock); 2401 tx_sa->next_pn_halves = prev_pn; 2402 spin_unlock_bh(&tx_sa->lock); 2403 } 2404 tx_sa->active = was_active; 2405 secy->operational = was_operational; 2406 rtnl_unlock(); 2407 return ret; 2408 } 2409 2410 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2411 { 2412 struct nlattr **attrs = info->attrs; 2413 struct net_device *dev; 2414 struct macsec_secy *secy; 2415 struct macsec_rx_sc *rx_sc; 2416 struct macsec_rx_sa *rx_sa; 2417 u8 assoc_num; 2418 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2419 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2420 bool was_active; 2421 pn_t prev_pn; 2422 int ret = 0; 2423 2424 prev_pn.full64 = 0; 2425 2426 if (!attrs[MACSEC_ATTR_IFINDEX]) 2427 return -EINVAL; 2428 2429 if (parse_rxsc_config(attrs, tb_rxsc)) 2430 return -EINVAL; 2431 2432 if (parse_sa_config(attrs, tb_sa)) 2433 return -EINVAL; 2434 2435 if (!validate_upd_sa(tb_sa)) 2436 return -EINVAL; 2437 2438 rtnl_lock(); 2439 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2440 &dev, &secy, &rx_sc, &assoc_num); 2441 if (IS_ERR(rx_sa)) { 2442 rtnl_unlock(); 2443 return PTR_ERR(rx_sa); 2444 } 2445 2446 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2447 int pn_len; 2448 2449 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2450 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2451 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2452 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2453 rtnl_unlock(); 2454 return -EINVAL; 2455 } 2456 2457 spin_lock_bh(&rx_sa->lock); 2458 prev_pn = rx_sa->next_pn_halves; 2459 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2460 spin_unlock_bh(&rx_sa->lock); 2461 } 2462 2463 was_active = rx_sa->active; 2464 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2465 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2466 2467 /* If h/w offloading is available, propagate to the device */ 2468 if (macsec_is_offloaded(netdev_priv(dev))) { 2469 const struct macsec_ops *ops; 2470 struct macsec_context ctx; 2471 2472 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2473 if (!ops) { 2474 ret = -EOPNOTSUPP; 2475 goto cleanup; 2476 } 2477 2478 ctx.sa.assoc_num = assoc_num; 2479 ctx.sa.rx_sa = rx_sa; 2480 ctx.secy = secy; 2481 2482 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2483 if (ret) 2484 goto cleanup; 2485 } 2486 2487 rtnl_unlock(); 2488 return 0; 2489 2490 cleanup: 2491 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2492 spin_lock_bh(&rx_sa->lock); 2493 rx_sa->next_pn_halves = prev_pn; 2494 spin_unlock_bh(&rx_sa->lock); 2495 } 2496 rx_sa->active = was_active; 2497 rtnl_unlock(); 2498 return ret; 2499 } 2500 2501 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2502 { 2503 struct nlattr **attrs = info->attrs; 2504 struct net_device *dev; 2505 struct macsec_secy *secy; 2506 struct macsec_rx_sc *rx_sc; 2507 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2508 unsigned int prev_n_rx_sc; 2509 bool was_active; 2510 int ret; 2511 2512 if (!attrs[MACSEC_ATTR_IFINDEX]) 2513 return -EINVAL; 2514 2515 if (parse_rxsc_config(attrs, tb_rxsc)) 2516 return -EINVAL; 2517 2518 if (!validate_add_rxsc(tb_rxsc)) 2519 return -EINVAL; 2520 2521 rtnl_lock(); 2522 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2523 if (IS_ERR(rx_sc)) { 2524 rtnl_unlock(); 2525 return PTR_ERR(rx_sc); 2526 } 2527 2528 was_active = rx_sc->active; 2529 prev_n_rx_sc = secy->n_rx_sc; 2530 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2531 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2532 2533 if (rx_sc->active != new) 2534 secy->n_rx_sc += new ? 
						     1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

/* A SecY counts as "configured" once it has at least one RX SC or one TX SA
 * installed; after that point offload mode changes are refused, because
 * migrating rules between software and hardware is not supported.
 */
static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->rx_sc)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
	enum macsec_offload prev_offload;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret = 0;

	macsec = macsec_priv(dev);

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec))
		return -EBUSY;

	prev_offload = macsec->offload;

	/* When turning offload off, look up the ops of the device that was
	 * doing the offloading, so the SecY can be removed from it.
	 */
	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops)
		return -EOPNOTSUPP;

	macsec->offload = offload;

	ctx.secy = &macsec->secy;
	ret = offload == MACSEC_OFFLOAD_OFF ?
macsec_offload(ops->mdo_del_secy, &ctx) 2621 : macsec_offload(ops->mdo_add_secy, &ctx); 2622 if (ret) 2623 macsec->offload = prev_offload; 2624 2625 return ret; 2626 } 2627 2628 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2629 { 2630 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2631 struct nlattr **attrs = info->attrs; 2632 enum macsec_offload offload; 2633 struct macsec_dev *macsec; 2634 struct net_device *dev; 2635 int ret = 0; 2636 2637 if (!attrs[MACSEC_ATTR_IFINDEX]) 2638 return -EINVAL; 2639 2640 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2641 return -EINVAL; 2642 2643 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2644 attrs[MACSEC_ATTR_OFFLOAD], 2645 macsec_genl_offload_policy, NULL)) 2646 return -EINVAL; 2647 2648 rtnl_lock(); 2649 2650 dev = get_dev_from_nl(genl_info_net(info), attrs); 2651 if (IS_ERR(dev)) { 2652 ret = PTR_ERR(dev); 2653 goto out; 2654 } 2655 macsec = macsec_priv(dev); 2656 2657 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { 2658 ret = -EINVAL; 2659 goto out; 2660 } 2661 2662 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2663 2664 if (macsec->offload != offload) 2665 ret = macsec_update_offload(dev, offload); 2666 out: 2667 rtnl_unlock(); 2668 return ret; 2669 } 2670 2671 static void get_tx_sa_stats(struct net_device *dev, int an, 2672 struct macsec_tx_sa *tx_sa, 2673 struct macsec_tx_sa_stats *sum) 2674 { 2675 struct macsec_dev *macsec = macsec_priv(dev); 2676 int cpu; 2677 2678 /* If h/w offloading is available, propagate to the device */ 2679 if (macsec_is_offloaded(macsec)) { 2680 const struct macsec_ops *ops; 2681 struct macsec_context ctx; 2682 2683 ops = macsec_get_ops(macsec, &ctx); 2684 if (ops) { 2685 ctx.sa.assoc_num = an; 2686 ctx.sa.tx_sa = tx_sa; 2687 ctx.stats.tx_sa_stats = sum; 2688 ctx.secy = &macsec_priv(dev)->secy; 2689 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2690 } 2691 return; 2692 } 2693 2694 for_each_possible_cpu(cpu) { 2695 const struct macsec_tx_sa_stats *stats = 2696 per_cpu_ptr(tx_sa->stats, cpu); 2697 2698 sum->OutPktsProtected += stats->OutPktsProtected; 2699 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2700 } 2701 } 2702 2703 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2704 { 2705 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2706 sum->OutPktsProtected) || 2707 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2708 sum->OutPktsEncrypted)) 2709 return -EMSGSIZE; 2710 2711 return 0; 2712 } 2713 2714 static void get_rx_sa_stats(struct net_device *dev, 2715 struct macsec_rx_sc *rx_sc, int an, 2716 struct macsec_rx_sa *rx_sa, 2717 struct macsec_rx_sa_stats *sum) 2718 { 2719 struct macsec_dev *macsec = macsec_priv(dev); 2720 int cpu; 2721 2722 /* If h/w offloading is available, propagate to the device */ 2723 if (macsec_is_offloaded(macsec)) { 2724 const struct macsec_ops *ops; 2725 struct macsec_context ctx; 2726 2727 ops = macsec_get_ops(macsec, &ctx); 2728 if (ops) { 2729 ctx.sa.assoc_num = an; 2730 ctx.sa.rx_sa = rx_sa; 2731 ctx.stats.rx_sa_stats = sum; 2732 ctx.secy = &macsec_priv(dev)->secy; 2733 ctx.rx_sc = rx_sc; 2734 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2735 } 2736 return; 2737 } 2738 2739 for_each_possible_cpu(cpu) { 2740 const struct macsec_rx_sa_stats *stats = 2741 per_cpu_ptr(rx_sa->stats, cpu); 2742 2743 sum->InPktsOK += stats->InPktsOK; 2744 sum->InPktsInvalid += stats->InPktsInvalid; 2745 sum->InPktsNotValid += stats->InPktsNotValid; 2746 sum->InPktsNotUsingSA += 
stats->InPktsNotUsingSA; 2747 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2748 } 2749 } 2750 2751 static int copy_rx_sa_stats(struct sk_buff *skb, 2752 struct macsec_rx_sa_stats *sum) 2753 { 2754 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2755 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2756 sum->InPktsInvalid) || 2757 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2758 sum->InPktsNotValid) || 2759 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2760 sum->InPktsNotUsingSA) || 2761 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2762 sum->InPktsUnusedSA)) 2763 return -EMSGSIZE; 2764 2765 return 0; 2766 } 2767 2768 static void get_rx_sc_stats(struct net_device *dev, 2769 struct macsec_rx_sc *rx_sc, 2770 struct macsec_rx_sc_stats *sum) 2771 { 2772 struct macsec_dev *macsec = macsec_priv(dev); 2773 int cpu; 2774 2775 /* If h/w offloading is available, propagate to the device */ 2776 if (macsec_is_offloaded(macsec)) { 2777 const struct macsec_ops *ops; 2778 struct macsec_context ctx; 2779 2780 ops = macsec_get_ops(macsec, &ctx); 2781 if (ops) { 2782 ctx.stats.rx_sc_stats = sum; 2783 ctx.secy = &macsec_priv(dev)->secy; 2784 ctx.rx_sc = rx_sc; 2785 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2786 } 2787 return; 2788 } 2789 2790 for_each_possible_cpu(cpu) { 2791 const struct pcpu_rx_sc_stats *stats; 2792 struct macsec_rx_sc_stats tmp; 2793 unsigned int start; 2794 2795 stats = per_cpu_ptr(rx_sc->stats, cpu); 2796 do { 2797 start = u64_stats_fetch_begin(&stats->syncp); 2798 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2799 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2800 2801 sum->InOctetsValidated += tmp.InOctetsValidated; 2802 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2803 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2804 sum->InPktsDelayed += tmp.InPktsDelayed; 2805 sum->InPktsOK += tmp.InPktsOK; 2806 sum->InPktsInvalid += tmp.InPktsInvalid; 2807 sum->InPktsLate += tmp.InPktsLate; 2808 sum->InPktsNotValid += tmp.InPktsNotValid; 2809 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2810 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2811 } 2812 } 2813 2814 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2815 { 2816 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2817 sum->InOctetsValidated, 2818 MACSEC_RXSC_STATS_ATTR_PAD) || 2819 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2820 sum->InOctetsDecrypted, 2821 MACSEC_RXSC_STATS_ATTR_PAD) || 2822 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2823 sum->InPktsUnchecked, 2824 MACSEC_RXSC_STATS_ATTR_PAD) || 2825 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2826 sum->InPktsDelayed, 2827 MACSEC_RXSC_STATS_ATTR_PAD) || 2828 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2829 sum->InPktsOK, 2830 MACSEC_RXSC_STATS_ATTR_PAD) || 2831 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2832 sum->InPktsInvalid, 2833 MACSEC_RXSC_STATS_ATTR_PAD) || 2834 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2835 sum->InPktsLate, 2836 MACSEC_RXSC_STATS_ATTR_PAD) || 2837 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2838 sum->InPktsNotValid, 2839 MACSEC_RXSC_STATS_ATTR_PAD) || 2840 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2841 sum->InPktsNotUsingSA, 2842 MACSEC_RXSC_STATS_ATTR_PAD) || 2843 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2844 sum->InPktsUnusedSA, 2845 
MACSEC_RXSC_STATS_ATTR_PAD)) 2846 return -EMSGSIZE; 2847 2848 return 0; 2849 } 2850 2851 static void get_tx_sc_stats(struct net_device *dev, 2852 struct macsec_tx_sc_stats *sum) 2853 { 2854 struct macsec_dev *macsec = macsec_priv(dev); 2855 int cpu; 2856 2857 /* If h/w offloading is available, propagate to the device */ 2858 if (macsec_is_offloaded(macsec)) { 2859 const struct macsec_ops *ops; 2860 struct macsec_context ctx; 2861 2862 ops = macsec_get_ops(macsec, &ctx); 2863 if (ops) { 2864 ctx.stats.tx_sc_stats = sum; 2865 ctx.secy = &macsec_priv(dev)->secy; 2866 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2867 } 2868 return; 2869 } 2870 2871 for_each_possible_cpu(cpu) { 2872 const struct pcpu_tx_sc_stats *stats; 2873 struct macsec_tx_sc_stats tmp; 2874 unsigned int start; 2875 2876 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2877 do { 2878 start = u64_stats_fetch_begin(&stats->syncp); 2879 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2880 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2881 2882 sum->OutPktsProtected += tmp.OutPktsProtected; 2883 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2884 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2885 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2886 } 2887 } 2888 2889 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2890 { 2891 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2892 sum->OutPktsProtected, 2893 MACSEC_TXSC_STATS_ATTR_PAD) || 2894 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2895 sum->OutPktsEncrypted, 2896 MACSEC_TXSC_STATS_ATTR_PAD) || 2897 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2898 sum->OutOctetsProtected, 2899 MACSEC_TXSC_STATS_ATTR_PAD) || 2900 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2901 sum->OutOctetsEncrypted, 2902 MACSEC_TXSC_STATS_ATTR_PAD)) 2903 return -EMSGSIZE; 2904 2905 return 0; 2906 } 2907 2908 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2909 { 2910 struct macsec_dev *macsec = macsec_priv(dev); 2911 int cpu; 2912 2913 /* If h/w offloading is available, propagate to the device */ 2914 if (macsec_is_offloaded(macsec)) { 2915 const struct macsec_ops *ops; 2916 struct macsec_context ctx; 2917 2918 ops = macsec_get_ops(macsec, &ctx); 2919 if (ops) { 2920 ctx.stats.dev_stats = sum; 2921 ctx.secy = &macsec_priv(dev)->secy; 2922 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2923 } 2924 return; 2925 } 2926 2927 for_each_possible_cpu(cpu) { 2928 const struct pcpu_secy_stats *stats; 2929 struct macsec_dev_stats tmp; 2930 unsigned int start; 2931 2932 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2933 do { 2934 start = u64_stats_fetch_begin(&stats->syncp); 2935 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2936 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2937 2938 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2939 sum->InPktsUntagged += tmp.InPktsUntagged; 2940 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2941 sum->InPktsNoTag += tmp.InPktsNoTag; 2942 sum->InPktsBadTag += tmp.InPktsBadTag; 2943 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2944 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2945 sum->InPktsOverrun += tmp.InPktsOverrun; 2946 } 2947 } 2948 2949 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2950 { 2951 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2952 sum->OutPktsUntagged, 2953 MACSEC_SECY_STATS_ATTR_PAD) || 2954 nla_put_u64_64bit(skb, 
			      MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum->InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum->OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum->InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum->InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum->InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum->InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum->InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

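	/* cb->seq was set to macsec_generation when this dump started;
	 * genl_dump_check_consistent() flags the message with NLM_F_DUMP_INTR
	 * if a MACsec link was added or removed (bumping macsec_generation)
	 * while the dump was in progress, so userspace knows to retry.
	 */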
genl_dump_check_consistent(cb, hdr); 3053 3054 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3055 goto nla_put_failure; 3056 3057 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3058 if (!attr) 3059 goto nla_put_failure; 3060 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3061 goto nla_put_failure; 3062 nla_nest_end(skb, attr); 3063 3064 if (nla_put_secy(secy, skb)) 3065 goto nla_put_failure; 3066 3067 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3068 if (!attr) 3069 goto nla_put_failure; 3070 3071 get_tx_sc_stats(dev, &tx_sc_stats); 3072 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3073 nla_nest_cancel(skb, attr); 3074 goto nla_put_failure; 3075 } 3076 nla_nest_end(skb, attr); 3077 3078 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3079 if (!attr) 3080 goto nla_put_failure; 3081 get_secy_stats(dev, &dev_stats); 3082 if (copy_secy_stats(skb, &dev_stats)) { 3083 nla_nest_cancel(skb, attr); 3084 goto nla_put_failure; 3085 } 3086 nla_nest_end(skb, attr); 3087 3088 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3089 if (!txsa_list) 3090 goto nla_put_failure; 3091 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3092 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3093 struct nlattr *txsa_nest; 3094 u64 pn; 3095 int pn_len; 3096 3097 if (!tx_sa) 3098 continue; 3099 3100 txsa_nest = nla_nest_start_noflag(skb, j++); 3101 if (!txsa_nest) { 3102 nla_nest_cancel(skb, txsa_list); 3103 goto nla_put_failure; 3104 } 3105 3106 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3107 if (!attr) { 3108 nla_nest_cancel(skb, txsa_nest); 3109 nla_nest_cancel(skb, txsa_list); 3110 goto nla_put_failure; 3111 } 3112 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3113 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3114 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3115 nla_nest_cancel(skb, attr); 3116 nla_nest_cancel(skb, txsa_nest); 3117 nla_nest_cancel(skb, txsa_list); 3118 goto nla_put_failure; 3119 } 3120 nla_nest_end(skb, attr); 3121 3122 if (secy->xpn) { 3123 pn = tx_sa->next_pn; 3124 pn_len = MACSEC_XPN_PN_LEN; 3125 } else { 3126 pn = tx_sa->next_pn_halves.lower; 3127 pn_len = MACSEC_DEFAULT_PN_LEN; 3128 } 3129 3130 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3131 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3132 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3133 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3134 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3135 nla_nest_cancel(skb, txsa_nest); 3136 nla_nest_cancel(skb, txsa_list); 3137 goto nla_put_failure; 3138 } 3139 3140 nla_nest_end(skb, txsa_nest); 3141 } 3142 nla_nest_end(skb, txsa_list); 3143 3144 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3145 if (!rxsc_list) 3146 goto nla_put_failure; 3147 3148 j = 1; 3149 for_each_rxsc_rtnl(secy, rx_sc) { 3150 int k; 3151 struct nlattr *rxsa_list; 3152 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3153 3154 if (!rxsc_nest) { 3155 nla_nest_cancel(skb, rxsc_list); 3156 goto nla_put_failure; 3157 } 3158 3159 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3160 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3161 MACSEC_RXSC_ATTR_PAD)) { 3162 nla_nest_cancel(skb, rxsc_nest); 3163 nla_nest_cancel(skb, rxsc_list); 3164 goto nla_put_failure; 3165 } 3166 3167 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3168 if (!attr) { 3169 nla_nest_cancel(skb, rxsc_nest); 3170 nla_nest_cancel(skb, rxsc_list); 3171 goto 
nla_put_failure; 3172 } 3173 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3174 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3175 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3176 nla_nest_cancel(skb, attr); 3177 nla_nest_cancel(skb, rxsc_nest); 3178 nla_nest_cancel(skb, rxsc_list); 3179 goto nla_put_failure; 3180 } 3181 nla_nest_end(skb, attr); 3182 3183 rxsa_list = nla_nest_start_noflag(skb, 3184 MACSEC_RXSC_ATTR_SA_LIST); 3185 if (!rxsa_list) { 3186 nla_nest_cancel(skb, rxsc_nest); 3187 nla_nest_cancel(skb, rxsc_list); 3188 goto nla_put_failure; 3189 } 3190 3191 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3192 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3193 struct nlattr *rxsa_nest; 3194 u64 pn; 3195 int pn_len; 3196 3197 if (!rx_sa) 3198 continue; 3199 3200 rxsa_nest = nla_nest_start_noflag(skb, k++); 3201 if (!rxsa_nest) { 3202 nla_nest_cancel(skb, rxsa_list); 3203 nla_nest_cancel(skb, rxsc_nest); 3204 nla_nest_cancel(skb, rxsc_list); 3205 goto nla_put_failure; 3206 } 3207 3208 attr = nla_nest_start_noflag(skb, 3209 MACSEC_SA_ATTR_STATS); 3210 if (!attr) { 3211 nla_nest_cancel(skb, rxsa_list); 3212 nla_nest_cancel(skb, rxsc_nest); 3213 nla_nest_cancel(skb, rxsc_list); 3214 goto nla_put_failure; 3215 } 3216 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3217 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3218 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3219 nla_nest_cancel(skb, attr); 3220 nla_nest_cancel(skb, rxsa_list); 3221 nla_nest_cancel(skb, rxsc_nest); 3222 nla_nest_cancel(skb, rxsc_list); 3223 goto nla_put_failure; 3224 } 3225 nla_nest_end(skb, attr); 3226 3227 if (secy->xpn) { 3228 pn = rx_sa->next_pn; 3229 pn_len = MACSEC_XPN_PN_LEN; 3230 } else { 3231 pn = rx_sa->next_pn_halves.lower; 3232 pn_len = MACSEC_DEFAULT_PN_LEN; 3233 } 3234 3235 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3236 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3237 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3238 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3239 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3240 nla_nest_cancel(skb, rxsa_nest); 3241 nla_nest_cancel(skb, rxsc_nest); 3242 nla_nest_cancel(skb, rxsc_list); 3243 goto nla_put_failure; 3244 } 3245 nla_nest_end(skb, rxsa_nest); 3246 } 3247 3248 nla_nest_end(skb, rxsa_list); 3249 nla_nest_end(skb, rxsc_nest); 3250 } 3251 3252 nla_nest_end(skb, rxsc_list); 3253 3254 genlmsg_end(skb, hdr); 3255 3256 return 0; 3257 3258 nla_put_failure: 3259 genlmsg_cancel(skb, hdr); 3260 return -EMSGSIZE; 3261 } 3262 3263 static int macsec_generation = 1; /* protected by RTNL */ 3264 3265 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3266 { 3267 struct net *net = sock_net(skb->sk); 3268 struct net_device *dev; 3269 int dev_idx, d; 3270 3271 dev_idx = cb->args[0]; 3272 3273 d = 0; 3274 rtnl_lock(); 3275 3276 cb->seq = macsec_generation; 3277 3278 for_each_netdev(net, dev) { 3279 struct macsec_secy *secy; 3280 3281 if (d < dev_idx) 3282 goto next; 3283 3284 if (!netif_is_macsec(dev)) 3285 goto next; 3286 3287 secy = &macsec_priv(dev)->secy; 3288 if (dump_secy(secy, dev, skb, cb) < 0) 3289 goto done; 3290 next: 3291 d++; 3292 } 3293 3294 done: 3295 rtnl_unlock(); 3296 cb->args[0] = d; 3297 return skb->len; 3298 } 3299 3300 static const struct genl_small_ops macsec_genl_ops[] = { 3301 { 3302 .cmd = MACSEC_CMD_GET_TXSC, 3303 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3304 .dumpit = macsec_dump_txsc, 3305 }, 3306 { 3307 .cmd = 
MACSEC_CMD_ADD_RXSC, 3308 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3309 .doit = macsec_add_rxsc, 3310 .flags = GENL_ADMIN_PERM, 3311 }, 3312 { 3313 .cmd = MACSEC_CMD_DEL_RXSC, 3314 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3315 .doit = macsec_del_rxsc, 3316 .flags = GENL_ADMIN_PERM, 3317 }, 3318 { 3319 .cmd = MACSEC_CMD_UPD_RXSC, 3320 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3321 .doit = macsec_upd_rxsc, 3322 .flags = GENL_ADMIN_PERM, 3323 }, 3324 { 3325 .cmd = MACSEC_CMD_ADD_TXSA, 3326 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3327 .doit = macsec_add_txsa, 3328 .flags = GENL_ADMIN_PERM, 3329 }, 3330 { 3331 .cmd = MACSEC_CMD_DEL_TXSA, 3332 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3333 .doit = macsec_del_txsa, 3334 .flags = GENL_ADMIN_PERM, 3335 }, 3336 { 3337 .cmd = MACSEC_CMD_UPD_TXSA, 3338 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3339 .doit = macsec_upd_txsa, 3340 .flags = GENL_ADMIN_PERM, 3341 }, 3342 { 3343 .cmd = MACSEC_CMD_ADD_RXSA, 3344 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3345 .doit = macsec_add_rxsa, 3346 .flags = GENL_ADMIN_PERM, 3347 }, 3348 { 3349 .cmd = MACSEC_CMD_DEL_RXSA, 3350 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3351 .doit = macsec_del_rxsa, 3352 .flags = GENL_ADMIN_PERM, 3353 }, 3354 { 3355 .cmd = MACSEC_CMD_UPD_RXSA, 3356 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3357 .doit = macsec_upd_rxsa, 3358 .flags = GENL_ADMIN_PERM, 3359 }, 3360 { 3361 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3362 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3363 .doit = macsec_upd_offload, 3364 .flags = GENL_ADMIN_PERM, 3365 }, 3366 }; 3367 3368 static struct genl_family macsec_fam __ro_after_init = { 3369 .name = MACSEC_GENL_NAME, 3370 .hdrsize = 0, 3371 .version = MACSEC_GENL_VERSION, 3372 .maxattr = MACSEC_ATTR_MAX, 3373 .policy = macsec_genl_policy, 3374 .netnsok = true, 3375 .module = THIS_MODULE, 3376 .small_ops = macsec_genl_ops, 3377 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3378 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, 3379 }; 3380 3381 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3382 struct net_device *dev) 3383 { 3384 struct macsec_dev *macsec = netdev_priv(dev); 3385 struct macsec_secy *secy = &macsec->secy; 3386 struct pcpu_secy_stats *secy_stats; 3387 int ret, len; 3388 3389 if (macsec_is_offloaded(netdev_priv(dev))) { 3390 struct metadata_dst *md_dst = secy->tx_sc.md_dst; 3391 3392 skb_dst_drop(skb); 3393 dst_hold(&md_dst->dst); 3394 skb_dst_set(skb, &md_dst->dst); 3395 skb->dev = macsec->real_dev; 3396 return dev_queue_xmit(skb); 3397 } 3398 3399 /* 10.5 */ 3400 if (!secy->protect_frames) { 3401 secy_stats = this_cpu_ptr(macsec->stats); 3402 u64_stats_update_begin(&secy_stats->syncp); 3403 secy_stats->stats.OutPktsUntagged++; 3404 u64_stats_update_end(&secy_stats->syncp); 3405 skb->dev = macsec->real_dev; 3406 len = skb->len; 3407 ret = dev_queue_xmit(skb); 3408 count_tx(dev, ret, len); 3409 return ret; 3410 } 3411 3412 if (!secy->operational) { 3413 kfree_skb(skb); 3414 DEV_STATS_INC(dev, tx_dropped); 3415 return NETDEV_TX_OK; 3416 } 3417 3418 len = skb->len; 3419 skb = macsec_encrypt(skb, dev); 3420 if (IS_ERR(skb)) { 3421 if (PTR_ERR(skb) != -EINPROGRESS) 3422 DEV_STATS_INC(dev, tx_dropped); 3423 return NETDEV_TX_OK; 3424 } 3425 3426 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3427 3428 
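	/* Only the synchronous crypto path reaches this point: when the AEAD
	 * request completes asynchronously, macsec_encrypt() returned
	 * ERR_PTR(-EINPROGRESS) above, and the async completion path
	 * transmits the skb once encryption finishes.
	 */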
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	/* Reserve room on top of the underlying device's own requirements:
	 * the SecTAG (and optional SCI) go in front of the frame, the ICV
	 * at the end.
	 */
	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	/* Get macsec's reference to real_dev */
	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct
macsec_context ctx; 3556 3557 ops = macsec_get_ops(macsec, &ctx); 3558 if (ops) { 3559 ctx.secy = &macsec->secy; 3560 macsec_offload(ops->mdo_dev_stop, &ctx); 3561 } 3562 } 3563 3564 dev_mc_unsync(real_dev, dev); 3565 dev_uc_unsync(real_dev, dev); 3566 3567 if (dev->flags & IFF_ALLMULTI) 3568 dev_set_allmulti(real_dev, -1); 3569 3570 if (dev->flags & IFF_PROMISC) 3571 dev_set_promiscuity(real_dev, -1); 3572 3573 dev_uc_del(real_dev, dev->dev_addr); 3574 3575 return 0; 3576 } 3577 3578 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3579 { 3580 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3581 3582 if (!(dev->flags & IFF_UP)) 3583 return; 3584 3585 if (change & IFF_ALLMULTI) 3586 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3587 3588 if (change & IFF_PROMISC) 3589 dev_set_promiscuity(real_dev, 3590 dev->flags & IFF_PROMISC ? 1 : -1); 3591 } 3592 3593 static void macsec_dev_set_rx_mode(struct net_device *dev) 3594 { 3595 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3596 3597 dev_mc_sync(real_dev, dev); 3598 dev_uc_sync(real_dev, dev); 3599 } 3600 3601 static int macsec_set_mac_address(struct net_device *dev, void *p) 3602 { 3603 struct macsec_dev *macsec = macsec_priv(dev); 3604 struct net_device *real_dev = macsec->real_dev; 3605 struct sockaddr *addr = p; 3606 int err; 3607 3608 if (!is_valid_ether_addr(addr->sa_data)) 3609 return -EADDRNOTAVAIL; 3610 3611 if (!(dev->flags & IFF_UP)) 3612 goto out; 3613 3614 err = dev_uc_add(real_dev, addr->sa_data); 3615 if (err < 0) 3616 return err; 3617 3618 dev_uc_del(real_dev, dev->dev_addr); 3619 3620 out: 3621 eth_hw_addr_set(dev, addr->sa_data); 3622 3623 /* If h/w offloading is available, propagate to the device */ 3624 if (macsec_is_offloaded(macsec)) { 3625 const struct macsec_ops *ops; 3626 struct macsec_context ctx; 3627 3628 ops = macsec_get_ops(macsec, &ctx); 3629 if (ops) { 3630 ctx.secy = &macsec->secy; 3631 macsec_offload(ops->mdo_upd_secy, &ctx); 3632 } 3633 } 3634 3635 return 0; 3636 } 3637 3638 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3639 { 3640 struct macsec_dev *macsec = macsec_priv(dev); 3641 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3642 3643 if (macsec->real_dev->mtu - extra < new_mtu) 3644 return -ERANGE; 3645 3646 dev->mtu = new_mtu; 3647 3648 return 0; 3649 } 3650 3651 static void macsec_get_stats64(struct net_device *dev, 3652 struct rtnl_link_stats64 *s) 3653 { 3654 if (!dev->tstats) 3655 return; 3656 3657 dev_fetch_sw_netstats(s, dev->tstats); 3658 3659 s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); 3660 s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); 3661 s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); 3662 } 3663 3664 static int macsec_get_iflink(const struct net_device *dev) 3665 { 3666 return macsec_priv(dev)->real_dev->ifindex; 3667 } 3668 3669 static const struct net_device_ops macsec_netdev_ops = { 3670 .ndo_init = macsec_dev_init, 3671 .ndo_uninit = macsec_dev_uninit, 3672 .ndo_open = macsec_dev_open, 3673 .ndo_stop = macsec_dev_stop, 3674 .ndo_fix_features = macsec_fix_features, 3675 .ndo_change_mtu = macsec_change_mtu, 3676 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3677 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3678 .ndo_set_mac_address = macsec_set_mac_address, 3679 .ndo_start_xmit = macsec_start_xmit, 3680 .ndo_get_stats64 = macsec_get_stats64, 3681 .ndo_get_iflink = macsec_get_iflink, 3682 }; 3683 3684 static const struct device_type macsec_type 
= { 3685 .name = "macsec", 3686 }; 3687 3688 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3689 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3690 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3691 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3692 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3693 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3694 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3695 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3696 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3697 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3698 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3699 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3700 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3701 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3702 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, 3703 }; 3704 3705 static void macsec_free_netdev(struct net_device *dev) 3706 { 3707 struct macsec_dev *macsec = macsec_priv(dev); 3708 3709 if (macsec->secy.tx_sc.md_dst) 3710 metadata_dst_free(macsec->secy.tx_sc.md_dst); 3711 free_percpu(macsec->stats); 3712 free_percpu(macsec->secy.tx_sc.stats); 3713 3714 /* Get rid of the macsec's reference to real_dev */ 3715 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3716 } 3717 3718 static void macsec_setup(struct net_device *dev) 3719 { 3720 ether_setup(dev); 3721 dev->min_mtu = 0; 3722 dev->max_mtu = ETH_MAX_MTU; 3723 dev->priv_flags |= IFF_NO_QUEUE; 3724 dev->netdev_ops = &macsec_netdev_ops; 3725 dev->needs_free_netdev = true; 3726 dev->priv_destructor = macsec_free_netdev; 3727 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3728 3729 eth_zero_addr(dev->broadcast); 3730 } 3731 3732 static int macsec_changelink_common(struct net_device *dev, 3733 struct nlattr *data[]) 3734 { 3735 struct macsec_secy *secy; 3736 struct macsec_tx_sc *tx_sc; 3737 3738 secy = &macsec_priv(dev)->secy; 3739 tx_sc = &secy->tx_sc; 3740 3741 if (data[IFLA_MACSEC_ENCODING_SA]) { 3742 struct macsec_tx_sa *tx_sa; 3743 3744 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3745 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3746 3747 secy->operational = tx_sa && tx_sa->active; 3748 } 3749 3750 if (data[IFLA_MACSEC_ENCRYPT]) 3751 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3752 3753 if (data[IFLA_MACSEC_PROTECT]) 3754 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3755 3756 if (data[IFLA_MACSEC_INC_SCI]) 3757 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3758 3759 if (data[IFLA_MACSEC_ES]) 3760 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3761 3762 if (data[IFLA_MACSEC_SCB]) 3763 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3764 3765 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3766 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3767 3768 if (data[IFLA_MACSEC_VALIDATION]) 3769 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3770 3771 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3772 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3773 case MACSEC_CIPHER_ID_GCM_AES_128: 3774 case MACSEC_DEFAULT_CIPHER_ID: 3775 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3776 secy->xpn = false; 3777 break; 3778 case MACSEC_CIPHER_ID_GCM_AES_256: 3779 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3780 secy->xpn = false; 3781 break; 3782 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3783 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3784 secy->xpn = true; 3785 break; 3786 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3787 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3788 secy->xpn = true; 3789 break; 
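	/* GCM-AES-128 and GCM-AES-256, with and without extended packet
	 * numbering (IEEE 802.1AEbw), are the only cipher suites implemented;
	 * any other cipher suite ID is rejected below.
	 */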
3790 default: 3791 return -EINVAL; 3792 } 3793 } 3794 3795 if (data[IFLA_MACSEC_WINDOW]) { 3796 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3797 3798 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3799 * for XPN cipher suites */ 3800 if (secy->xpn && 3801 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3802 return -EINVAL; 3803 } 3804 3805 return 0; 3806 } 3807 3808 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3809 struct nlattr *data[], 3810 struct netlink_ext_ack *extack) 3811 { 3812 struct macsec_dev *macsec = macsec_priv(dev); 3813 bool macsec_offload_state_change = false; 3814 enum macsec_offload offload; 3815 struct macsec_tx_sc tx_sc; 3816 struct macsec_secy secy; 3817 int ret; 3818 3819 if (!data) 3820 return 0; 3821 3822 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3823 data[IFLA_MACSEC_ICV_LEN] || 3824 data[IFLA_MACSEC_SCI] || 3825 data[IFLA_MACSEC_PORT]) 3826 return -EINVAL; 3827 3828 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3829 * propagation fails, to revert macsec_changelink_common. 3830 */ 3831 memcpy(&secy, &macsec->secy, sizeof(secy)); 3832 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3833 3834 ret = macsec_changelink_common(dev, data); 3835 if (ret) 3836 goto cleanup; 3837 3838 if (data[IFLA_MACSEC_OFFLOAD]) { 3839 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); 3840 if (macsec->offload != offload) { 3841 macsec_offload_state_change = true; 3842 ret = macsec_update_offload(dev, offload); 3843 if (ret) 3844 goto cleanup; 3845 } 3846 } 3847 3848 /* If h/w offloading is available, propagate to the device */ 3849 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { 3850 const struct macsec_ops *ops; 3851 struct macsec_context ctx; 3852 3853 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3854 if (!ops) { 3855 ret = -EOPNOTSUPP; 3856 goto cleanup; 3857 } 3858 3859 ctx.secy = &macsec->secy; 3860 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3861 if (ret) 3862 goto cleanup; 3863 } 3864 3865 return 0; 3866 3867 cleanup: 3868 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3869 memcpy(&macsec->secy, &secy, sizeof(secy)); 3870 3871 return ret; 3872 } 3873 3874 static void macsec_del_dev(struct macsec_dev *macsec) 3875 { 3876 int i; 3877 3878 while (macsec->secy.rx_sc) { 3879 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3880 3881 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3882 free_rx_sc(rx_sc); 3883 } 3884 3885 for (i = 0; i < MACSEC_NUM_AN; i++) { 3886 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3887 3888 if (sa) { 3889 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3890 clear_tx_sa(sa); 3891 } 3892 } 3893 } 3894 3895 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3896 { 3897 struct macsec_dev *macsec = macsec_priv(dev); 3898 struct net_device *real_dev = macsec->real_dev; 3899 3900 /* If h/w offloading is available, propagate to the device */ 3901 if (macsec_is_offloaded(macsec)) { 3902 const struct macsec_ops *ops; 3903 struct macsec_context ctx; 3904 3905 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3906 if (ops) { 3907 ctx.secy = &macsec->secy; 3908 macsec_offload(ops->mdo_del_secy, &ctx); 3909 } 3910 } 3911 3912 unregister_netdevice_queue(dev, head); 3913 list_del_rcu(&macsec->secys); 3914 macsec_del_dev(macsec); 3915 netdev_upper_dev_unlink(real_dev, dev); 3916 3917 macsec_generation++; 3918 } 3919 3920 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3921 
{ 3922 struct macsec_dev *macsec = macsec_priv(dev); 3923 struct net_device *real_dev = macsec->real_dev; 3924 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3925 3926 macsec_common_dellink(dev, head); 3927 3928 if (list_empty(&rxd->secys)) { 3929 netdev_rx_handler_unregister(real_dev); 3930 kfree(rxd); 3931 } 3932 } 3933 3934 static int register_macsec_dev(struct net_device *real_dev, 3935 struct net_device *dev) 3936 { 3937 struct macsec_dev *macsec = macsec_priv(dev); 3938 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3939 3940 if (!rxd) { 3941 int err; 3942 3943 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3944 if (!rxd) 3945 return -ENOMEM; 3946 3947 INIT_LIST_HEAD(&rxd->secys); 3948 3949 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3950 rxd); 3951 if (err < 0) { 3952 kfree(rxd); 3953 return err; 3954 } 3955 } 3956 3957 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3958 return 0; 3959 } 3960 3961 static bool sci_exists(struct net_device *dev, sci_t sci) 3962 { 3963 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3964 struct macsec_dev *macsec; 3965 3966 list_for_each_entry(macsec, &rxd->secys, secys) { 3967 if (macsec->secy.sci == sci) 3968 return true; 3969 } 3970 3971 return false; 3972 } 3973 3974 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3975 { 3976 return make_sci(dev->dev_addr, port); 3977 } 3978 3979 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3980 { 3981 struct macsec_dev *macsec = macsec_priv(dev); 3982 struct macsec_secy *secy = &macsec->secy; 3983 3984 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3985 if (!macsec->stats) 3986 return -ENOMEM; 3987 3988 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3989 if (!secy->tx_sc.stats) 3990 return -ENOMEM; 3991 3992 secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); 3993 if (!secy->tx_sc.md_dst) 3994 /* macsec and secy percpu stats will be freed when unregistering 3995 * net_device in macsec_free_netdev() 3996 */ 3997 return -ENOMEM; 3998 3999 if (sci == MACSEC_UNDEF_SCI) 4000 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4001 4002 secy->netdev = dev; 4003 secy->operational = true; 4004 secy->key_len = DEFAULT_SAK_LEN; 4005 secy->icv_len = icv_len; 4006 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 4007 secy->protect_frames = true; 4008 secy->replay_protect = false; 4009 secy->xpn = DEFAULT_XPN; 4010 4011 secy->sci = sci; 4012 secy->tx_sc.md_dst->u.macsec_info.sci = sci; 4013 secy->tx_sc.active = true; 4014 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 4015 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 4016 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 4017 secy->tx_sc.end_station = false; 4018 secy->tx_sc.scb = false; 4019 4020 return 0; 4021 } 4022 4023 static struct lock_class_key macsec_netdev_addr_lock_key; 4024 4025 static int macsec_newlink(struct net *net, struct net_device *dev, 4026 struct nlattr *tb[], struct nlattr *data[], 4027 struct netlink_ext_ack *extack) 4028 { 4029 struct macsec_dev *macsec = macsec_priv(dev); 4030 rx_handler_func_t *rx_handler; 4031 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4032 struct net_device *real_dev; 4033 int err, mtu; 4034 sci_t sci; 4035 4036 if (!tb[IFLA_LINK]) 4037 return -EINVAL; 4038 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4039 if (!real_dev) 4040 return -ENODEV; 4041 if (real_dev->type != ARPHRD_ETHER) 4042 return -EINVAL; 4043 4044 dev->priv_flags |= IFF_MACSEC; 4045 4046 macsec->real_dev = real_dev; 4047 4048 if (data && 
data[IFLA_MACSEC_OFFLOAD]) 4049 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4050 else 4051 /* MACsec offloading is off by default */ 4052 macsec->offload = MACSEC_OFFLOAD_OFF; 4053 4054 /* Check if the offloading mode is supported by the underlying layers */ 4055 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4056 !macsec_check_offload(macsec->offload, macsec)) 4057 return -EOPNOTSUPP; 4058 4059 /* send_sci must be set to true when transmit sci explicitly is set */ 4060 if ((data && data[IFLA_MACSEC_SCI]) && 4061 (data && data[IFLA_MACSEC_INC_SCI])) { 4062 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4063 4064 if (!send_sci) 4065 return -EINVAL; 4066 } 4067 4068 if (data && data[IFLA_MACSEC_ICV_LEN]) 4069 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4070 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4071 if (mtu < 0) 4072 dev->mtu = 0; 4073 else 4074 dev->mtu = mtu; 4075 4076 rx_handler = rtnl_dereference(real_dev->rx_handler); 4077 if (rx_handler && rx_handler != macsec_handle_frame) 4078 return -EBUSY; 4079 4080 err = register_netdevice(dev); 4081 if (err < 0) 4082 return err; 4083 4084 netdev_lockdep_set_classes(dev); 4085 lockdep_set_class(&dev->addr_list_lock, 4086 &macsec_netdev_addr_lock_key); 4087 4088 err = netdev_upper_dev_link(real_dev, dev, extack); 4089 if (err < 0) 4090 goto unregister; 4091 4092 /* need to be already registered so that ->init has run and 4093 * the MAC addr is set 4094 */ 4095 if (data && data[IFLA_MACSEC_SCI]) 4096 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4097 else if (data && data[IFLA_MACSEC_PORT]) 4098 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4099 else 4100 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4101 4102 if (rx_handler && sci_exists(real_dev, sci)) { 4103 err = -EBUSY; 4104 goto unlink; 4105 } 4106 4107 err = macsec_add_dev(dev, sci, icv_len); 4108 if (err) 4109 goto unlink; 4110 4111 if (data) { 4112 err = macsec_changelink_common(dev, data); 4113 if (err) 4114 goto del_dev; 4115 } 4116 4117 /* If h/w offloading is available, propagate to the device */ 4118 if (macsec_is_offloaded(macsec)) { 4119 const struct macsec_ops *ops; 4120 struct macsec_context ctx; 4121 4122 ops = macsec_get_ops(macsec, &ctx); 4123 if (ops) { 4124 ctx.secy = &macsec->secy; 4125 err = macsec_offload(ops->mdo_add_secy, &ctx); 4126 if (err) 4127 goto del_dev; 4128 } 4129 } 4130 4131 err = register_macsec_dev(real_dev, dev); 4132 if (err < 0) 4133 goto del_dev; 4134 4135 netif_stacked_transfer_operstate(real_dev, dev); 4136 linkwatch_fire_event(dev); 4137 4138 macsec_generation++; 4139 4140 return 0; 4141 4142 del_dev: 4143 macsec_del_dev(macsec); 4144 unlink: 4145 netdev_upper_dev_unlink(real_dev, dev); 4146 unregister: 4147 unregister_netdevice(dev); 4148 return err; 4149 } 4150 4151 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4152 struct netlink_ext_ack *extack) 4153 { 4154 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4155 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4156 int flag; 4157 bool es, scb, sci; 4158 4159 if (!data) 4160 return 0; 4161 4162 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4163 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4164 4165 if (data[IFLA_MACSEC_ICV_LEN]) { 4166 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4167 if (icv_len != MACSEC_DEFAULT_ICV_LEN) { 4168 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4169 struct crypto_aead *dummy_tfm; 4170 4171 dummy_tfm = macsec_alloc_tfm(dummy_key, 4172 DEFAULT_SAK_LEN, 4173 icv_len); 4174 if (IS_ERR(dummy_tfm)) 4175 return 
		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}

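/* Report the active configuration back to userspace. The cipher suite ID is
 * not stored directly; it is reconstructed from the SAK length and the XPN
 * flag, which is why only the four GCM-AES variants can be expressed here.
 */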
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

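/* Module init registers the building blocks in dependency order (netdev
 * notifier, rtnl link ops, genetlink family) and unwinds them in reverse on
 * failure; macsec_exit() mirrors that teardown and then waits for pending
 * RCU callbacks with rcu_barrier() before the module goes away.
 */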
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
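
/* Illustrative usage, not part of the driver: with the rtnl_link_ops above
 * registered, a MACsec device is typically created and keyed from userspace
 * with iproute2. The interface names and key below are made-up examples:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <32-hex-digit 128-bit key>
 *   ip link set macsec0 up
 */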