// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
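/* macsec_cb lives in skb->cb across the (possibly asynchronous) crypto
 * operation; macsec_skb_cb() below enforces with a BUILD_BUG_ON that it
 * fits in the 48-byte cb[] area. Illustrative access pattern, matching
 * the helpers used throughout this file:
 *
 *	struct macsec_cb *cb = macsec_skb_cb(skb);
 *
 *	cb->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
 *	cb->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
 */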
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}
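/* Worked example of the length helpers here: the SecTAG is
 * MACSEC_TAG_LEN = 6 octets without the SCI and 6 + MACSEC_SCI_LEN (8) = 14
 * with it.  macsec_hdr_len() below adds the Ethernet header (ETH_HLEN = 14),
 * giving 20 or 28 octets of headers in front of the secure data, while
 * macsec_extra_len() adds only the 2-octet EtherType, i.e. the per-packet
 * overhead MACsec inserts before the payload (the ICV comes on top): 8 or
 * 16 octets.
 */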
static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}
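/* Example of the MSDU accounting above: a 1500-octet payload sent with the
 * SCI present and a 16-octet ICV yields skb->len = 28 + 1500 + 16 = 1544,
 * and macsec_msdu_len() recovers 1544 - 28 - 16 = 1500, the length that
 * feeds the OutOctets/InOctets counters below.
 */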
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
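/* macsec_alloc_req() above packs everything the AEAD operation needs into
 * one GFP_ATOMIC allocation; freeing the request frees the IV and the
 * scatterlist with it.  Layout of the returned buffer:
 *
 *	[ aead_request + tfm reqsize | IV (12) | pad | sg[num_frags] ]
 *	^ req                          ^ *iv           ^ *sg (aligned)
 */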
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
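/* The two aead_request_set_crypt()/set_ad() cases in macsec_encrypt()
 * above mirror the GCM-AES cipher suite modes: with encryption enabled
 * the Ethernet addresses plus the SecTAG form the associated data and the
 * payload is transformed into ciphertext; in integrity-only mode
 * everything up to the ICV is associated data and the cipher transforms
 * zero payload bytes, producing only the ICV appended into the tailroom
 * reserved by skb_put(skb, secy->icv_len).
 */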
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
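/* The completion callbacks above run after crypto_aead_encrypt()/decrypt()
 * returned -EINPROGRESS: the references taken on the SA, SC and netdevice
 * in the transmit and receive paths before the request was queued are what
 * keep those objects alive until here, so the *_put()/dev_put() calls in
 * the callbacks pair with those earlier *_get()/dev_hold() calls.
 */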
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
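/* Worked example of the XPN recovery in macsec_decrypt() above: only the
 * low 32 bits of the 64-bit PN travel in the SecTAG, so the receiver
 * splices them into its expected next_pn.  With next_pn =
 * 0x00000001fffffff0 and hdr_pn = 0x00000005, hdr_pn is below
 * next_pn.lower and in the other half of the 32-bit space (pn_same_half()
 * compares bit 31), so upper is incremented and the recovered PN used for
 * the IV is 0x0000000200000005.
 */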
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	bool is_macsec_md_dst;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);
	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			const struct macsec_ops *ops;

			ops = macsec_get_ops(macsec, NULL);

			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
				continue;

			if (is_macsec_md_dst) {
				struct macsec_rx_sc *rx_sc;

				/* All drivers that implement MACsec offload
				 * support using skb metadata destinations must
				 * indicate that they do so.
				 */
				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
				rx_sc = find_rx_sc(&macsec->secy,
						   md_dst->u.macsec_info.sci);
				if (!rx_sc)
					continue;
				/* device indicated macsec offload occurred */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				eth_skb_pkt_type(skb, ndev);
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			/* This datapath is insecure because it is unable to
			 * enforce isolation of broadcast/multicast traffic and
			 * unicast traffic with promiscuous mode on the macsec
			 * netdev. Since the core stack has no mechanism to
			 * check that the hardware did indeed receive MACsec
			 * traffic, it is possible that the response handling
			 * done by the MACsec port was to a plaintext packet.
			 * This violates the MACsec protocol standard.
			 */
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				eth_skb_pkt_type(nskb, ndev);

				__netif_rx(nskb);
			} else if (ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
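/* The rx_handler return values used above and below follow the semantics
 * from <linux/netdevice.h>: RX_HANDLER_PASS leaves the skb to the normal
 * stack on the original device, RX_HANDLER_ANOTHER re-enters receive
 * processing with the updated skb->dev, and RX_HANDLER_CONSUMED means the
 * handler took ownership of the skb (it was freed or queued elsewhere).
 */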
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}
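
	/* Example of the replay check above: with replay_window = 32 and
	 * next_pn.lower = 100, any frame carrying a PN below 68 is counted
	 * as InPktsLate and dropped; under XPN the extra pn_same_half()
	 * test keeps frames from the other half of the 32-bit PN space
	 * from being misclassified as late across a wrap.
	 */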
	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
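/* Typical use of macsec_alloc_tfm(), as in init_rx_sa()/init_tx_sa()
 * below: a GCM-AES-128 SAK with the default 16-octet ICV.
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = macsec_alloc_tfm(sak, MACSEC_GCM_AES_128_SAK_LEN,
 *			       MACSEC_DEFAULT_ICV_LEN);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */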
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
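/* Lifetime rules for the SA/SC objects managed above: datapath readers
 * look an object up under RCU and take a reference with
 * refcount_inc_not_zero() (macsec_rxsa_get() and friends); control-path
 * writers clear the RCU pointer under RTNL and drop the initial
 * reference, and the memory is only reclaimed via call_rcu() once the
 * last reference is gone.
 */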
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
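/* The genl handlers below all start from the same nested-attribute
 * pattern; sketch of the flow for an SA operation:
 *
 *	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
 *
 *	if (parse_sa_config(info->attrs, tb_sa))
 *		return -EINVAL;	// MACSEC_ATTR_SA_CONFIG missing/malformed
 *	// tb_sa[] now indexes the per-SA attributes (AN, PN, KEY, ...)
 */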
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
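/* PN attributes are length-checked against the cipher mode: a conventional
 * SA carries a 32-bit PN (MACSEC_DEFAULT_PN_LEN, 4 octets) while an XPN SA
 * carries the full 64-bit PN (MACSEC_XPN_PN_LEN, 8 octets); the handlers
 * below reject a mismatch before touching the SA.
 */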
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}
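/* All configuration handlers share the offload propagation pattern seen
 * above: build a macsec_context, hand it to the driver callback through
 * macsec_offload(), and on failure unwind the software state so the
 * kernel and the hardware never disagree.  Sketch:
 *
 *	ops = macsec_get_ops(netdev_priv(dev), &ctx);
 *	if (!ops)
 *		return -EOPNOTSUPP;
 *	ctx.secy = secy;
 *	err = macsec_offload(ops->mdo_..., &ctx);
 *	if (err)
 *		goto cleanup;	// roll back the software-side change
 */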
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
*tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2119 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2120 int ret; 2121 2122 if (!attrs[MACSEC_ATTR_IFINDEX]) 2123 return -EINVAL; 2124 2125 if (parse_sa_config(attrs, tb_sa)) 2126 return -EINVAL; 2127 2128 if (parse_rxsc_config(attrs, tb_rxsc)) 2129 return -EINVAL; 2130 2131 rtnl_lock(); 2132 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2133 &dev, &secy, &rx_sc, &assoc_num); 2134 if (IS_ERR(rx_sa)) { 2135 rtnl_unlock(); 2136 return PTR_ERR(rx_sa); 2137 } 2138 2139 if (rx_sa->active) { 2140 rtnl_unlock(); 2141 return -EBUSY; 2142 } 2143 2144 /* If h/w offloading is available, propagate to the device */ 2145 if (macsec_is_offloaded(netdev_priv(dev))) { 2146 const struct macsec_ops *ops; 2147 struct macsec_context ctx; 2148 2149 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2150 if (!ops) { 2151 ret = -EOPNOTSUPP; 2152 goto cleanup; 2153 } 2154 2155 ctx.sa.assoc_num = assoc_num; 2156 ctx.sa.rx_sa = rx_sa; 2157 ctx.secy = secy; 2158 2159 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2160 if (ret) 2161 goto cleanup; 2162 } 2163 2164 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2165 clear_rx_sa(rx_sa); 2166 2167 rtnl_unlock(); 2168 2169 return 0; 2170 2171 cleanup: 2172 rtnl_unlock(); 2173 return ret; 2174 } 2175 2176 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2177 { 2178 struct nlattr **attrs = info->attrs; 2179 struct net_device *dev; 2180 struct macsec_secy *secy; 2181 struct macsec_rx_sc *rx_sc; 2182 sci_t sci; 2183 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2184 int ret; 2185 2186 if (!attrs[MACSEC_ATTR_IFINDEX]) 2187 return -EINVAL; 2188 2189 if (parse_rxsc_config(attrs, tb_rxsc)) 2190 return -EINVAL; 2191 2192 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2193 return -EINVAL; 2194 2195 rtnl_lock(); 2196 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2197 if (IS_ERR(dev)) { 2198 rtnl_unlock(); 2199 return PTR_ERR(dev); 2200 } 2201 2202 secy = &macsec_priv(dev)->secy; 2203 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2204 2205 rx_sc = del_rx_sc(secy, sci); 2206 if (!rx_sc) { 2207 rtnl_unlock(); 2208 return -ENODEV; 2209 } 2210 2211 /* If h/w offloading is available, propagate to the device */ 2212 if (macsec_is_offloaded(netdev_priv(dev))) { 2213 const struct macsec_ops *ops; 2214 struct macsec_context ctx; 2215 2216 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2217 if (!ops) { 2218 ret = -EOPNOTSUPP; 2219 goto cleanup; 2220 } 2221 2222 ctx.rx_sc = rx_sc; 2223 ctx.secy = secy; 2224 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2225 if (ret) 2226 goto cleanup; 2227 } 2228 2229 free_rx_sc(rx_sc); 2230 rtnl_unlock(); 2231 2232 return 0; 2233 2234 cleanup: 2235 rtnl_unlock(); 2236 return ret; 2237 } 2238 2239 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2240 { 2241 struct nlattr **attrs = info->attrs; 2242 struct net_device *dev; 2243 struct macsec_secy *secy; 2244 struct macsec_tx_sc *tx_sc; 2245 struct macsec_tx_sa *tx_sa; 2246 u8 assoc_num; 2247 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2248 int ret; 2249 2250 if (!attrs[MACSEC_ATTR_IFINDEX]) 2251 return -EINVAL; 2252 2253 if (parse_sa_config(attrs, tb_sa)) 2254 return -EINVAL; 2255 2256 rtnl_lock(); 2257 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2258 &dev, &secy, &tx_sc, &assoc_num); 2259 if (IS_ERR(tx_sa)) { 2260 rtnl_unlock(); 2261 return PTR_ERR(tx_sa); 2262 } 2263 2264 if (tx_sa->active) { 2265 rtnl_unlock(); 2266 return -EBUSY; 2267 } 2268 2269 /* If h/w offloading is available, 
propagate to the device */ 2270 if (macsec_is_offloaded(netdev_priv(dev))) { 2271 const struct macsec_ops *ops; 2272 struct macsec_context ctx; 2273 2274 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2275 if (!ops) { 2276 ret = -EOPNOTSUPP; 2277 goto cleanup; 2278 } 2279 2280 ctx.sa.assoc_num = assoc_num; 2281 ctx.sa.tx_sa = tx_sa; 2282 ctx.secy = secy; 2283 2284 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2285 if (ret) 2286 goto cleanup; 2287 } 2288 2289 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2290 clear_tx_sa(tx_sa); 2291 2292 rtnl_unlock(); 2293 2294 return 0; 2295 2296 cleanup: 2297 rtnl_unlock(); 2298 return ret; 2299 } 2300 2301 static bool validate_upd_sa(struct nlattr **attrs) 2302 { 2303 if (!attrs[MACSEC_SA_ATTR_AN] || 2304 attrs[MACSEC_SA_ATTR_KEY] || 2305 attrs[MACSEC_SA_ATTR_KEYID] || 2306 attrs[MACSEC_SA_ATTR_SSCI] || 2307 attrs[MACSEC_SA_ATTR_SALT]) 2308 return false; 2309 2310 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2311 return false; 2312 2313 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 2314 return false; 2315 2316 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2317 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2318 return false; 2319 } 2320 2321 return true; 2322 } 2323 2324 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2325 { 2326 struct nlattr **attrs = info->attrs; 2327 struct net_device *dev; 2328 struct macsec_secy *secy; 2329 struct macsec_tx_sc *tx_sc; 2330 struct macsec_tx_sa *tx_sa; 2331 u8 assoc_num; 2332 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2333 bool was_operational, was_active; 2334 pn_t prev_pn; 2335 int ret = 0; 2336 2337 prev_pn.full64 = 0; 2338 2339 if (!attrs[MACSEC_ATTR_IFINDEX]) 2340 return -EINVAL; 2341 2342 if (parse_sa_config(attrs, tb_sa)) 2343 return -EINVAL; 2344 2345 if (!validate_upd_sa(tb_sa)) 2346 return -EINVAL; 2347 2348 rtnl_lock(); 2349 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2350 &dev, &secy, &tx_sc, &assoc_num); 2351 if (IS_ERR(tx_sa)) { 2352 rtnl_unlock(); 2353 return PTR_ERR(tx_sa); 2354 } 2355 2356 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2357 int pn_len; 2358 2359 pn_len = secy->xpn ? 
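/* validate_upd_sa() above deliberately rejects MACSEC_SA_ATTR_KEY,
 * MACSEC_SA_ATTR_KEYID, MACSEC_SA_ATTR_SSCI and MACSEC_SA_ATTR_SALT:
 * an SA's key material is immutable once installed. Rekeying means
 * adding a fresh SA under a different association number and moving
 * traffic to it (for TX, by changing the encoding SA), never rewriting
 * a live SA's key. Deletion is similarly conservative: as seen above,
 * macsec_del_rxsa() and macsec_del_txsa() return -EBUSY while the SA
 * is still marked active, so userspace must deactivate it first.
 */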
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2360 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2361 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2362 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2363 rtnl_unlock(); 2364 return -EINVAL; 2365 } 2366 2367 spin_lock_bh(&tx_sa->lock); 2368 prev_pn = tx_sa->next_pn_halves; 2369 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2370 spin_unlock_bh(&tx_sa->lock); 2371 } 2372 2373 was_active = tx_sa->active; 2374 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2375 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2376 2377 was_operational = secy->operational; 2378 if (assoc_num == tx_sc->encoding_sa) 2379 secy->operational = tx_sa->active; 2380 2381 /* If h/w offloading is available, propagate to the device */ 2382 if (macsec_is_offloaded(netdev_priv(dev))) { 2383 const struct macsec_ops *ops; 2384 struct macsec_context ctx; 2385 2386 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2387 if (!ops) { 2388 ret = -EOPNOTSUPP; 2389 goto cleanup; 2390 } 2391 2392 ctx.sa.assoc_num = assoc_num; 2393 ctx.sa.tx_sa = tx_sa; 2394 ctx.sa.update_pn = !!prev_pn.full64; 2395 ctx.secy = secy; 2396 2397 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2398 if (ret) 2399 goto cleanup; 2400 } 2401 2402 rtnl_unlock(); 2403 2404 return 0; 2405 2406 cleanup: 2407 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2408 spin_lock_bh(&tx_sa->lock); 2409 tx_sa->next_pn_halves = prev_pn; 2410 spin_unlock_bh(&tx_sa->lock); 2411 } 2412 tx_sa->active = was_active; 2413 secy->operational = was_operational; 2414 rtnl_unlock(); 2415 return ret; 2416 } 2417 2418 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2419 { 2420 struct nlattr **attrs = info->attrs; 2421 struct net_device *dev; 2422 struct macsec_secy *secy; 2423 struct macsec_rx_sc *rx_sc; 2424 struct macsec_rx_sa *rx_sa; 2425 u8 assoc_num; 2426 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2427 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2428 bool was_active; 2429 pn_t prev_pn; 2430 int ret = 0; 2431 2432 prev_pn.full64 = 0; 2433 2434 if (!attrs[MACSEC_ATTR_IFINDEX]) 2435 return -EINVAL; 2436 2437 if (parse_rxsc_config(attrs, tb_rxsc)) 2438 return -EINVAL; 2439 2440 if (parse_sa_config(attrs, tb_sa)) 2441 return -EINVAL; 2442 2443 if (!validate_upd_sa(tb_sa)) 2444 return -EINVAL; 2445 2446 rtnl_lock(); 2447 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2448 &dev, &secy, &rx_sc, &assoc_num); 2449 if (IS_ERR(rx_sa)) { 2450 rtnl_unlock(); 2451 return PTR_ERR(rx_sa); 2452 } 2453 2454 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2455 int pn_len; 2456 2457 pn_len = secy->xpn ? 
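/* macsec_upd_txsa() above follows a "mutate, offload, roll back"
 * pattern: prev_pn, was_active and was_operational snapshot the state
 * before the change so the cleanup label can restore it if
 * mdo_upd_txsa() fails. ctx.sa.update_pn tells the offloading driver
 * whether the request actually carried a new PN (prev_pn.full64 stays
 * zero when MACSEC_SA_ATTR_PN was absent), so hardware that cannot
 * rewrite the PN of a live SA can refuse just those updates.
 */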
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2458 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2459 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2460 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2461 rtnl_unlock(); 2462 return -EINVAL; 2463 } 2464 2465 spin_lock_bh(&rx_sa->lock); 2466 prev_pn = rx_sa->next_pn_halves; 2467 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2468 spin_unlock_bh(&rx_sa->lock); 2469 } 2470 2471 was_active = rx_sa->active; 2472 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2473 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2474 2475 /* If h/w offloading is available, propagate to the device */ 2476 if (macsec_is_offloaded(netdev_priv(dev))) { 2477 const struct macsec_ops *ops; 2478 struct macsec_context ctx; 2479 2480 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2481 if (!ops) { 2482 ret = -EOPNOTSUPP; 2483 goto cleanup; 2484 } 2485 2486 ctx.sa.assoc_num = assoc_num; 2487 ctx.sa.rx_sa = rx_sa; 2488 ctx.sa.update_pn = !!prev_pn.full64; 2489 ctx.secy = secy; 2490 2491 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2492 if (ret) 2493 goto cleanup; 2494 } 2495 2496 rtnl_unlock(); 2497 return 0; 2498 2499 cleanup: 2500 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2501 spin_lock_bh(&rx_sa->lock); 2502 rx_sa->next_pn_halves = prev_pn; 2503 spin_unlock_bh(&rx_sa->lock); 2504 } 2505 rx_sa->active = was_active; 2506 rtnl_unlock(); 2507 return ret; 2508 } 2509 2510 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2511 { 2512 struct nlattr **attrs = info->attrs; 2513 struct net_device *dev; 2514 struct macsec_secy *secy; 2515 struct macsec_rx_sc *rx_sc; 2516 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2517 unsigned int prev_n_rx_sc; 2518 bool was_active; 2519 int ret; 2520 2521 if (!attrs[MACSEC_ATTR_IFINDEX]) 2522 return -EINVAL; 2523 2524 if (parse_rxsc_config(attrs, tb_rxsc)) 2525 return -EINVAL; 2526 2527 if (!validate_add_rxsc(tb_rxsc)) 2528 return -EINVAL; 2529 2530 rtnl_lock(); 2531 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2532 if (IS_ERR(rx_sc)) { 2533 rtnl_unlock(); 2534 return PTR_ERR(rx_sc); 2535 } 2536 2537 was_active = rx_sc->active; 2538 prev_n_rx_sc = secy->n_rx_sc; 2539 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2540 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2541 2542 if (rx_sc->active != new) 2543 secy->n_rx_sc += new ? 
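/* secy->n_rx_sc, adjusted here, caches the number of currently active
 * receive SCs so the rest of the driver can test "is there any active
 * RX SC?" without walking the RCU list. The per-SC active flag and the
 * counter must therefore change together, which is why the cleanup
 * path below restores both if the offload update fails.
 */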
1 : -1; 2544 2545 rx_sc->active = new; 2546 } 2547 2548 /* If h/w offloading is available, propagate to the device */ 2549 if (macsec_is_offloaded(netdev_priv(dev))) { 2550 const struct macsec_ops *ops; 2551 struct macsec_context ctx; 2552 2553 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2554 if (!ops) { 2555 ret = -EOPNOTSUPP; 2556 goto cleanup; 2557 } 2558 2559 ctx.rx_sc = rx_sc; 2560 ctx.secy = secy; 2561 2562 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); 2563 if (ret) 2564 goto cleanup; 2565 } 2566 2567 rtnl_unlock(); 2568 2569 return 0; 2570 2571 cleanup: 2572 secy->n_rx_sc = prev_n_rx_sc; 2573 rx_sc->active = was_active; 2574 rtnl_unlock(); 2575 return ret; 2576 } 2577 2578 static bool macsec_is_configured(struct macsec_dev *macsec) 2579 { 2580 struct macsec_secy *secy = &macsec->secy; 2581 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2582 int i; 2583 2584 if (secy->rx_sc) 2585 return true; 2586 2587 for (i = 0; i < MACSEC_NUM_AN; i++) 2588 if (tx_sc->sa[i]) 2589 return true; 2590 2591 return false; 2592 } 2593 2594 static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) 2595 { 2596 enum macsec_offload prev_offload; 2597 const struct macsec_ops *ops; 2598 struct macsec_context ctx; 2599 struct macsec_dev *macsec; 2600 int ret = 0; 2601 2602 macsec = macsec_priv(dev); 2603 2604 /* Check if the offloading mode is supported by the underlying layers */ 2605 if (offload != MACSEC_OFFLOAD_OFF && 2606 !macsec_check_offload(offload, macsec)) 2607 return -EOPNOTSUPP; 2608 2609 /* Check if the net device is busy. */ 2610 if (netif_running(dev)) 2611 return -EBUSY; 2612 2613 /* Check if the device already has rules configured: we do not support 2614 * rules migration. 2615 */ 2616 if (macsec_is_configured(macsec)) 2617 return -EBUSY; 2618 2619 prev_offload = macsec->offload; 2620 2621 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, 2622 macsec, &ctx); 2623 if (!ops) 2624 return -EOPNOTSUPP; 2625 2626 macsec->offload = offload; 2627 2628 ctx.secy = &macsec->secy; 2629 ret = offload == MACSEC_OFFLOAD_OFF ? 
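/* macsec_update_offload() enforces the two preconditions for flipping
 * offload at runtime: the interface must be down and the SecY must
 * have no SCs or SAs configured, since there is no migration of
 * existing state between the software path and the hardware. The
 * switch itself is a del_secy/add_secy pair, as the ternary below
 * shows: turning offload off removes the SecY from the old provider,
 * anything else installs it on the new one.
 */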
macsec_offload(ops->mdo_del_secy, &ctx) 2630 : macsec_offload(ops->mdo_add_secy, &ctx); 2631 if (ret) 2632 macsec->offload = prev_offload; 2633 2634 return ret; 2635 } 2636 2637 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2638 { 2639 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2640 struct nlattr **attrs = info->attrs; 2641 enum macsec_offload offload; 2642 struct macsec_dev *macsec; 2643 struct net_device *dev; 2644 int ret = 0; 2645 2646 if (!attrs[MACSEC_ATTR_IFINDEX]) 2647 return -EINVAL; 2648 2649 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2650 return -EINVAL; 2651 2652 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2653 attrs[MACSEC_ATTR_OFFLOAD], 2654 macsec_genl_offload_policy, NULL)) 2655 return -EINVAL; 2656 2657 rtnl_lock(); 2658 2659 dev = get_dev_from_nl(genl_info_net(info), attrs); 2660 if (IS_ERR(dev)) { 2661 ret = PTR_ERR(dev); 2662 goto out; 2663 } 2664 macsec = macsec_priv(dev); 2665 2666 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { 2667 ret = -EINVAL; 2668 goto out; 2669 } 2670 2671 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2672 2673 if (macsec->offload != offload) 2674 ret = macsec_update_offload(dev, offload); 2675 out: 2676 rtnl_unlock(); 2677 return ret; 2678 } 2679 2680 static void get_tx_sa_stats(struct net_device *dev, int an, 2681 struct macsec_tx_sa *tx_sa, 2682 struct macsec_tx_sa_stats *sum) 2683 { 2684 struct macsec_dev *macsec = macsec_priv(dev); 2685 int cpu; 2686 2687 /* If h/w offloading is available, propagate to the device */ 2688 if (macsec_is_offloaded(macsec)) { 2689 const struct macsec_ops *ops; 2690 struct macsec_context ctx; 2691 2692 ops = macsec_get_ops(macsec, &ctx); 2693 if (ops) { 2694 ctx.sa.assoc_num = an; 2695 ctx.sa.tx_sa = tx_sa; 2696 ctx.stats.tx_sa_stats = sum; 2697 ctx.secy = &macsec_priv(dev)->secy; 2698 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2699 } 2700 return; 2701 } 2702 2703 for_each_possible_cpu(cpu) { 2704 const struct macsec_tx_sa_stats *stats = 2705 per_cpu_ptr(tx_sa->stats, cpu); 2706 2707 sum->OutPktsProtected += stats->OutPktsProtected; 2708 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2709 } 2710 } 2711 2712 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2713 { 2714 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2715 sum->OutPktsProtected) || 2716 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2717 sum->OutPktsEncrypted)) 2718 return -EMSGSIZE; 2719 2720 return 0; 2721 } 2722 2723 static void get_rx_sa_stats(struct net_device *dev, 2724 struct macsec_rx_sc *rx_sc, int an, 2725 struct macsec_rx_sa *rx_sa, 2726 struct macsec_rx_sa_stats *sum) 2727 { 2728 struct macsec_dev *macsec = macsec_priv(dev); 2729 int cpu; 2730 2731 /* If h/w offloading is available, propagate to the device */ 2732 if (macsec_is_offloaded(macsec)) { 2733 const struct macsec_ops *ops; 2734 struct macsec_context ctx; 2735 2736 ops = macsec_get_ops(macsec, &ctx); 2737 if (ops) { 2738 ctx.sa.assoc_num = an; 2739 ctx.sa.rx_sa = rx_sa; 2740 ctx.stats.rx_sa_stats = sum; 2741 ctx.secy = &macsec_priv(dev)->secy; 2742 ctx.rx_sc = rx_sc; 2743 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2744 } 2745 return; 2746 } 2747 2748 for_each_possible_cpu(cpu) { 2749 const struct macsec_rx_sa_stats *stats = 2750 per_cpu_ptr(rx_sa->stats, cpu); 2751 2752 sum->InPktsOK += stats->InPktsOK; 2753 sum->InPktsInvalid += stats->InPktsInvalid; 2754 sum->InPktsNotValid += stats->InPktsNotValid; 2755 sum->InPktsNotUsingSA += 
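/* The per-SA counters summed here and in get_tx_sa_stats() above are
 * plain per-CPU u32s read without any seqcount, unlike the 64-bit
 * per-SC and per-SecY counters below, which go through a
 * u64_stats_fetch_begin()/retry() loop; a 32-bit load is atomic on its
 * own, so no retry loop is needed for these.
 */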
stats->InPktsNotUsingSA; 2756 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2757 } 2758 } 2759 2760 static int copy_rx_sa_stats(struct sk_buff *skb, 2761 struct macsec_rx_sa_stats *sum) 2762 { 2763 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2764 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2765 sum->InPktsInvalid) || 2766 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2767 sum->InPktsNotValid) || 2768 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2769 sum->InPktsNotUsingSA) || 2770 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2771 sum->InPktsUnusedSA)) 2772 return -EMSGSIZE; 2773 2774 return 0; 2775 } 2776 2777 static void get_rx_sc_stats(struct net_device *dev, 2778 struct macsec_rx_sc *rx_sc, 2779 struct macsec_rx_sc_stats *sum) 2780 { 2781 struct macsec_dev *macsec = macsec_priv(dev); 2782 int cpu; 2783 2784 /* If h/w offloading is available, propagate to the device */ 2785 if (macsec_is_offloaded(macsec)) { 2786 const struct macsec_ops *ops; 2787 struct macsec_context ctx; 2788 2789 ops = macsec_get_ops(macsec, &ctx); 2790 if (ops) { 2791 ctx.stats.rx_sc_stats = sum; 2792 ctx.secy = &macsec_priv(dev)->secy; 2793 ctx.rx_sc = rx_sc; 2794 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2795 } 2796 return; 2797 } 2798 2799 for_each_possible_cpu(cpu) { 2800 const struct pcpu_rx_sc_stats *stats; 2801 struct macsec_rx_sc_stats tmp; 2802 unsigned int start; 2803 2804 stats = per_cpu_ptr(rx_sc->stats, cpu); 2805 do { 2806 start = u64_stats_fetch_begin(&stats->syncp); 2807 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2808 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2809 2810 sum->InOctetsValidated += tmp.InOctetsValidated; 2811 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2812 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2813 sum->InPktsDelayed += tmp.InPktsDelayed; 2814 sum->InPktsOK += tmp.InPktsOK; 2815 sum->InPktsInvalid += tmp.InPktsInvalid; 2816 sum->InPktsLate += tmp.InPktsLate; 2817 sum->InPktsNotValid += tmp.InPktsNotValid; 2818 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2819 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2820 } 2821 } 2822 2823 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2824 { 2825 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2826 sum->InOctetsValidated, 2827 MACSEC_RXSC_STATS_ATTR_PAD) || 2828 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2829 sum->InOctetsDecrypted, 2830 MACSEC_RXSC_STATS_ATTR_PAD) || 2831 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2832 sum->InPktsUnchecked, 2833 MACSEC_RXSC_STATS_ATTR_PAD) || 2834 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2835 sum->InPktsDelayed, 2836 MACSEC_RXSC_STATS_ATTR_PAD) || 2837 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2838 sum->InPktsOK, 2839 MACSEC_RXSC_STATS_ATTR_PAD) || 2840 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2841 sum->InPktsInvalid, 2842 MACSEC_RXSC_STATS_ATTR_PAD) || 2843 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2844 sum->InPktsLate, 2845 MACSEC_RXSC_STATS_ATTR_PAD) || 2846 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2847 sum->InPktsNotValid, 2848 MACSEC_RXSC_STATS_ATTR_PAD) || 2849 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2850 sum->InPktsNotUsingSA, 2851 MACSEC_RXSC_STATS_ATTR_PAD) || 2852 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2853 sum->InPktsUnusedSA, 2854 
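/* get_rx_sc_stats() above is the reader side of the u64_stats
 * protocol: the retry loop rereads the per-CPU snapshot if a writer
 * was mid-update. The matching writer side lives on the data path and
 * looks like:
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->stats.InPktsOK++;
 *	u64_stats_update_end(&stats->syncp);
 *
 * On 64-bit kernels the syncp is empty and both halves compile down
 * to plain memory accesses.
 */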
MACSEC_RXSC_STATS_ATTR_PAD)) 2855 return -EMSGSIZE; 2856 2857 return 0; 2858 } 2859 2860 static void get_tx_sc_stats(struct net_device *dev, 2861 struct macsec_tx_sc_stats *sum) 2862 { 2863 struct macsec_dev *macsec = macsec_priv(dev); 2864 int cpu; 2865 2866 /* If h/w offloading is available, propagate to the device */ 2867 if (macsec_is_offloaded(macsec)) { 2868 const struct macsec_ops *ops; 2869 struct macsec_context ctx; 2870 2871 ops = macsec_get_ops(macsec, &ctx); 2872 if (ops) { 2873 ctx.stats.tx_sc_stats = sum; 2874 ctx.secy = &macsec_priv(dev)->secy; 2875 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2876 } 2877 return; 2878 } 2879 2880 for_each_possible_cpu(cpu) { 2881 const struct pcpu_tx_sc_stats *stats; 2882 struct macsec_tx_sc_stats tmp; 2883 unsigned int start; 2884 2885 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2886 do { 2887 start = u64_stats_fetch_begin(&stats->syncp); 2888 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2889 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2890 2891 sum->OutPktsProtected += tmp.OutPktsProtected; 2892 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2893 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2894 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2895 } 2896 } 2897 2898 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2899 { 2900 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2901 sum->OutPktsProtected, 2902 MACSEC_TXSC_STATS_ATTR_PAD) || 2903 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2904 sum->OutPktsEncrypted, 2905 MACSEC_TXSC_STATS_ATTR_PAD) || 2906 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2907 sum->OutOctetsProtected, 2908 MACSEC_TXSC_STATS_ATTR_PAD) || 2909 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2910 sum->OutOctetsEncrypted, 2911 MACSEC_TXSC_STATS_ATTR_PAD)) 2912 return -EMSGSIZE; 2913 2914 return 0; 2915 } 2916 2917 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2918 { 2919 struct macsec_dev *macsec = macsec_priv(dev); 2920 int cpu; 2921 2922 /* If h/w offloading is available, propagate to the device */ 2923 if (macsec_is_offloaded(macsec)) { 2924 const struct macsec_ops *ops; 2925 struct macsec_context ctx; 2926 2927 ops = macsec_get_ops(macsec, &ctx); 2928 if (ops) { 2929 ctx.stats.dev_stats = sum; 2930 ctx.secy = &macsec_priv(dev)->secy; 2931 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2932 } 2933 return; 2934 } 2935 2936 for_each_possible_cpu(cpu) { 2937 const struct pcpu_secy_stats *stats; 2938 struct macsec_dev_stats tmp; 2939 unsigned int start; 2940 2941 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2942 do { 2943 start = u64_stats_fetch_begin(&stats->syncp); 2944 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2945 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2946 2947 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2948 sum->InPktsUntagged += tmp.InPktsUntagged; 2949 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2950 sum->InPktsNoTag += tmp.InPktsNoTag; 2951 sum->InPktsBadTag += tmp.InPktsBadTag; 2952 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2953 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2954 sum->InPktsOverrun += tmp.InPktsOverrun; 2955 } 2956 } 2957 2958 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2959 { 2960 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2961 sum->OutPktsUntagged, 2962 MACSEC_SECY_STATS_ATTR_PAD) || 2963 nla_put_u64_64bit(skb, 
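/* All of the 64-bit stats attributes in these dump helpers are emitted
 * with nla_put_u64_64bit(), which inserts the corresponding *_ATTR_PAD
 * attribute when needed so that the u64 payload lands on an 8-byte
 * boundary; userspace iterating the message must skip the PAD
 * attribute type rather than treat it as data.
 */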
MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2964 sum->InPktsUntagged, 2965 MACSEC_SECY_STATS_ATTR_PAD) || 2966 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2967 sum->OutPktsTooLong, 2968 MACSEC_SECY_STATS_ATTR_PAD) || 2969 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2970 sum->InPktsNoTag, 2971 MACSEC_SECY_STATS_ATTR_PAD) || 2972 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2973 sum->InPktsBadTag, 2974 MACSEC_SECY_STATS_ATTR_PAD) || 2975 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2976 sum->InPktsUnknownSCI, 2977 MACSEC_SECY_STATS_ATTR_PAD) || 2978 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2979 sum->InPktsNoSCI, 2980 MACSEC_SECY_STATS_ATTR_PAD) || 2981 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2982 sum->InPktsOverrun, 2983 MACSEC_SECY_STATS_ATTR_PAD)) 2984 return -EMSGSIZE; 2985 2986 return 0; 2987 } 2988 2989 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2990 { 2991 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2992 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2993 MACSEC_ATTR_SECY); 2994 u64 csid; 2995 2996 if (!secy_nest) 2997 return 1; 2998 2999 switch (secy->key_len) { 3000 case MACSEC_GCM_AES_128_SAK_LEN: 3001 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 3002 break; 3003 case MACSEC_GCM_AES_256_SAK_LEN: 3004 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 3005 break; 3006 default: 3007 goto cancel; 3008 } 3009 3010 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 3011 MACSEC_SECY_ATTR_PAD) || 3012 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 3013 csid, MACSEC_SECY_ATTR_PAD) || 3014 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 3015 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 3016 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 3017 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 3018 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 3019 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 3020 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 3021 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3022 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3023 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3024 goto cancel; 3025 3026 if (secy->replay_protect) { 3027 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3028 goto cancel; 3029 } 3030 3031 nla_nest_end(skb, secy_nest); 3032 return 0; 3033 3034 cancel: 3035 nla_nest_cancel(skb, secy_nest); 3036 return 1; 3037 } 3038 3039 static noinline_for_stack int 3040 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3041 struct sk_buff *skb, struct netlink_callback *cb) 3042 { 3043 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3044 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3045 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3046 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3047 struct macsec_dev *macsec = netdev_priv(dev); 3048 struct macsec_dev_stats dev_stats = {0, }; 3049 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3050 struct nlattr *txsa_list, *rxsc_list; 3051 struct macsec_rx_sc *rx_sc; 3052 struct nlattr *attr; 3053 void *hdr; 3054 int i, j; 3055 3056 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3057 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3058 if (!hdr) 3059 return -EMSGSIZE; 3060 3061 
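/* Dump consistency: macsec_dump_txsc() below sets cb->seq to
 * macsec_generation (which is bumped under RTNL whenever a MACsec link
 * is created or destroyed), and genl_dump_check_consistent() stamps
 * that sequence number into each dumped message. If the generation
 * changes mid-dump, netlink marks the dump with NLM_F_DUMP_INTR so
 * userspace knows to restart it.
 */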
genl_dump_check_consistent(cb, hdr); 3062 3063 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3064 goto nla_put_failure; 3065 3066 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3067 if (!attr) 3068 goto nla_put_failure; 3069 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3070 goto nla_put_failure; 3071 nla_nest_end(skb, attr); 3072 3073 if (nla_put_secy(secy, skb)) 3074 goto nla_put_failure; 3075 3076 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3077 if (!attr) 3078 goto nla_put_failure; 3079 3080 get_tx_sc_stats(dev, &tx_sc_stats); 3081 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3082 nla_nest_cancel(skb, attr); 3083 goto nla_put_failure; 3084 } 3085 nla_nest_end(skb, attr); 3086 3087 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3088 if (!attr) 3089 goto nla_put_failure; 3090 get_secy_stats(dev, &dev_stats); 3091 if (copy_secy_stats(skb, &dev_stats)) { 3092 nla_nest_cancel(skb, attr); 3093 goto nla_put_failure; 3094 } 3095 nla_nest_end(skb, attr); 3096 3097 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3098 if (!txsa_list) 3099 goto nla_put_failure; 3100 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3101 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3102 struct nlattr *txsa_nest; 3103 u64 pn; 3104 int pn_len; 3105 3106 if (!tx_sa) 3107 continue; 3108 3109 txsa_nest = nla_nest_start_noflag(skb, j++); 3110 if (!txsa_nest) { 3111 nla_nest_cancel(skb, txsa_list); 3112 goto nla_put_failure; 3113 } 3114 3115 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3116 if (!attr) { 3117 nla_nest_cancel(skb, txsa_nest); 3118 nla_nest_cancel(skb, txsa_list); 3119 goto nla_put_failure; 3120 } 3121 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3122 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3123 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3124 nla_nest_cancel(skb, attr); 3125 nla_nest_cancel(skb, txsa_nest); 3126 nla_nest_cancel(skb, txsa_list); 3127 goto nla_put_failure; 3128 } 3129 nla_nest_end(skb, attr); 3130 3131 if (secy->xpn) { 3132 pn = tx_sa->next_pn; 3133 pn_len = MACSEC_XPN_PN_LEN; 3134 } else { 3135 pn = tx_sa->next_pn_halves.lower; 3136 pn_len = MACSEC_DEFAULT_PN_LEN; 3137 } 3138 3139 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3140 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3141 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3142 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3143 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3144 nla_nest_cancel(skb, txsa_nest); 3145 nla_nest_cancel(skb, txsa_list); 3146 goto nla_put_failure; 3147 } 3148 3149 nla_nest_end(skb, txsa_nest); 3150 } 3151 nla_nest_end(skb, txsa_list); 3152 3153 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3154 if (!rxsc_list) 3155 goto nla_put_failure; 3156 3157 j = 1; 3158 for_each_rxsc_rtnl(secy, rx_sc) { 3159 int k; 3160 struct nlattr *rxsa_list; 3161 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3162 3163 if (!rxsc_nest) { 3164 nla_nest_cancel(skb, rxsc_list); 3165 goto nla_put_failure; 3166 } 3167 3168 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3169 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3170 MACSEC_RXSC_ATTR_PAD)) { 3171 nla_nest_cancel(skb, rxsc_nest); 3172 nla_nest_cancel(skb, rxsc_list); 3173 goto nla_put_failure; 3174 } 3175 3176 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3177 if (!attr) { 3178 nla_nest_cancel(skb, rxsc_nest); 3179 nla_nest_cancel(skb, rxsc_list); 3180 goto 
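/* The message assembled by dump_secy() is a tree of nested attributes,
 * roughly:
 *
 *	MACSEC_ATTR_IFINDEX
 *	MACSEC_ATTR_OFFLOAD { MACSEC_OFFLOAD_ATTR_TYPE }
 *	MACSEC_ATTR_SECY { SCI, cipher suite, flags, ... }
 *	MACSEC_ATTR_TXSC_STATS { ... }
 *	MACSEC_ATTR_SECY_STATS { ... }
 *	MACSEC_ATTR_TXSA_LIST { 1 { ... } 2 { ... } ... }
 *	MACSEC_ATTR_RXSC_LIST { 1 { ... MACSEC_RXSC_ATTR_SA_LIST { ... } } ... }
 *
 * List members are keyed by 1-based position (the j and k counters),
 * not by association number; the AN itself travels inside each entry
 * as MACSEC_SA_ATTR_AN.
 */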
nla_put_failure; 3181 } 3182 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3183 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3184 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3185 nla_nest_cancel(skb, attr); 3186 nla_nest_cancel(skb, rxsc_nest); 3187 nla_nest_cancel(skb, rxsc_list); 3188 goto nla_put_failure; 3189 } 3190 nla_nest_end(skb, attr); 3191 3192 rxsa_list = nla_nest_start_noflag(skb, 3193 MACSEC_RXSC_ATTR_SA_LIST); 3194 if (!rxsa_list) { 3195 nla_nest_cancel(skb, rxsc_nest); 3196 nla_nest_cancel(skb, rxsc_list); 3197 goto nla_put_failure; 3198 } 3199 3200 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3201 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3202 struct nlattr *rxsa_nest; 3203 u64 pn; 3204 int pn_len; 3205 3206 if (!rx_sa) 3207 continue; 3208 3209 rxsa_nest = nla_nest_start_noflag(skb, k++); 3210 if (!rxsa_nest) { 3211 nla_nest_cancel(skb, rxsa_list); 3212 nla_nest_cancel(skb, rxsc_nest); 3213 nla_nest_cancel(skb, rxsc_list); 3214 goto nla_put_failure; 3215 } 3216 3217 attr = nla_nest_start_noflag(skb, 3218 MACSEC_SA_ATTR_STATS); 3219 if (!attr) { 3220 nla_nest_cancel(skb, rxsa_list); 3221 nla_nest_cancel(skb, rxsc_nest); 3222 nla_nest_cancel(skb, rxsc_list); 3223 goto nla_put_failure; 3224 } 3225 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3226 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3227 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3228 nla_nest_cancel(skb, attr); 3229 nla_nest_cancel(skb, rxsa_list); 3230 nla_nest_cancel(skb, rxsc_nest); 3231 nla_nest_cancel(skb, rxsc_list); 3232 goto nla_put_failure; 3233 } 3234 nla_nest_end(skb, attr); 3235 3236 if (secy->xpn) { 3237 pn = rx_sa->next_pn; 3238 pn_len = MACSEC_XPN_PN_LEN; 3239 } else { 3240 pn = rx_sa->next_pn_halves.lower; 3241 pn_len = MACSEC_DEFAULT_PN_LEN; 3242 } 3243 3244 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3245 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3246 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3247 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3248 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3249 nla_nest_cancel(skb, rxsa_nest); 3250 nla_nest_cancel(skb, rxsc_nest); 3251 nla_nest_cancel(skb, rxsc_list); 3252 goto nla_put_failure; 3253 } 3254 nla_nest_end(skb, rxsa_nest); 3255 } 3256 3257 nla_nest_end(skb, rxsa_list); 3258 nla_nest_end(skb, rxsc_nest); 3259 } 3260 3261 nla_nest_end(skb, rxsc_list); 3262 3263 genlmsg_end(skb, hdr); 3264 3265 return 0; 3266 3267 nla_put_failure: 3268 genlmsg_cancel(skb, hdr); 3269 return -EMSGSIZE; 3270 } 3271 3272 static int macsec_generation = 1; /* protected by RTNL */ 3273 3274 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3275 { 3276 struct net *net = sock_net(skb->sk); 3277 struct net_device *dev; 3278 int dev_idx, d; 3279 3280 dev_idx = cb->args[0]; 3281 3282 d = 0; 3283 rtnl_lock(); 3284 3285 cb->seq = macsec_generation; 3286 3287 for_each_netdev(net, dev) { 3288 struct macsec_secy *secy; 3289 3290 if (d < dev_idx) 3291 goto next; 3292 3293 if (!netif_is_macsec(dev)) 3294 goto next; 3295 3296 secy = &macsec_priv(dev)->secy; 3297 if (dump_secy(secy, dev, skb, cb) < 0) 3298 goto done; 3299 next: 3300 d++; 3301 } 3302 3303 done: 3304 rtnl_unlock(); 3305 cb->args[0] = d; 3306 return skb->len; 3307 } 3308 3309 static const struct genl_small_ops macsec_genl_ops[] = { 3310 { 3311 .cmd = MACSEC_CMD_GET_TXSC, 3312 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3313 .dumpit = macsec_dump_txsc, 3314 }, 3315 { 3316 .cmd = 
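/* macsec_dump_txsc() above resumes an interrupted dump through
 * cb->args[0]: it records how many net devices were visited and skips
 * that many on the next ->dumpit() invocation. The cursor is purely
 * positional; a device list that changes between calls is caught by
 * the NLM_F_DUMP_INTR mechanism rather than by the cursor itself.
 */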
MACSEC_CMD_ADD_RXSC, 3317 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3318 .doit = macsec_add_rxsc, 3319 .flags = GENL_ADMIN_PERM, 3320 }, 3321 { 3322 .cmd = MACSEC_CMD_DEL_RXSC, 3323 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3324 .doit = macsec_del_rxsc, 3325 .flags = GENL_ADMIN_PERM, 3326 }, 3327 { 3328 .cmd = MACSEC_CMD_UPD_RXSC, 3329 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3330 .doit = macsec_upd_rxsc, 3331 .flags = GENL_ADMIN_PERM, 3332 }, 3333 { 3334 .cmd = MACSEC_CMD_ADD_TXSA, 3335 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3336 .doit = macsec_add_txsa, 3337 .flags = GENL_ADMIN_PERM, 3338 }, 3339 { 3340 .cmd = MACSEC_CMD_DEL_TXSA, 3341 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3342 .doit = macsec_del_txsa, 3343 .flags = GENL_ADMIN_PERM, 3344 }, 3345 { 3346 .cmd = MACSEC_CMD_UPD_TXSA, 3347 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3348 .doit = macsec_upd_txsa, 3349 .flags = GENL_ADMIN_PERM, 3350 }, 3351 { 3352 .cmd = MACSEC_CMD_ADD_RXSA, 3353 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3354 .doit = macsec_add_rxsa, 3355 .flags = GENL_ADMIN_PERM, 3356 }, 3357 { 3358 .cmd = MACSEC_CMD_DEL_RXSA, 3359 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3360 .doit = macsec_del_rxsa, 3361 .flags = GENL_ADMIN_PERM, 3362 }, 3363 { 3364 .cmd = MACSEC_CMD_UPD_RXSA, 3365 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3366 .doit = macsec_upd_rxsa, 3367 .flags = GENL_ADMIN_PERM, 3368 }, 3369 { 3370 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3371 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3372 .doit = macsec_upd_offload, 3373 .flags = GENL_ADMIN_PERM, 3374 }, 3375 }; 3376 3377 static struct genl_family macsec_fam __ro_after_init = { 3378 .name = MACSEC_GENL_NAME, 3379 .hdrsize = 0, 3380 .version = MACSEC_GENL_VERSION, 3381 .maxattr = MACSEC_ATTR_MAX, 3382 .policy = macsec_genl_policy, 3383 .netnsok = true, 3384 .module = THIS_MODULE, 3385 .small_ops = macsec_genl_ops, 3386 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3387 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, 3388 }; 3389 3390 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3391 struct net_device *dev) 3392 { 3393 struct macsec_dev *macsec = netdev_priv(dev); 3394 struct macsec_secy *secy = &macsec->secy; 3395 struct pcpu_secy_stats *secy_stats; 3396 int ret, len; 3397 3398 if (macsec_is_offloaded(netdev_priv(dev))) { 3399 struct metadata_dst *md_dst = secy->tx_sc.md_dst; 3400 3401 skb_dst_drop(skb); 3402 dst_hold(&md_dst->dst); 3403 skb_dst_set(skb, &md_dst->dst); 3404 skb->dev = macsec->real_dev; 3405 return dev_queue_xmit(skb); 3406 } 3407 3408 /* 10.5 */ 3409 if (!secy->protect_frames) { 3410 secy_stats = this_cpu_ptr(macsec->stats); 3411 u64_stats_update_begin(&secy_stats->syncp); 3412 secy_stats->stats.OutPktsUntagged++; 3413 u64_stats_update_end(&secy_stats->syncp); 3414 skb->dev = macsec->real_dev; 3415 len = skb->len; 3416 ret = dev_queue_xmit(skb); 3417 count_tx(dev, ret, len); 3418 return ret; 3419 } 3420 3421 if (!secy->operational) { 3422 kfree_skb(skb); 3423 DEV_STATS_INC(dev, tx_dropped); 3424 return NETDEV_TX_OK; 3425 } 3426 3427 len = skb->len; 3428 skb = macsec_encrypt(skb, dev); 3429 if (IS_ERR(skb)) { 3430 if (PTR_ERR(skb) != -EINPROGRESS) 3431 DEV_STATS_INC(dev, tx_dropped); 3432 return NETDEV_TX_OK; 3433 } 3434 3435 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3436 3437 
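/* Two details of the xmit path above are easy to miss: in offload mode
 * the skb goes straight to the real device with a METADATA_MACSEC dst
 * attached, which is how the offloading driver learns the SCI for the
 * frame; and in the software path macsec_encrypt() may return
 * -EINPROGRESS when the AEAD runs asynchronously, in which case the
 * crypto completion callback finishes the transmit later and this
 * function must not touch the skb again.
 */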
macsec_encrypt_finish(skb, dev); 3438 ret = dev_queue_xmit(skb); 3439 count_tx(dev, ret, len); 3440 return ret; 3441 } 3442 3443 #define MACSEC_FEATURES \ 3444 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3445 3446 static int macsec_dev_init(struct net_device *dev) 3447 { 3448 struct macsec_dev *macsec = macsec_priv(dev); 3449 struct net_device *real_dev = macsec->real_dev; 3450 int err; 3451 3452 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 3453 if (!dev->tstats) 3454 return -ENOMEM; 3455 3456 err = gro_cells_init(&macsec->gro_cells, dev); 3457 if (err) { 3458 free_percpu(dev->tstats); 3459 return err; 3460 } 3461 3462 dev->features = real_dev->features & MACSEC_FEATURES; 3463 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 3464 3465 dev->needed_headroom = real_dev->needed_headroom + 3466 MACSEC_NEEDED_HEADROOM; 3467 dev->needed_tailroom = real_dev->needed_tailroom + 3468 MACSEC_NEEDED_TAILROOM; 3469 3470 if (is_zero_ether_addr(dev->dev_addr)) 3471 eth_hw_addr_inherit(dev, real_dev); 3472 if (is_zero_ether_addr(dev->broadcast)) 3473 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3474 3475 /* Get macsec's reference to real_dev */ 3476 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); 3477 3478 return 0; 3479 } 3480 3481 static void macsec_dev_uninit(struct net_device *dev) 3482 { 3483 struct macsec_dev *macsec = macsec_priv(dev); 3484 3485 gro_cells_destroy(&macsec->gro_cells); 3486 free_percpu(dev->tstats); 3487 } 3488 3489 static netdev_features_t macsec_fix_features(struct net_device *dev, 3490 netdev_features_t features) 3491 { 3492 struct macsec_dev *macsec = macsec_priv(dev); 3493 struct net_device *real_dev = macsec->real_dev; 3494 3495 features &= (real_dev->features & MACSEC_FEATURES) | 3496 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3497 features |= NETIF_F_LLTX; 3498 3499 return features; 3500 } 3501 3502 static int macsec_dev_open(struct net_device *dev) 3503 { 3504 struct macsec_dev *macsec = macsec_priv(dev); 3505 struct net_device *real_dev = macsec->real_dev; 3506 int err; 3507 3508 err = dev_uc_add(real_dev, dev->dev_addr); 3509 if (err < 0) 3510 return err; 3511 3512 if (dev->flags & IFF_ALLMULTI) { 3513 err = dev_set_allmulti(real_dev, 1); 3514 if (err < 0) 3515 goto del_unicast; 3516 } 3517 3518 if (dev->flags & IFF_PROMISC) { 3519 err = dev_set_promiscuity(real_dev, 1); 3520 if (err < 0) 3521 goto clear_allmulti; 3522 } 3523 3524 /* If h/w offloading is available, propagate to the device */ 3525 if (macsec_is_offloaded(macsec)) { 3526 const struct macsec_ops *ops; 3527 struct macsec_context ctx; 3528 3529 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3530 if (!ops) { 3531 err = -EOPNOTSUPP; 3532 goto clear_allmulti; 3533 } 3534 3535 ctx.secy = &macsec->secy; 3536 err = macsec_offload(ops->mdo_dev_open, &ctx); 3537 if (err) 3538 goto clear_allmulti; 3539 } 3540 3541 if (netif_carrier_ok(real_dev)) 3542 netif_carrier_on(dev); 3543 3544 return 0; 3545 clear_allmulti: 3546 if (dev->flags & IFF_ALLMULTI) 3547 dev_set_allmulti(real_dev, -1); 3548 del_unicast: 3549 dev_uc_del(real_dev, dev->dev_addr); 3550 netif_carrier_off(dev); 3551 return err; 3552 } 3553 3554 static int macsec_dev_stop(struct net_device *dev) 3555 { 3556 struct macsec_dev *macsec = macsec_priv(dev); 3557 struct net_device *real_dev = macsec->real_dev; 3558 3559 netif_carrier_off(dev); 3560 3561 /* If h/w offloading is available, propagate to the device */ 3562 if (macsec_is_offloaded(macsec)) { 3563 const struct macsec_ops *ops; 3564 struct 
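/* macsec_dev_open() above mirrors the virtual device's address state
 * onto the lower device: the MACsec netdev's unicast address is added
 * to real_dev's filter with dev_uc_add() (the two MACs may differ),
 * and ALLMULTI/PROMISC are propagated by refcount through
 * dev_set_allmulti()/dev_set_promiscuity(). macsec_dev_stop() here
 * undoes each of those steps.
 */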
macsec_context ctx; 3565 3566 ops = macsec_get_ops(macsec, &ctx); 3567 if (ops) { 3568 ctx.secy = &macsec->secy; 3569 macsec_offload(ops->mdo_dev_stop, &ctx); 3570 } 3571 } 3572 3573 dev_mc_unsync(real_dev, dev); 3574 dev_uc_unsync(real_dev, dev); 3575 3576 if (dev->flags & IFF_ALLMULTI) 3577 dev_set_allmulti(real_dev, -1); 3578 3579 if (dev->flags & IFF_PROMISC) 3580 dev_set_promiscuity(real_dev, -1); 3581 3582 dev_uc_del(real_dev, dev->dev_addr); 3583 3584 return 0; 3585 } 3586 3587 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3588 { 3589 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3590 3591 if (!(dev->flags & IFF_UP)) 3592 return; 3593 3594 if (change & IFF_ALLMULTI) 3595 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3596 3597 if (change & IFF_PROMISC) 3598 dev_set_promiscuity(real_dev, 3599 dev->flags & IFF_PROMISC ? 1 : -1); 3600 } 3601 3602 static void macsec_dev_set_rx_mode(struct net_device *dev) 3603 { 3604 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3605 3606 dev_mc_sync(real_dev, dev); 3607 dev_uc_sync(real_dev, dev); 3608 } 3609 3610 static int macsec_set_mac_address(struct net_device *dev, void *p) 3611 { 3612 struct macsec_dev *macsec = macsec_priv(dev); 3613 struct net_device *real_dev = macsec->real_dev; 3614 struct sockaddr *addr = p; 3615 int err; 3616 3617 if (!is_valid_ether_addr(addr->sa_data)) 3618 return -EADDRNOTAVAIL; 3619 3620 if (!(dev->flags & IFF_UP)) 3621 goto out; 3622 3623 err = dev_uc_add(real_dev, addr->sa_data); 3624 if (err < 0) 3625 return err; 3626 3627 dev_uc_del(real_dev, dev->dev_addr); 3628 3629 out: 3630 eth_hw_addr_set(dev, addr->sa_data); 3631 3632 /* If h/w offloading is available, propagate to the device */ 3633 if (macsec_is_offloaded(macsec)) { 3634 const struct macsec_ops *ops; 3635 struct macsec_context ctx; 3636 3637 ops = macsec_get_ops(macsec, &ctx); 3638 if (ops) { 3639 ctx.secy = &macsec->secy; 3640 macsec_offload(ops->mdo_upd_secy, &ctx); 3641 } 3642 } 3643 3644 return 0; 3645 } 3646 3647 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3648 { 3649 struct macsec_dev *macsec = macsec_priv(dev); 3650 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3651 3652 if (macsec->real_dev->mtu - extra < new_mtu) 3653 return -ERANGE; 3654 3655 dev->mtu = new_mtu; 3656 3657 return 0; 3658 } 3659 3660 static void macsec_get_stats64(struct net_device *dev, 3661 struct rtnl_link_stats64 *s) 3662 { 3663 if (!dev->tstats) 3664 return; 3665 3666 dev_fetch_sw_netstats(s, dev->tstats); 3667 3668 s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); 3669 s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); 3670 s->rx_errors = DEV_STATS_READ(dev, rx_errors); 3671 } 3672 3673 static int macsec_get_iflink(const struct net_device *dev) 3674 { 3675 return macsec_priv(dev)->real_dev->ifindex; 3676 } 3677 3678 static const struct net_device_ops macsec_netdev_ops = { 3679 .ndo_init = macsec_dev_init, 3680 .ndo_uninit = macsec_dev_uninit, 3681 .ndo_open = macsec_dev_open, 3682 .ndo_stop = macsec_dev_stop, 3683 .ndo_fix_features = macsec_fix_features, 3684 .ndo_change_mtu = macsec_change_mtu, 3685 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3686 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3687 .ndo_set_mac_address = macsec_set_mac_address, 3688 .ndo_start_xmit = macsec_start_xmit, 3689 .ndo_get_stats64 = macsec_get_stats64, 3690 .ndo_get_iflink = macsec_get_iflink, 3691 }; 3692 3693 static const struct device_type macsec_type = { 3694 .name = "macsec", 3695 
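/* Worked example for macsec_change_mtu() above: with the default
 * GCM-AES-128 suite and an explicit SCI, macsec_extra_len(true) is
 * 16 bytes (2-byte MACsec EtherType + 6-byte SecTAG + 8-byte SCI) and
 * the ICV adds another 16, so the total overhead is 32 bytes and a
 * MACsec device over a 1500-byte lower MTU tops out at 1468.
 */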
}; 3696 3697 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3698 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3699 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3700 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3701 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3702 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3703 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3704 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3705 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3706 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3707 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3708 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3709 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3710 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3711 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, 3712 }; 3713 3714 static void macsec_free_netdev(struct net_device *dev) 3715 { 3716 struct macsec_dev *macsec = macsec_priv(dev); 3717 3718 dst_release(&macsec->secy.tx_sc.md_dst->dst); 3719 free_percpu(macsec->stats); 3720 free_percpu(macsec->secy.tx_sc.stats); 3721 3722 /* Get rid of the macsec's reference to real_dev */ 3723 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3724 } 3725 3726 static void macsec_setup(struct net_device *dev) 3727 { 3728 ether_setup(dev); 3729 dev->min_mtu = 0; 3730 dev->max_mtu = ETH_MAX_MTU; 3731 dev->priv_flags |= IFF_NO_QUEUE; 3732 dev->netdev_ops = &macsec_netdev_ops; 3733 dev->needs_free_netdev = true; 3734 dev->priv_destructor = macsec_free_netdev; 3735 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3736 3737 eth_zero_addr(dev->broadcast); 3738 } 3739 3740 static int macsec_changelink_common(struct net_device *dev, 3741 struct nlattr *data[]) 3742 { 3743 struct macsec_secy *secy; 3744 struct macsec_tx_sc *tx_sc; 3745 3746 secy = &macsec_priv(dev)->secy; 3747 tx_sc = &secy->tx_sc; 3748 3749 if (data[IFLA_MACSEC_ENCODING_SA]) { 3750 struct macsec_tx_sa *tx_sa; 3751 3752 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3753 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3754 3755 secy->operational = tx_sa && tx_sa->active; 3756 } 3757 3758 if (data[IFLA_MACSEC_ENCRYPT]) 3759 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3760 3761 if (data[IFLA_MACSEC_PROTECT]) 3762 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3763 3764 if (data[IFLA_MACSEC_INC_SCI]) 3765 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3766 3767 if (data[IFLA_MACSEC_ES]) 3768 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3769 3770 if (data[IFLA_MACSEC_SCB]) 3771 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3772 3773 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3774 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3775 3776 if (data[IFLA_MACSEC_VALIDATION]) 3777 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3778 3779 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3780 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3781 case MACSEC_CIPHER_ID_GCM_AES_128: 3782 case MACSEC_DEFAULT_CIPHER_ID: 3783 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3784 secy->xpn = false; 3785 break; 3786 case MACSEC_CIPHER_ID_GCM_AES_256: 3787 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3788 secy->xpn = false; 3789 break; 3790 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3791 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3792 secy->xpn = true; 3793 break; 3794 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3795 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3796 secy->xpn = true; 3797 break; 3798 default: 3799 return -EINVAL; 3800 } 3801 } 3802 3803 if 
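/* Note that the IFLA_MACSEC_CIPHER_SUITE branch above only ever runs
 * at link creation time: macsec_changelink() below rejects any request
 * carrying CIPHER_SUITE, ICV_LEN, SCI or PORT, so the cipher suite
 * (and with it secy->key_len and secy->xpn) is fixed for the lifetime
 * of the link.
 */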
(data[IFLA_MACSEC_WINDOW]) { 3804 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3805 3806 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3807 * for XPN cipher suites */ 3808 if (secy->xpn && 3809 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3810 return -EINVAL; 3811 } 3812 3813 return 0; 3814 } 3815 3816 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3817 struct nlattr *data[], 3818 struct netlink_ext_ack *extack) 3819 { 3820 struct macsec_dev *macsec = macsec_priv(dev); 3821 bool macsec_offload_state_change = false; 3822 enum macsec_offload offload; 3823 struct macsec_tx_sc tx_sc; 3824 struct macsec_secy secy; 3825 int ret; 3826 3827 if (!data) 3828 return 0; 3829 3830 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3831 data[IFLA_MACSEC_ICV_LEN] || 3832 data[IFLA_MACSEC_SCI] || 3833 data[IFLA_MACSEC_PORT]) 3834 return -EINVAL; 3835 3836 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3837 * propagation fails, to revert macsec_changelink_common. 3838 */ 3839 memcpy(&secy, &macsec->secy, sizeof(secy)); 3840 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3841 3842 ret = macsec_changelink_common(dev, data); 3843 if (ret) 3844 goto cleanup; 3845 3846 if (data[IFLA_MACSEC_OFFLOAD]) { 3847 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); 3848 if (macsec->offload != offload) { 3849 macsec_offload_state_change = true; 3850 ret = macsec_update_offload(dev, offload); 3851 if (ret) 3852 goto cleanup; 3853 } 3854 } 3855 3856 /* If h/w offloading is available, propagate to the device */ 3857 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { 3858 const struct macsec_ops *ops; 3859 struct macsec_context ctx; 3860 3861 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3862 if (!ops) { 3863 ret = -EOPNOTSUPP; 3864 goto cleanup; 3865 } 3866 3867 ctx.secy = &macsec->secy; 3868 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3869 if (ret) 3870 goto cleanup; 3871 } 3872 3873 return 0; 3874 3875 cleanup: 3876 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3877 memcpy(&macsec->secy, &secy, sizeof(secy)); 3878 3879 return ret; 3880 } 3881 3882 static void macsec_del_dev(struct macsec_dev *macsec) 3883 { 3884 int i; 3885 3886 while (macsec->secy.rx_sc) { 3887 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3888 3889 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3890 free_rx_sc(rx_sc); 3891 } 3892 3893 for (i = 0; i < MACSEC_NUM_AN; i++) { 3894 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3895 3896 if (sa) { 3897 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3898 clear_tx_sa(sa); 3899 } 3900 } 3901 } 3902 3903 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3904 { 3905 struct macsec_dev *macsec = macsec_priv(dev); 3906 struct net_device *real_dev = macsec->real_dev; 3907 3908 /* If h/w offloading is available, propagate to the device */ 3909 if (macsec_is_offloaded(macsec)) { 3910 const struct macsec_ops *ops; 3911 struct macsec_context ctx; 3912 3913 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3914 if (ops) { 3915 ctx.secy = &macsec->secy; 3916 macsec_offload(ops->mdo_del_secy, &ctx); 3917 } 3918 } 3919 3920 unregister_netdevice_queue(dev, head); 3921 list_del_rcu(&macsec->secys); 3922 macsec_del_dev(macsec); 3923 netdev_upper_dev_unlink(real_dev, dev); 3924 3925 macsec_generation++; 3926 } 3927 3928 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3929 { 3930 struct macsec_dev *macsec = macsec_priv(dev); 3931 
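/* macsec_changelink() above implements rollback by value: it copies
 * the whole secy and tx_sc structures aside before mutating them and
 * memcpy()s them back if the offload update fails. That is only safe
 * because macsec_changelink_common() touches nothing but scalar fields
 * under RTNL; the RCU-managed SA pointers embedded in tx_sc are never
 * changed on this path, so restoring them bit-for-bit is a no-op.
 */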
struct net_device *real_dev = macsec->real_dev; 3932 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3933 3934 macsec_common_dellink(dev, head); 3935 3936 if (list_empty(&rxd->secys)) { 3937 netdev_rx_handler_unregister(real_dev); 3938 kfree(rxd); 3939 } 3940 } 3941 3942 static int register_macsec_dev(struct net_device *real_dev, 3943 struct net_device *dev) 3944 { 3945 struct macsec_dev *macsec = macsec_priv(dev); 3946 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3947 3948 if (!rxd) { 3949 int err; 3950 3951 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3952 if (!rxd) 3953 return -ENOMEM; 3954 3955 INIT_LIST_HEAD(&rxd->secys); 3956 3957 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3958 rxd); 3959 if (err < 0) { 3960 kfree(rxd); 3961 return err; 3962 } 3963 } 3964 3965 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3966 return 0; 3967 } 3968 3969 static bool sci_exists(struct net_device *dev, sci_t sci) 3970 { 3971 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3972 struct macsec_dev *macsec; 3973 3974 list_for_each_entry(macsec, &rxd->secys, secys) { 3975 if (macsec->secy.sci == sci) 3976 return true; 3977 } 3978 3979 return false; 3980 } 3981 3982 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3983 { 3984 return make_sci(dev->dev_addr, port); 3985 } 3986 3987 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3988 { 3989 struct macsec_dev *macsec = macsec_priv(dev); 3990 struct macsec_secy *secy = &macsec->secy; 3991 3992 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3993 if (!macsec->stats) 3994 return -ENOMEM; 3995 3996 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3997 if (!secy->tx_sc.stats) 3998 return -ENOMEM; 3999 4000 secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); 4001 if (!secy->tx_sc.md_dst) 4002 /* macsec and secy percpu stats will be freed when unregistering 4003 * net_device in macsec_free_netdev() 4004 */ 4005 return -ENOMEM; 4006 4007 if (sci == MACSEC_UNDEF_SCI) 4008 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4009 4010 secy->netdev = dev; 4011 secy->operational = true; 4012 secy->key_len = DEFAULT_SAK_LEN; 4013 secy->icv_len = icv_len; 4014 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 4015 secy->protect_frames = true; 4016 secy->replay_protect = false; 4017 secy->xpn = DEFAULT_XPN; 4018 4019 secy->sci = sci; 4020 secy->tx_sc.md_dst->u.macsec_info.sci = sci; 4021 secy->tx_sc.active = true; 4022 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 4023 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 4024 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 4025 secy->tx_sc.end_station = false; 4026 secy->tx_sc.scb = false; 4027 4028 return 0; 4029 } 4030 4031 static struct lock_class_key macsec_netdev_addr_lock_key; 4032 4033 static int macsec_newlink(struct net *net, struct net_device *dev, 4034 struct nlattr *tb[], struct nlattr *data[], 4035 struct netlink_ext_ack *extack) 4036 { 4037 struct macsec_dev *macsec = macsec_priv(dev); 4038 rx_handler_func_t *rx_handler; 4039 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4040 struct net_device *real_dev; 4041 int err, mtu; 4042 sci_t sci; 4043 4044 if (!tb[IFLA_LINK]) 4045 return -EINVAL; 4046 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4047 if (!real_dev) 4048 return -ENODEV; 4049 if (real_dev->type != ARPHRD_ETHER) 4050 return -EINVAL; 4051 4052 dev->priv_flags |= IFF_MACSEC; 4053 4054 macsec->real_dev = real_dev; 4055 4056 if (data && data[IFLA_MACSEC_OFFLOAD]) 4057 macsec->offload = 
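/* SCI construction, as used by dev_to_sci() above: per IEEE 802.1AE
 * the 64-bit secure channel identifier is the 6-byte MAC address
 * followed by a 2-byte port number, both in network order. A device
 * with address aa:bb:cc:dd:ee:ff using the End Station port (1) thus
 * gets SCI aa-bb-cc-dd-ee-ff:0001; that is also the SCI a receiver
 * infers for frames whose SecTAG omits the explicit SCI.
 */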
nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4058 else 4059 /* MACsec offloading is off by default */ 4060 macsec->offload = MACSEC_OFFLOAD_OFF; 4061 4062 /* Check if the offloading mode is supported by the underlying layers */ 4063 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4064 !macsec_check_offload(macsec->offload, macsec)) 4065 return -EOPNOTSUPP; 4066 4067 /* send_sci must be set to true when the transmit SCI is explicitly set */ 4068 if ((data && data[IFLA_MACSEC_SCI]) && 4069 (data && data[IFLA_MACSEC_INC_SCI])) { 4070 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4071 4072 if (!send_sci) 4073 return -EINVAL; 4074 } 4075 4076 if (data && data[IFLA_MACSEC_ICV_LEN]) 4077 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4078 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4079 if (mtu < 0) 4080 dev->mtu = 0; 4081 else 4082 dev->mtu = mtu; 4083 4084 rx_handler = rtnl_dereference(real_dev->rx_handler); 4085 if (rx_handler && rx_handler != macsec_handle_frame) 4086 return -EBUSY; 4087 4088 err = register_netdevice(dev); 4089 if (err < 0) 4090 return err; 4091 4092 netdev_lockdep_set_classes(dev); 4093 lockdep_set_class(&dev->addr_list_lock, 4094 &macsec_netdev_addr_lock_key); 4095 4096 err = netdev_upper_dev_link(real_dev, dev, extack); 4097 if (err < 0) 4098 goto unregister; 4099 4100 /* the device needs to be registered already so that ->init has run and 4101 * the MAC addr is set 4102 */ 4103 if (data && data[IFLA_MACSEC_SCI]) 4104 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4105 else if (data && data[IFLA_MACSEC_PORT]) 4106 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4107 else 4108 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4109 4110 if (rx_handler && sci_exists(real_dev, sci)) { 4111 err = -EBUSY; 4112 goto unlink; 4113 } 4114 4115 err = macsec_add_dev(dev, sci, icv_len); 4116 if (err) 4117 goto unlink; 4118 4119 if (data) { 4120 err = macsec_changelink_common(dev, data); 4121 if (err) 4122 goto del_dev; 4123 } 4124 4125 /* If h/w offloading is available, propagate to the device */ 4126 if (macsec_is_offloaded(macsec)) { 4127 const struct macsec_ops *ops; 4128 struct macsec_context ctx; 4129 4130 ops = macsec_get_ops(macsec, &ctx); 4131 if (ops) { 4132 ctx.secy = &macsec->secy; 4133 err = macsec_offload(ops->mdo_add_secy, &ctx); 4134 if (err) 4135 goto del_dev; 4136 } 4137 } 4138 4139 err = register_macsec_dev(real_dev, dev); 4140 if (err < 0) 4141 goto del_dev; 4142 4143 netif_stacked_transfer_operstate(real_dev, dev); 4144 linkwatch_fire_event(dev); 4145 4146 macsec_generation++; 4147 4148 return 0; 4149 4150 del_dev: 4151 macsec_del_dev(macsec); 4152 unlink: 4153 netdev_upper_dev_unlink(real_dev, dev); 4154 unregister: 4155 unregister_netdevice(dev); 4156 return err; 4157 } 4158 4159 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4160 struct netlink_ext_ack *extack) 4161 { 4162 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4163 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4164 int flag; 4165 bool es, scb, sci; 4166 4167 if (!data) 4168 return 0; 4169 4170 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4171 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4172 4173 if (data[IFLA_MACSEC_ICV_LEN]) { 4174 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4175 if (icv_len != MACSEC_DEFAULT_ICV_LEN) { 4176 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4177 struct crypto_aead *dummy_tfm; 4178 4179 dummy_tfm = macsec_alloc_tfm(dummy_key, 4180 DEFAULT_SAK_LEN, 4181 icv_len); 4182 if (IS_ERR(dummy_tfm)) 4183 return PTR_ERR(dummy_tfm); 4184 crypto_free_aead(dummy_tfm); 4185 } 4186
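/* Non-default ICV lengths are validated above by the cheapest probe
 * available: allocate a throwaway AEAD transform with that authsize
 * and free it again. If the crypto layer rejects the key-length/ICV
 * combination, link creation fails here instead of at first use.
 */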
	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}
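
/* Report the current SecY configuration back to userspace. The advertised
 * cipher suite ID is derived from the SAK length and the XPN flag; note that
 * 128-bit GCM-AES without XPN is reported as MACSEC_DEFAULT_CIPHER_ID rather
 * than MACSEC_CIPHER_ID_GCM_AES_128, matching what userspace has historically
 * been given for the default suite.
 */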
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
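
/* Typical userspace usage of the rtnetlink/genetlink interfaces registered
 * below, via iproute2. Illustrative only: the addresses and keys are
 * placeholders, and option syntax may vary between iproute2 versions.
 *
 *   # create a MACsec device stacked on eth0
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   # configure a transmit SA, then a peer receive SC and SA
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit-hex-key>
 *   ip macsec add macsec0 rx port 1 address <peer-mac>
 *   ip macsec add macsec0 rx port 1 address <peer-mac> sa 0 pn 1 on \
 *           key 02 <128-bit-hex-key>
 */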
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");