1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * drivers/net/macsec.c - MACsec device 4 * 5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> 6 */ 7 8 #include <linux/types.h> 9 #include <linux/skbuff.h> 10 #include <linux/socket.h> 11 #include <linux/module.h> 12 #include <crypto/aead.h> 13 #include <linux/etherdevice.h> 14 #include <linux/netdevice.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/refcount.h> 17 #include <net/genetlink.h> 18 #include <net/sock.h> 19 #include <net/gro_cells.h> 20 #include <net/macsec.h> 21 #include <linux/phy.h> 22 #include <linux/byteorder/generic.h> 23 #include <linux/if_arp.h> 24 25 #include <uapi/linux/if_macsec.h> 26 27 #define MACSEC_SCI_LEN 8 28 29 /* SecTAG length = macsec_eth_header without the optional SCI */ 30 #define MACSEC_TAG_LEN 6 31 32 struct macsec_eth_header { 33 struct ethhdr eth; 34 /* SecTAG */ 35 u8 tci_an; 36 #if defined(__LITTLE_ENDIAN_BITFIELD) 37 u8 short_length:6, 38 unused:2; 39 #elif defined(__BIG_ENDIAN_BITFIELD) 40 u8 unused:2, 41 short_length:6; 42 #else 43 #error "Please fix <asm/byteorder.h>" 44 #endif 45 __be32 packet_number; 46 u8 secure_channel_id[8]; /* optional */ 47 } __packed; 48 49 #define MACSEC_TCI_VERSION 0x80 50 #define MACSEC_TCI_ES 0x40 /* end station */ 51 #define MACSEC_TCI_SC 0x20 /* SCI present */ 52 #define MACSEC_TCI_SCB 0x10 /* epon */ 53 #define MACSEC_TCI_E 0x08 /* encryption */ 54 #define MACSEC_TCI_C 0x04 /* changed text */ 55 #define MACSEC_AN_MASK 0x03 /* association number */ 56 #define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) 57 58 /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ 59 #define MIN_NON_SHORT_LEN 48 60 61 #define GCM_AES_IV_LEN 12 62 #define DEFAULT_ICV_LEN 16 63 64 #define for_each_rxsc(secy, sc) \ 65 for (sc = rcu_dereference_bh(secy->rx_sc); \ 66 sc; \ 67 sc = rcu_dereference_bh(sc->next)) 68 #define for_each_rxsc_rtnl(secy, sc) \ 69 for (sc = rtnl_dereference(secy->rx_sc); \ 70 sc; \ 71 sc = rtnl_dereference(sc->next)) 72 73 #define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31))) 74 75 struct gcm_iv_xpn { 76 union { 77 u8 short_secure_channel_id[4]; 78 ssci_t ssci; 79 }; 80 __be64 pn; 81 } __packed; 82 83 struct gcm_iv { 84 union { 85 u8 secure_channel_id[8]; 86 sci_t sci; 87 }; 88 __be32 pn; 89 }; 90 91 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT 92 93 struct pcpu_secy_stats { 94 struct macsec_dev_stats stats; 95 struct u64_stats_sync syncp; 96 }; 97 98 /** 99 * struct macsec_dev - private data 100 * @secy: SecY config 101 * @real_dev: pointer to underlying netdevice 102 * @dev_tracker: refcount tracker for @real_dev reference 103 * @stats: MACsec device stats 104 * @secys: linked list of SecY's on the underlying device 105 * @gro_cells: pointer to the Generic Receive Offload cell 106 * @offload: status of offloading on the MACsec device 107 */ 108 struct macsec_dev { 109 struct macsec_secy secy; 110 struct net_device *real_dev; 111 netdevice_tracker dev_tracker; 112 struct pcpu_secy_stats __percpu *stats; 113 struct list_head secys; 114 struct gro_cells gro_cells; 115 enum macsec_offload offload; 116 }; 117 118 /** 119 * struct macsec_rxh_data - rx_handler private argument 120 * @secys: linked list of SecY's on this underlying device 121 */ 122 struct macsec_rxh_data { 123 struct list_head secys; 124 }; 125 126 static struct macsec_dev *macsec_priv(const struct net_device *dev) 127 { 128 return (struct macsec_dev *)netdev_priv(dev); 129 } 130 131 static struct macsec_rxh_data *macsec_data_rcu(const 
struct net_device *dev) 132 { 133 return rcu_dereference_bh(dev->rx_handler_data); 134 } 135 136 static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev) 137 { 138 return rtnl_dereference(dev->rx_handler_data); 139 } 140 141 struct macsec_cb { 142 struct aead_request *req; 143 union { 144 struct macsec_tx_sa *tx_sa; 145 struct macsec_rx_sa *rx_sa; 146 }; 147 u8 assoc_num; 148 bool valid; 149 bool has_sci; 150 }; 151 152 static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) 153 { 154 struct macsec_rx_sa *sa = rcu_dereference_bh(ptr); 155 156 if (!sa || !sa->active) 157 return NULL; 158 159 if (!refcount_inc_not_zero(&sa->refcnt)) 160 return NULL; 161 162 return sa; 163 } 164 165 static void free_rx_sc_rcu(struct rcu_head *head) 166 { 167 struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); 168 169 free_percpu(rx_sc->stats); 170 kfree(rx_sc); 171 } 172 173 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) 174 { 175 return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL; 176 } 177 178 static void macsec_rxsc_put(struct macsec_rx_sc *sc) 179 { 180 if (refcount_dec_and_test(&sc->refcnt)) 181 call_rcu(&sc->rcu_head, free_rx_sc_rcu); 182 } 183 184 static void free_rxsa(struct rcu_head *head) 185 { 186 struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); 187 188 crypto_free_aead(sa->key.tfm); 189 free_percpu(sa->stats); 190 kfree(sa); 191 } 192 193 static void macsec_rxsa_put(struct macsec_rx_sa *sa) 194 { 195 if (refcount_dec_and_test(&sa->refcnt)) 196 call_rcu(&sa->rcu, free_rxsa); 197 } 198 199 static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) 200 { 201 struct macsec_tx_sa *sa = rcu_dereference_bh(ptr); 202 203 if (!sa || !sa->active) 204 return NULL; 205 206 if (!refcount_inc_not_zero(&sa->refcnt)) 207 return NULL; 208 209 return sa; 210 } 211 212 static void free_txsa(struct rcu_head *head) 213 { 214 struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); 215 216 crypto_free_aead(sa->key.tfm); 217 free_percpu(sa->stats); 218 kfree(sa); 219 } 220 221 static void macsec_txsa_put(struct macsec_tx_sa *sa) 222 { 223 if (refcount_dec_and_test(&sa->refcnt)) 224 call_rcu(&sa->rcu, free_txsa); 225 } 226 227 static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) 228 { 229 BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb)); 230 return (struct macsec_cb *)skb->cb; 231 } 232 233 #define MACSEC_PORT_ES (htons(0x0001)) 234 #define MACSEC_PORT_SCB (0x0000) 235 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) 236 #define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff) 237 238 #define MACSEC_GCM_AES_128_SAK_LEN 16 239 #define MACSEC_GCM_AES_256_SAK_LEN 32 240 241 #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN 242 #define DEFAULT_XPN false 243 #define DEFAULT_SEND_SCI true 244 #define DEFAULT_ENCRYPT false 245 #define DEFAULT_ENCODING_SA 0 246 #define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) 247 248 static bool send_sci(const struct macsec_secy *secy) 249 { 250 const struct macsec_tx_sc *tx_sc = &secy->tx_sc; 251 252 return tx_sc->send_sci || 253 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); 254 } 255 256 static sci_t make_sci(const u8 *addr, __be16 port) 257 { 258 sci_t sci; 259 260 memcpy(&sci, addr, ETH_ALEN); 261 memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port)); 262 263 return sci; 264 } 265 266 static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) 267 { 268 sci_t sci; 269 270 if 
(sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
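 *
 * Typical usage, as a sketch of what the netlink handlers further down in
 * this file do (fields other than ctx.secy are filled as each operation
 * requires):
 *
 *	struct macsec_context ctx;
 *	const struct macsec_ops *ops;
 *
 *	ops = macsec_get_ops(netdev_priv(dev), &ctx);
 *	if (!ops)
 *		return -EOPNOTSUPP;
 *	ctx.secy = secy;
 *	err = macsec_offload(ops->mdo_add_rxsa, &ctx);
 *
 * Returns NULL when offloading is disabled on this interface or when the
 * underlying device (or its PHY) does not provide macsec_ops.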
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats =
this_cpu_ptr(tx_sc->stats); 507 508 u64_stats_update_begin(&txsc_stats->syncp); 509 if (tx_sc->encrypt) { 510 txsc_stats->stats.OutOctetsEncrypted += skb->len; 511 txsc_stats->stats.OutPktsEncrypted++; 512 this_cpu_inc(tx_sa->stats->OutPktsEncrypted); 513 } else { 514 txsc_stats->stats.OutOctetsProtected += skb->len; 515 txsc_stats->stats.OutPktsProtected++; 516 this_cpu_inc(tx_sa->stats->OutPktsProtected); 517 } 518 u64_stats_update_end(&txsc_stats->syncp); 519 } 520 521 static void count_tx(struct net_device *dev, int ret, int len) 522 { 523 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 524 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); 525 526 u64_stats_update_begin(&stats->syncp); 527 u64_stats_inc(&stats->tx_packets); 528 u64_stats_add(&stats->tx_bytes, len); 529 u64_stats_update_end(&stats->syncp); 530 } 531 } 532 533 static void macsec_encrypt_done(struct crypto_async_request *base, int err) 534 { 535 struct sk_buff *skb = base->data; 536 struct net_device *dev = skb->dev; 537 struct macsec_dev *macsec = macsec_priv(dev); 538 struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; 539 int len, ret; 540 541 aead_request_free(macsec_skb_cb(skb)->req); 542 543 rcu_read_lock_bh(); 544 macsec_encrypt_finish(skb, dev); 545 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 546 len = skb->len; 547 ret = dev_queue_xmit(skb); 548 count_tx(dev, ret, len); 549 rcu_read_unlock_bh(); 550 551 macsec_txsa_put(sa); 552 dev_put(dev); 553 } 554 555 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, 556 unsigned char **iv, 557 struct scatterlist **sg, 558 int num_frags) 559 { 560 size_t size, iv_offset, sg_offset; 561 struct aead_request *req; 562 void *tmp; 563 564 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm); 565 iv_offset = size; 566 size += GCM_AES_IV_LEN; 567 568 size = ALIGN(size, __alignof__(struct scatterlist)); 569 sg_offset = size; 570 size += sizeof(struct scatterlist) * num_frags; 571 572 tmp = kmalloc(size, GFP_ATOMIC); 573 if (!tmp) 574 return NULL; 575 576 *iv = (unsigned char *)(tmp + iv_offset); 577 *sg = (struct scatterlist *)(tmp + sg_offset); 578 req = tmp; 579 580 aead_request_set_tfm(req, tfm); 581 582 return req; 583 } 584 585 static struct sk_buff *macsec_encrypt(struct sk_buff *skb, 586 struct net_device *dev) 587 { 588 int ret; 589 struct scatterlist *sg; 590 struct sk_buff *trailer; 591 unsigned char *iv; 592 struct ethhdr *eth; 593 struct macsec_eth_header *hh; 594 size_t unprotected_len; 595 struct aead_request *req; 596 struct macsec_secy *secy; 597 struct macsec_tx_sc *tx_sc; 598 struct macsec_tx_sa *tx_sa; 599 struct macsec_dev *macsec = macsec_priv(dev); 600 bool sci_present; 601 pn_t pn; 602 603 secy = &macsec->secy; 604 tx_sc = &secy->tx_sc; 605 606 /* 10.5.1 TX SA assignment */ 607 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); 608 if (!tx_sa) { 609 secy->operational = false; 610 kfree_skb(skb); 611 return ERR_PTR(-EINVAL); 612 } 613 614 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 615 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 616 struct sk_buff *nskb = skb_copy_expand(skb, 617 MACSEC_NEEDED_HEADROOM, 618 MACSEC_NEEDED_TAILROOM, 619 GFP_ATOMIC); 620 if (likely(nskb)) { 621 consume_skb(skb); 622 skb = nskb; 623 } else { 624 macsec_txsa_put(tx_sa); 625 kfree_skb(skb); 626 return ERR_PTR(-ENOMEM); 627 } 628 } else { 629 skb = skb_unshare(skb, GFP_ATOMIC); 630 if (!skb) { 631 macsec_txsa_put(tx_sa); 632 return ERR_PTR(-ENOMEM); 633 } 634 } 635 636 unprotected_len 
= skb->len; 637 eth = eth_hdr(skb); 638 sci_present = send_sci(secy); 639 hh = skb_push(skb, macsec_extra_len(sci_present)); 640 memmove(hh, eth, 2 * ETH_ALEN); 641 642 pn = tx_sa_update_pn(tx_sa, secy); 643 if (pn.full64 == 0) { 644 macsec_txsa_put(tx_sa); 645 kfree_skb(skb); 646 return ERR_PTR(-ENOLINK); 647 } 648 macsec_fill_sectag(hh, secy, pn.lower, sci_present); 649 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 650 651 skb_put(skb, secy->icv_len); 652 653 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { 654 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 655 656 u64_stats_update_begin(&secy_stats->syncp); 657 secy_stats->stats.OutPktsTooLong++; 658 u64_stats_update_end(&secy_stats->syncp); 659 660 macsec_txsa_put(tx_sa); 661 kfree_skb(skb); 662 return ERR_PTR(-EINVAL); 663 } 664 665 ret = skb_cow_data(skb, 0, &trailer); 666 if (unlikely(ret < 0)) { 667 macsec_txsa_put(tx_sa); 668 kfree_skb(skb); 669 return ERR_PTR(ret); 670 } 671 672 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret); 673 if (!req) { 674 macsec_txsa_put(tx_sa); 675 kfree_skb(skb); 676 return ERR_PTR(-ENOMEM); 677 } 678 679 if (secy->xpn) 680 macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt); 681 else 682 macsec_fill_iv(iv, secy->sci, pn.lower); 683 684 sg_init_table(sg, ret); 685 ret = skb_to_sgvec(skb, sg, 0, skb->len); 686 if (unlikely(ret < 0)) { 687 aead_request_free(req); 688 macsec_txsa_put(tx_sa); 689 kfree_skb(skb); 690 return ERR_PTR(ret); 691 } 692 693 if (tx_sc->encrypt) { 694 int len = skb->len - macsec_hdr_len(sci_present) - 695 secy->icv_len; 696 aead_request_set_crypt(req, sg, sg, len, iv); 697 aead_request_set_ad(req, macsec_hdr_len(sci_present)); 698 } else { 699 aead_request_set_crypt(req, sg, sg, 0, iv); 700 aead_request_set_ad(req, skb->len - secy->icv_len); 701 } 702 703 macsec_skb_cb(skb)->req = req; 704 macsec_skb_cb(skb)->tx_sa = tx_sa; 705 aead_request_set_callback(req, 0, macsec_encrypt_done, skb); 706 707 dev_hold(skb->dev); 708 ret = crypto_aead_encrypt(req); 709 if (ret == -EINPROGRESS) { 710 return ERR_PTR(ret); 711 } else if (ret != 0) { 712 dev_put(skb->dev); 713 kfree_skb(skb); 714 aead_request_free(req); 715 macsec_txsa_put(tx_sa); 716 return ERR_PTR(-EINVAL); 717 } 718 719 dev_put(skb->dev); 720 aead_request_free(req); 721 macsec_txsa_put(tx_sa); 722 723 return skb; 724 } 725 726 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) 727 { 728 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 729 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); 730 struct macsec_eth_header *hdr = macsec_ethhdr(skb); 731 u32 lowest_pn = 0; 732 733 spin_lock(&rx_sa->lock); 734 if (rx_sa->next_pn_halves.lower >= secy->replay_window) 735 lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window; 736 737 /* Now perform replay protection check again 738 * (see IEEE 802.1AE-2006 figure 10-5) 739 */ 740 if (secy->replay_protect && pn < lowest_pn && 741 (!secy->xpn || pn_same_half(pn, lowest_pn))) { 742 spin_unlock(&rx_sa->lock); 743 u64_stats_update_begin(&rxsc_stats->syncp); 744 rxsc_stats->stats.InPktsLate++; 745 u64_stats_update_end(&rxsc_stats->syncp); 746 return false; 747 } 748 749 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { 750 u64_stats_update_begin(&rxsc_stats->syncp); 751 if (hdr->tci_an & MACSEC_TCI_E) 752 rxsc_stats->stats.InOctetsDecrypted += skb->len; 753 else 754 rxsc_stats->stats.InOctetsValidated += skb->len; 755 u64_stats_update_end(&rxsc_stats->syncp); 756 
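		/* Note: the Decrypted vs. Validated octet counters above are
		 * chosen from the TCI E bit of the received SecTAG, not from
		 * the local validation policy.
		 */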
} 757 758 if (!macsec_skb_cb(skb)->valid) { 759 spin_unlock(&rx_sa->lock); 760 761 /* 10.6.5 */ 762 if (hdr->tci_an & MACSEC_TCI_C || 763 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 764 u64_stats_update_begin(&rxsc_stats->syncp); 765 rxsc_stats->stats.InPktsNotValid++; 766 u64_stats_update_end(&rxsc_stats->syncp); 767 return false; 768 } 769 770 u64_stats_update_begin(&rxsc_stats->syncp); 771 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { 772 rxsc_stats->stats.InPktsInvalid++; 773 this_cpu_inc(rx_sa->stats->InPktsInvalid); 774 } else if (pn < lowest_pn) { 775 rxsc_stats->stats.InPktsDelayed++; 776 } else { 777 rxsc_stats->stats.InPktsUnchecked++; 778 } 779 u64_stats_update_end(&rxsc_stats->syncp); 780 } else { 781 u64_stats_update_begin(&rxsc_stats->syncp); 782 if (pn < lowest_pn) { 783 rxsc_stats->stats.InPktsDelayed++; 784 } else { 785 rxsc_stats->stats.InPktsOK++; 786 this_cpu_inc(rx_sa->stats->InPktsOK); 787 } 788 u64_stats_update_end(&rxsc_stats->syncp); 789 790 // Instead of "pn >=" - to support pn overflow in xpn 791 if (pn + 1 > rx_sa->next_pn_halves.lower) { 792 rx_sa->next_pn_halves.lower = pn + 1; 793 } else if (secy->xpn && 794 !pn_same_half(pn, rx_sa->next_pn_halves.lower)) { 795 rx_sa->next_pn_halves.upper++; 796 rx_sa->next_pn_halves.lower = pn + 1; 797 } 798 799 spin_unlock(&rx_sa->lock); 800 } 801 802 return true; 803 } 804 805 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) 806 { 807 skb->pkt_type = PACKET_HOST; 808 skb->protocol = eth_type_trans(skb, dev); 809 810 skb_reset_network_header(skb); 811 if (!skb_transport_header_was_set(skb)) 812 skb_reset_transport_header(skb); 813 skb_reset_mac_len(skb); 814 } 815 816 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) 817 { 818 skb->ip_summed = CHECKSUM_NONE; 819 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); 820 skb_pull(skb, hdr_len); 821 pskb_trim_unique(skb, skb->len - icv_len); 822 } 823 824 static void count_rx(struct net_device *dev, int len) 825 { 826 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); 827 828 u64_stats_update_begin(&stats->syncp); 829 u64_stats_inc(&stats->rx_packets); 830 u64_stats_add(&stats->rx_bytes, len); 831 u64_stats_update_end(&stats->syncp); 832 } 833 834 static void macsec_decrypt_done(struct crypto_async_request *base, int err) 835 { 836 struct sk_buff *skb = base->data; 837 struct net_device *dev = skb->dev; 838 struct macsec_dev *macsec = macsec_priv(dev); 839 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; 840 struct macsec_rx_sc *rx_sc = rx_sa->sc; 841 int len; 842 u32 pn; 843 844 aead_request_free(macsec_skb_cb(skb)->req); 845 846 if (!err) 847 macsec_skb_cb(skb)->valid = true; 848 849 rcu_read_lock_bh(); 850 pn = ntohl(macsec_ethhdr(skb)->packet_number); 851 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { 852 rcu_read_unlock_bh(); 853 kfree_skb(skb); 854 goto out; 855 } 856 857 macsec_finalize_skb(skb, macsec->secy.icv_len, 858 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 859 macsec_reset_skb(skb, macsec->secy.netdev); 860 861 len = skb->len; 862 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) 863 count_rx(dev, len); 864 865 rcu_read_unlock_bh(); 866 867 out: 868 macsec_rxsa_put(rx_sa); 869 macsec_rxsc_put(rx_sc); 870 dev_put(dev); 871 } 872 873 static struct sk_buff *macsec_decrypt(struct sk_buff *skb, 874 struct net_device *dev, 875 struct macsec_rx_sa *rx_sa, 876 sci_t sci, 877 struct macsec_secy *secy) 878 { 879 int ret; 880 struct scatterlist *sg; 881 struct 
sk_buff *trailer; 882 unsigned char *iv; 883 struct aead_request *req; 884 struct macsec_eth_header *hdr; 885 u32 hdr_pn; 886 u16 icv_len = secy->icv_len; 887 888 macsec_skb_cb(skb)->valid = false; 889 skb = skb_share_check(skb, GFP_ATOMIC); 890 if (!skb) 891 return ERR_PTR(-ENOMEM); 892 893 ret = skb_cow_data(skb, 0, &trailer); 894 if (unlikely(ret < 0)) { 895 kfree_skb(skb); 896 return ERR_PTR(ret); 897 } 898 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret); 899 if (!req) { 900 kfree_skb(skb); 901 return ERR_PTR(-ENOMEM); 902 } 903 904 hdr = (struct macsec_eth_header *)skb->data; 905 hdr_pn = ntohl(hdr->packet_number); 906 907 if (secy->xpn) { 908 pn_t recovered_pn = rx_sa->next_pn_halves; 909 910 recovered_pn.lower = hdr_pn; 911 if (hdr_pn < rx_sa->next_pn_halves.lower && 912 !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower)) 913 recovered_pn.upper++; 914 915 macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64, 916 rx_sa->key.salt); 917 } else { 918 macsec_fill_iv(iv, sci, hdr_pn); 919 } 920 921 sg_init_table(sg, ret); 922 ret = skb_to_sgvec(skb, sg, 0, skb->len); 923 if (unlikely(ret < 0)) { 924 aead_request_free(req); 925 kfree_skb(skb); 926 return ERR_PTR(ret); 927 } 928 929 if (hdr->tci_an & MACSEC_TCI_E) { 930 /* confidentiality: ethernet + macsec header 931 * authenticated, encrypted payload 932 */ 933 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); 934 935 aead_request_set_crypt(req, sg, sg, len, iv); 936 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); 937 skb = skb_unshare(skb, GFP_ATOMIC); 938 if (!skb) { 939 aead_request_free(req); 940 return ERR_PTR(-ENOMEM); 941 } 942 } else { 943 /* integrity only: all headers + data authenticated */ 944 aead_request_set_crypt(req, sg, sg, icv_len, iv); 945 aead_request_set_ad(req, skb->len - icv_len); 946 } 947 948 macsec_skb_cb(skb)->req = req; 949 skb->dev = dev; 950 aead_request_set_callback(req, 0, macsec_decrypt_done, skb); 951 952 dev_hold(dev); 953 ret = crypto_aead_decrypt(req); 954 if (ret == -EINPROGRESS) { 955 return ERR_PTR(ret); 956 } else if (ret != 0) { 957 /* decryption/authentication failed 958 * 10.6 if validateFrames is disabled, deliver anyway 959 */ 960 if (ret != -EBADMSG) { 961 kfree_skb(skb); 962 skb = ERR_PTR(ret); 963 } 964 } else { 965 macsec_skb_cb(skb)->valid = true; 966 } 967 dev_put(dev); 968 969 aead_request_free(req); 970 971 return skb; 972 } 973 974 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) 975 { 976 struct macsec_rx_sc *rx_sc; 977 978 for_each_rxsc(secy, rx_sc) { 979 if (rx_sc->sci == sci) 980 return rx_sc; 981 } 982 983 return NULL; 984 } 985 986 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) 987 { 988 struct macsec_rx_sc *rx_sc; 989 990 for_each_rxsc_rtnl(secy, rx_sc) { 991 if (rx_sc->sci == sci) 992 return rx_sc; 993 } 994 995 return NULL; 996 } 997 998 static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) 999 { 1000 /* Deliver to the uncontrolled port by default */ 1001 enum rx_handler_result ret = RX_HANDLER_PASS; 1002 struct ethhdr *hdr = eth_hdr(skb); 1003 struct macsec_rxh_data *rxd; 1004 struct macsec_dev *macsec; 1005 1006 rcu_read_lock(); 1007 rxd = macsec_data_rcu(skb->dev); 1008 1009 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1010 struct sk_buff *nskb; 1011 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); 1012 struct net_device *ndev = macsec->secy.netdev; 1013 1014 /* If h/w offloading is enabled, HW decodes frames and strips 1015 
* the SecTAG, so we have to deduce which port to deliver to. 1016 */ 1017 if (macsec_is_offloaded(macsec) && netif_running(ndev)) { 1018 if (ether_addr_equal_64bits(hdr->h_dest, 1019 ndev->dev_addr)) { 1020 /* exact match, divert skb to this port */ 1021 skb->dev = ndev; 1022 skb->pkt_type = PACKET_HOST; 1023 ret = RX_HANDLER_ANOTHER; 1024 goto out; 1025 } else if (is_multicast_ether_addr_64bits( 1026 hdr->h_dest)) { 1027 /* multicast frame, deliver on this port too */ 1028 nskb = skb_clone(skb, GFP_ATOMIC); 1029 if (!nskb) 1030 break; 1031 1032 nskb->dev = ndev; 1033 if (ether_addr_equal_64bits(hdr->h_dest, 1034 ndev->broadcast)) 1035 nskb->pkt_type = PACKET_BROADCAST; 1036 else 1037 nskb->pkt_type = PACKET_MULTICAST; 1038 1039 __netif_rx(nskb); 1040 } 1041 continue; 1042 } 1043 1044 /* 10.6 If the management control validateFrames is not 1045 * Strict, frames without a SecTAG are received, counted, and 1046 * delivered to the Controlled Port 1047 */ 1048 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1049 u64_stats_update_begin(&secy_stats->syncp); 1050 secy_stats->stats.InPktsNoTag++; 1051 u64_stats_update_end(&secy_stats->syncp); 1052 continue; 1053 } 1054 1055 /* deliver on this port */ 1056 nskb = skb_clone(skb, GFP_ATOMIC); 1057 if (!nskb) 1058 break; 1059 1060 nskb->dev = ndev; 1061 1062 if (__netif_rx(nskb) == NET_RX_SUCCESS) { 1063 u64_stats_update_begin(&secy_stats->syncp); 1064 secy_stats->stats.InPktsUntagged++; 1065 u64_stats_update_end(&secy_stats->syncp); 1066 } 1067 } 1068 1069 out: 1070 rcu_read_unlock(); 1071 return ret; 1072 } 1073 1074 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 1075 { 1076 struct sk_buff *skb = *pskb; 1077 struct net_device *dev = skb->dev; 1078 struct macsec_eth_header *hdr; 1079 struct macsec_secy *secy = NULL; 1080 struct macsec_rx_sc *rx_sc; 1081 struct macsec_rx_sa *rx_sa; 1082 struct macsec_rxh_data *rxd; 1083 struct macsec_dev *macsec; 1084 unsigned int len; 1085 sci_t sci; 1086 u32 hdr_pn; 1087 bool cbit; 1088 struct pcpu_rx_sc_stats *rxsc_stats; 1089 struct pcpu_secy_stats *secy_stats; 1090 bool pulled_sci; 1091 int ret; 1092 1093 if (skb_headroom(skb) < ETH_HLEN) 1094 goto drop_direct; 1095 1096 hdr = macsec_ethhdr(skb); 1097 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) 1098 return handle_not_macsec(skb); 1099 1100 skb = skb_unshare(skb, GFP_ATOMIC); 1101 *pskb = skb; 1102 if (!skb) 1103 return RX_HANDLER_CONSUMED; 1104 1105 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); 1106 if (!pulled_sci) { 1107 if (!pskb_may_pull(skb, macsec_extra_len(false))) 1108 goto drop_direct; 1109 } 1110 1111 hdr = macsec_ethhdr(skb); 1112 1113 /* Frames with a SecTAG that has the TCI E bit set but the C 1114 * bit clear are discarded, as this reserved encoding is used 1115 * to identify frames with a SecTAG that are not to be 1116 * delivered to the Controlled Port. 
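	 * Returning RX_HANDLER_PASS here hands such frames back to the
	 * normal receive path of the real device, i.e. the uncontrolled
	 * port.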
1117 */ 1118 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) 1119 return RX_HANDLER_PASS; 1120 1121 /* now, pull the extra length */ 1122 if (hdr->tci_an & MACSEC_TCI_SC) { 1123 if (!pulled_sci) 1124 goto drop_direct; 1125 } 1126 1127 /* ethernet header is part of crypto processing */ 1128 skb_push(skb, ETH_HLEN); 1129 1130 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); 1131 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; 1132 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); 1133 1134 rcu_read_lock(); 1135 rxd = macsec_data_rcu(skb->dev); 1136 1137 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1138 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); 1139 1140 sc = sc ? macsec_rxsc_get(sc) : NULL; 1141 1142 if (sc) { 1143 secy = &macsec->secy; 1144 rx_sc = sc; 1145 break; 1146 } 1147 } 1148 1149 if (!secy) 1150 goto nosci; 1151 1152 dev = secy->netdev; 1153 macsec = macsec_priv(dev); 1154 secy_stats = this_cpu_ptr(macsec->stats); 1155 rxsc_stats = this_cpu_ptr(rx_sc->stats); 1156 1157 if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) { 1158 u64_stats_update_begin(&secy_stats->syncp); 1159 secy_stats->stats.InPktsBadTag++; 1160 u64_stats_update_end(&secy_stats->syncp); 1161 goto drop_nosa; 1162 } 1163 1164 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); 1165 if (!rx_sa) { 1166 /* 10.6.1 if the SA is not in use */ 1167 1168 /* If validateFrames is Strict or the C bit in the 1169 * SecTAG is set, discard 1170 */ 1171 if (hdr->tci_an & MACSEC_TCI_C || 1172 secy->validate_frames == MACSEC_VALIDATE_STRICT) { 1173 u64_stats_update_begin(&rxsc_stats->syncp); 1174 rxsc_stats->stats.InPktsNotUsingSA++; 1175 u64_stats_update_end(&rxsc_stats->syncp); 1176 goto drop_nosa; 1177 } 1178 1179 /* not Strict, the frame (with the SecTAG and ICV 1180 * removed) is delivered to the Controlled Port. 
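		 * It is counted as InPktsUnusedSA just below before being
		 * handed to the deliver path.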
1181 */ 1182 u64_stats_update_begin(&rxsc_stats->syncp); 1183 rxsc_stats->stats.InPktsUnusedSA++; 1184 u64_stats_update_end(&rxsc_stats->syncp); 1185 goto deliver; 1186 } 1187 1188 /* First, PN check to avoid decrypting obviously wrong packets */ 1189 hdr_pn = ntohl(hdr->packet_number); 1190 if (secy->replay_protect) { 1191 bool late; 1192 1193 spin_lock(&rx_sa->lock); 1194 late = rx_sa->next_pn_halves.lower >= secy->replay_window && 1195 hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window); 1196 1197 if (secy->xpn) 1198 late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn); 1199 spin_unlock(&rx_sa->lock); 1200 1201 if (late) { 1202 u64_stats_update_begin(&rxsc_stats->syncp); 1203 rxsc_stats->stats.InPktsLate++; 1204 u64_stats_update_end(&rxsc_stats->syncp); 1205 goto drop; 1206 } 1207 } 1208 1209 macsec_skb_cb(skb)->rx_sa = rx_sa; 1210 1211 /* Disabled && !changed text => skip validation */ 1212 if (hdr->tci_an & MACSEC_TCI_C || 1213 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1214 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1215 1216 if (IS_ERR(skb)) { 1217 /* the decrypt callback needs the reference */ 1218 if (PTR_ERR(skb) != -EINPROGRESS) { 1219 macsec_rxsa_put(rx_sa); 1220 macsec_rxsc_put(rx_sc); 1221 } 1222 rcu_read_unlock(); 1223 *pskb = NULL; 1224 return RX_HANDLER_CONSUMED; 1225 } 1226 1227 if (!macsec_post_decrypt(skb, secy, hdr_pn)) 1228 goto drop; 1229 1230 deliver: 1231 macsec_finalize_skb(skb, secy->icv_len, 1232 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1233 macsec_reset_skb(skb, secy->netdev); 1234 1235 if (rx_sa) 1236 macsec_rxsa_put(rx_sa); 1237 macsec_rxsc_put(rx_sc); 1238 1239 skb_orphan(skb); 1240 len = skb->len; 1241 ret = gro_cells_receive(&macsec->gro_cells, skb); 1242 if (ret == NET_RX_SUCCESS) 1243 count_rx(dev, len); 1244 else 1245 macsec->secy.netdev->stats.rx_dropped++; 1246 1247 rcu_read_unlock(); 1248 1249 *pskb = NULL; 1250 return RX_HANDLER_CONSUMED; 1251 1252 drop: 1253 macsec_rxsa_put(rx_sa); 1254 drop_nosa: 1255 macsec_rxsc_put(rx_sc); 1256 rcu_read_unlock(); 1257 drop_direct: 1258 kfree_skb(skb); 1259 *pskb = NULL; 1260 return RX_HANDLER_CONSUMED; 1261 1262 nosci: 1263 /* 10.6.1 if the SC is not found */ 1264 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1265 if (!cbit) 1266 macsec_finalize_skb(skb, DEFAULT_ICV_LEN, 1267 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1268 1269 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1270 struct sk_buff *nskb; 1271 1272 secy_stats = this_cpu_ptr(macsec->stats); 1273 1274 /* If validateFrames is Strict or the C bit in the 1275 * SecTAG is set, discard 1276 */ 1277 if (cbit || 1278 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1279 u64_stats_update_begin(&secy_stats->syncp); 1280 secy_stats->stats.InPktsNoSCI++; 1281 u64_stats_update_end(&secy_stats->syncp); 1282 continue; 1283 } 1284 1285 /* not strict, the frame (with the SecTAG and ICV 1286 * removed) is delivered to the Controlled Port. 
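		 * Each such non-strict SecY gets its own clone of the frame,
		 * counted as InPktsUnknownSCI when __netif_rx() accepts it.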
1287 */ 1288 nskb = skb_clone(skb, GFP_ATOMIC); 1289 if (!nskb) 1290 break; 1291 1292 macsec_reset_skb(nskb, macsec->secy.netdev); 1293 1294 ret = __netif_rx(nskb); 1295 if (ret == NET_RX_SUCCESS) { 1296 u64_stats_update_begin(&secy_stats->syncp); 1297 secy_stats->stats.InPktsUnknownSCI++; 1298 u64_stats_update_end(&secy_stats->syncp); 1299 } else { 1300 macsec->secy.netdev->stats.rx_dropped++; 1301 } 1302 } 1303 1304 rcu_read_unlock(); 1305 *pskb = skb; 1306 return RX_HANDLER_PASS; 1307 } 1308 1309 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1310 { 1311 struct crypto_aead *tfm; 1312 int ret; 1313 1314 /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ 1315 tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); 1316 1317 if (IS_ERR(tfm)) 1318 return tfm; 1319 1320 ret = crypto_aead_setkey(tfm, key, key_len); 1321 if (ret < 0) 1322 goto fail; 1323 1324 ret = crypto_aead_setauthsize(tfm, icv_len); 1325 if (ret < 0) 1326 goto fail; 1327 1328 return tfm; 1329 fail: 1330 crypto_free_aead(tfm); 1331 return ERR_PTR(ret); 1332 } 1333 1334 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1335 int icv_len) 1336 { 1337 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1338 if (!rx_sa->stats) 1339 return -ENOMEM; 1340 1341 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1342 if (IS_ERR(rx_sa->key.tfm)) { 1343 free_percpu(rx_sa->stats); 1344 return PTR_ERR(rx_sa->key.tfm); 1345 } 1346 1347 rx_sa->ssci = MACSEC_UNDEF_SSCI; 1348 rx_sa->active = false; 1349 rx_sa->next_pn = 1; 1350 refcount_set(&rx_sa->refcnt, 1); 1351 spin_lock_init(&rx_sa->lock); 1352 1353 return 0; 1354 } 1355 1356 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1357 { 1358 rx_sa->active = false; 1359 1360 macsec_rxsa_put(rx_sa); 1361 } 1362 1363 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1364 { 1365 int i; 1366 1367 for (i = 0; i < MACSEC_NUM_AN; i++) { 1368 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1369 1370 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1371 if (sa) 1372 clear_rx_sa(sa); 1373 } 1374 1375 macsec_rxsc_put(rx_sc); 1376 } 1377 1378 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1379 { 1380 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1381 1382 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1383 rx_sc; 1384 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1385 if (rx_sc->sci == sci) { 1386 if (rx_sc->active) 1387 secy->n_rx_sc--; 1388 rcu_assign_pointer(*rx_scp, rx_sc->next); 1389 return rx_sc; 1390 } 1391 } 1392 1393 return NULL; 1394 } 1395 1396 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) 1397 { 1398 struct macsec_rx_sc *rx_sc; 1399 struct macsec_dev *macsec; 1400 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1401 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1402 struct macsec_secy *secy; 1403 1404 list_for_each_entry(macsec, &rxd->secys, secys) { 1405 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1406 return ERR_PTR(-EEXIST); 1407 } 1408 1409 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1410 if (!rx_sc) 1411 return ERR_PTR(-ENOMEM); 1412 1413 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1414 if (!rx_sc->stats) { 1415 kfree(rx_sc); 1416 return ERR_PTR(-ENOMEM); 1417 } 1418 1419 rx_sc->sci = sci; 1420 rx_sc->active = true; 1421 refcount_set(&rx_sc->refcnt, 1); 1422 1423 secy = &macsec_priv(dev)->secy; 1424 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1425 
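	/* The new rx_sc must be fully set up (including its ->next link just
	 * above) before it is published on secy->rx_sc; rcu_assign_pointer()
	 * provides the ordering that keeps RCU readers from seeing a
	 * half-initialised entry.
	 */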
rcu_assign_pointer(secy->rx_sc, rx_sc); 1426 1427 if (rx_sc->active) 1428 secy->n_rx_sc++; 1429 1430 return rx_sc; 1431 } 1432 1433 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, 1434 int icv_len) 1435 { 1436 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); 1437 if (!tx_sa->stats) 1438 return -ENOMEM; 1439 1440 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1441 if (IS_ERR(tx_sa->key.tfm)) { 1442 free_percpu(tx_sa->stats); 1443 return PTR_ERR(tx_sa->key.tfm); 1444 } 1445 1446 tx_sa->ssci = MACSEC_UNDEF_SSCI; 1447 tx_sa->active = false; 1448 refcount_set(&tx_sa->refcnt, 1); 1449 spin_lock_init(&tx_sa->lock); 1450 1451 return 0; 1452 } 1453 1454 static void clear_tx_sa(struct macsec_tx_sa *tx_sa) 1455 { 1456 tx_sa->active = false; 1457 1458 macsec_txsa_put(tx_sa); 1459 } 1460 1461 static struct genl_family macsec_fam; 1462 1463 static struct net_device *get_dev_from_nl(struct net *net, 1464 struct nlattr **attrs) 1465 { 1466 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); 1467 struct net_device *dev; 1468 1469 dev = __dev_get_by_index(net, ifindex); 1470 if (!dev) 1471 return ERR_PTR(-ENODEV); 1472 1473 if (!netif_is_macsec(dev)) 1474 return ERR_PTR(-ENODEV); 1475 1476 return dev; 1477 } 1478 1479 static enum macsec_offload nla_get_offload(const struct nlattr *nla) 1480 { 1481 return (__force enum macsec_offload)nla_get_u8(nla); 1482 } 1483 1484 static sci_t nla_get_sci(const struct nlattr *nla) 1485 { 1486 return (__force sci_t)nla_get_u64(nla); 1487 } 1488 1489 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, 1490 int padattr) 1491 { 1492 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); 1493 } 1494 1495 static ssci_t nla_get_ssci(const struct nlattr *nla) 1496 { 1497 return (__force ssci_t)nla_get_u32(nla); 1498 } 1499 1500 static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value) 1501 { 1502 return nla_put_u32(skb, attrtype, (__force u64)value); 1503 } 1504 1505 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, 1506 struct nlattr **attrs, 1507 struct nlattr **tb_sa, 1508 struct net_device **devp, 1509 struct macsec_secy **secyp, 1510 struct macsec_tx_sc **scp, 1511 u8 *assoc_num) 1512 { 1513 struct net_device *dev; 1514 struct macsec_secy *secy; 1515 struct macsec_tx_sc *tx_sc; 1516 struct macsec_tx_sa *tx_sa; 1517 1518 if (!tb_sa[MACSEC_SA_ATTR_AN]) 1519 return ERR_PTR(-EINVAL); 1520 1521 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1522 1523 dev = get_dev_from_nl(net, attrs); 1524 if (IS_ERR(dev)) 1525 return ERR_CAST(dev); 1526 1527 if (*assoc_num >= MACSEC_NUM_AN) 1528 return ERR_PTR(-EINVAL); 1529 1530 secy = &macsec_priv(dev)->secy; 1531 tx_sc = &secy->tx_sc; 1532 1533 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); 1534 if (!tx_sa) 1535 return ERR_PTR(-ENODEV); 1536 1537 *devp = dev; 1538 *scp = tx_sc; 1539 *secyp = secy; 1540 return tx_sa; 1541 } 1542 1543 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, 1544 struct nlattr **attrs, 1545 struct nlattr **tb_rxsc, 1546 struct net_device **devp, 1547 struct macsec_secy **secyp) 1548 { 1549 struct net_device *dev; 1550 struct macsec_secy *secy; 1551 struct macsec_rx_sc *rx_sc; 1552 sci_t sci; 1553 1554 dev = get_dev_from_nl(net, attrs); 1555 if (IS_ERR(dev)) 1556 return ERR_CAST(dev); 1557 1558 secy = &macsec_priv(dev)->secy; 1559 1560 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 1561 return ERR_PTR(-EINVAL); 1562 1563 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1564 rx_sc = find_rx_sc_rtnl(secy, 
sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail.
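	 * Any checks that could reject the operation belong in the prepare
	 * phase above; once commit is issued there is no way to roll it
	 * back (hence the WARN below if a driver returns an error here).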
*/ 1655 ctx->prepare = false; 1656 ret = (*func)(ctx); 1657 /* This should never happen: commit is not allowed to fail */ 1658 if (unlikely(ret)) 1659 WARN(1, "MACsec offloading commit failed (%d)\n", ret); 1660 1661 phy_unlock: 1662 if (ctx->offload == MACSEC_OFFLOAD_PHY) 1663 mutex_unlock(&ctx->phydev->lock); 1664 1665 return ret; 1666 } 1667 1668 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) 1669 { 1670 if (!attrs[MACSEC_ATTR_SA_CONFIG]) 1671 return -EINVAL; 1672 1673 if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) 1674 return -EINVAL; 1675 1676 return 0; 1677 } 1678 1679 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) 1680 { 1681 if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) 1682 return -EINVAL; 1683 1684 if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) 1685 return -EINVAL; 1686 1687 return 0; 1688 } 1689 1690 static bool validate_add_rxsa(struct nlattr **attrs) 1691 { 1692 if (!attrs[MACSEC_SA_ATTR_AN] || 1693 !attrs[MACSEC_SA_ATTR_KEY] || 1694 !attrs[MACSEC_SA_ATTR_KEYID]) 1695 return false; 1696 1697 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1698 return false; 1699 1700 if (attrs[MACSEC_SA_ATTR_PN] && 1701 nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 1702 return false; 1703 1704 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1705 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1706 return false; 1707 } 1708 1709 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1710 return false; 1711 1712 return true; 1713 } 1714 1715 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) 1716 { 1717 struct net_device *dev; 1718 struct nlattr **attrs = info->attrs; 1719 struct macsec_secy *secy; 1720 struct macsec_rx_sc *rx_sc; 1721 struct macsec_rx_sa *rx_sa; 1722 unsigned char assoc_num; 1723 int pn_len; 1724 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1725 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1726 int err; 1727 1728 if (!attrs[MACSEC_ATTR_IFINDEX]) 1729 return -EINVAL; 1730 1731 if (parse_sa_config(attrs, tb_sa)) 1732 return -EINVAL; 1733 1734 if (parse_rxsc_config(attrs, tb_rxsc)) 1735 return -EINVAL; 1736 1737 if (!validate_add_rxsa(tb_sa)) 1738 return -EINVAL; 1739 1740 rtnl_lock(); 1741 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 1742 if (IS_ERR(rx_sc)) { 1743 rtnl_unlock(); 1744 return PTR_ERR(rx_sc); 1745 } 1746 1747 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1748 1749 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1750 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", 1751 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1752 rtnl_unlock(); 1753 return -EINVAL; 1754 } 1755 1756 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 1757 if (tb_sa[MACSEC_SA_ATTR_PN] && 1758 nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 1759 pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", 1760 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 1761 rtnl_unlock(); 1762 return -EINVAL; 1763 } 1764 1765 if (secy->xpn) { 1766 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { 1767 rtnl_unlock(); 1768 return -EINVAL; 1769 } 1770 1771 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { 1772 pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", 1773 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), 1774 MACSEC_SALT_LEN); 1775 rtnl_unlock(); 1776 return -EINVAL; 1777 } 1778 } 1779 1780 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); 1781 if (rx_sa) { 1782 rtnl_unlock(); 1783 return -EBUSY; 1784 } 1785 1786 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1787 if (!rx_sa) { 1788 rtnl_unlock(); 1789 return -ENOMEM; 1790 } 1791 1792 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1793 secy->key_len, secy->icv_len); 1794 if (err < 0) { 1795 kfree(rx_sa); 1796 rtnl_unlock(); 1797 return err; 1798 } 1799 1800 if (tb_sa[MACSEC_SA_ATTR_PN]) { 1801 spin_lock_bh(&rx_sa->lock); 1802 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 1803 spin_unlock_bh(&rx_sa->lock); 1804 } 1805 1806 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1807 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1808 1809 rx_sa->sc = rx_sc; 1810 1811 /* If h/w offloading is available, propagate to the device */ 1812 if (macsec_is_offloaded(netdev_priv(dev))) { 1813 const struct macsec_ops *ops; 1814 struct macsec_context ctx; 1815 1816 ops = macsec_get_ops(netdev_priv(dev), &ctx); 1817 if (!ops) { 1818 err = -EOPNOTSUPP; 1819 goto cleanup; 1820 } 1821 1822 ctx.sa.assoc_num = assoc_num; 1823 ctx.sa.rx_sa = rx_sa; 1824 ctx.secy = secy; 1825 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1826 secy->key_len); 1827 1828 err = macsec_offload(ops->mdo_add_rxsa, &ctx); 1829 if (err) 1830 goto cleanup; 1831 } 1832 1833 if (secy->xpn) { 1834 rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); 1835 nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], 1836 MACSEC_SALT_LEN); 1837 } 1838 1839 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 1840 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1841 1842 rtnl_unlock(); 1843 1844 return 0; 1845 1846 cleanup: 1847 macsec_rxsa_put(rx_sa); 1848 rtnl_unlock(); 1849 return err; 1850 } 1851 1852 static bool validate_add_rxsc(struct nlattr **attrs) 1853 { 1854 if (!attrs[MACSEC_RXSC_ATTR_SCI]) 1855 return false; 1856 1857 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { 1858 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) 1859 return false; 1860 } 1861 1862 return true; 1863 } 1864 1865 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) 1866 { 1867 struct net_device *dev; 1868 sci_t sci = MACSEC_UNDEF_SCI; 1869 struct nlattr **attrs = info->attrs; 1870 struct macsec_rx_sc *rx_sc; 1871 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 1872 struct macsec_secy *secy; 1873 bool was_active; 1874 int ret; 1875 1876 if (!attrs[MACSEC_ATTR_IFINDEX]) 1877 return -EINVAL; 1878 1879 if (parse_rxsc_config(attrs, tb_rxsc)) 1880 return -EINVAL; 1881 1882 if (!validate_add_rxsc(tb_rxsc)) 1883 return -EINVAL; 1884 1885 rtnl_lock(); 1886 dev = get_dev_from_nl(genl_info_net(info), attrs); 1887 if (IS_ERR(dev)) { 1888 rtnl_unlock(); 1889 return PTR_ERR(dev); 1890 } 1891 1892 secy = &macsec_priv(dev)->secy; 1893 sci = 
nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 1894 1895 rx_sc = create_rx_sc(dev, sci); 1896 if (IS_ERR(rx_sc)) { 1897 rtnl_unlock(); 1898 return PTR_ERR(rx_sc); 1899 } 1900 1901 was_active = rx_sc->active; 1902 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) 1903 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 1904 1905 if (macsec_is_offloaded(netdev_priv(dev))) { 1906 const struct macsec_ops *ops; 1907 struct macsec_context ctx; 1908 1909 ops = macsec_get_ops(netdev_priv(dev), &ctx); 1910 if (!ops) { 1911 ret = -EOPNOTSUPP; 1912 goto cleanup; 1913 } 1914 1915 ctx.rx_sc = rx_sc; 1916 ctx.secy = secy; 1917 1918 ret = macsec_offload(ops->mdo_add_rxsc, &ctx); 1919 if (ret) 1920 goto cleanup; 1921 } 1922 1923 rtnl_unlock(); 1924 1925 return 0; 1926 1927 cleanup: 1928 rx_sc->active = was_active; 1929 rtnl_unlock(); 1930 return ret; 1931 } 1932 1933 static bool validate_add_txsa(struct nlattr **attrs) 1934 { 1935 if (!attrs[MACSEC_SA_ATTR_AN] || 1936 !attrs[MACSEC_SA_ATTR_PN] || 1937 !attrs[MACSEC_SA_ATTR_KEY] || 1938 !attrs[MACSEC_SA_ATTR_KEYID]) 1939 return false; 1940 1941 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 1942 return false; 1943 1944 if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 1945 return false; 1946 1947 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 1948 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1949 return false; 1950 } 1951 1952 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1953 return false; 1954 1955 return true; 1956 } 1957 1958 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) 1959 { 1960 struct net_device *dev; 1961 struct nlattr **attrs = info->attrs; 1962 struct macsec_secy *secy; 1963 struct macsec_tx_sc *tx_sc; 1964 struct macsec_tx_sa *tx_sa; 1965 unsigned char assoc_num; 1966 int pn_len; 1967 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 1968 bool was_operational; 1969 int err; 1970 1971 if (!attrs[MACSEC_ATTR_IFINDEX]) 1972 return -EINVAL; 1973 1974 if (parse_sa_config(attrs, tb_sa)) 1975 return -EINVAL; 1976 1977 if (!validate_add_txsa(tb_sa)) 1978 return -EINVAL; 1979 1980 rtnl_lock(); 1981 dev = get_dev_from_nl(genl_info_net(info), attrs); 1982 if (IS_ERR(dev)) { 1983 rtnl_unlock(); 1984 return PTR_ERR(dev); 1985 } 1986 1987 secy = &macsec_priv(dev)->secy; 1988 tx_sc = &secy->tx_sc; 1989 1990 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); 1991 1992 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { 1993 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", 1994 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); 1995 rtnl_unlock(); 1996 return -EINVAL; 1997 } 1998 1999 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2000 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2001 pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n", 2002 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2003 rtnl_unlock(); 2004 return -EINVAL; 2005 } 2006 2007 if (secy->xpn) { 2008 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { 2009 rtnl_unlock(); 2010 return -EINVAL; 2011 } 2012 2013 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { 2014 pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n", 2015 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), 2016 MACSEC_SALT_LEN); 2017 rtnl_unlock(); 2018 return -EINVAL; 2019 } 2020 } 2021 2022 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); 2023 if (tx_sa) { 2024 rtnl_unlock(); 2025 return -EBUSY; 2026 } 2027 2028 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 2029 if (!tx_sa) { 2030 rtnl_unlock(); 2031 return -ENOMEM; 2032 } 2033 2034 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 2035 secy->key_len, secy->icv_len); 2036 if (err < 0) { 2037 kfree(tx_sa); 2038 rtnl_unlock(); 2039 return err; 2040 } 2041 2042 spin_lock_bh(&tx_sa->lock); 2043 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2044 spin_unlock_bh(&tx_sa->lock); 2045 2046 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2047 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2048 2049 was_operational = secy->operational; 2050 if (assoc_num == tx_sc->encoding_sa && tx_sa->active) 2051 secy->operational = true; 2052 2053 /* If h/w offloading is available, propagate to the device */ 2054 if (macsec_is_offloaded(netdev_priv(dev))) { 2055 const struct macsec_ops *ops; 2056 struct macsec_context ctx; 2057 2058 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2059 if (!ops) { 2060 err = -EOPNOTSUPP; 2061 goto cleanup; 2062 } 2063 2064 ctx.sa.assoc_num = assoc_num; 2065 ctx.sa.tx_sa = tx_sa; 2066 ctx.secy = secy; 2067 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 2068 secy->key_len); 2069 2070 err = macsec_offload(ops->mdo_add_txsa, &ctx); 2071 if (err) 2072 goto cleanup; 2073 } 2074 2075 if (secy->xpn) { 2076 tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); 2077 nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], 2078 MACSEC_SALT_LEN); 2079 } 2080 2081 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); 2082 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); 2083 2084 rtnl_unlock(); 2085 2086 return 0; 2087 2088 cleanup: 2089 secy->operational = was_operational; 2090 macsec_txsa_put(tx_sa); 2091 rtnl_unlock(); 2092 return err; 2093 } 2094 2095 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) 2096 { 2097 struct nlattr **attrs = info->attrs; 2098 struct net_device *dev; 2099 struct macsec_secy *secy; 2100 struct macsec_rx_sc *rx_sc; 2101 struct macsec_rx_sa *rx_sa; 2102 u8 assoc_num; 2103 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2104 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2105 int ret; 2106 2107 if (!attrs[MACSEC_ATTR_IFINDEX]) 2108 return -EINVAL; 2109 2110 if (parse_sa_config(attrs, tb_sa)) 2111 return -EINVAL; 2112 2113 if (parse_rxsc_config(attrs, tb_rxsc)) 2114 return -EINVAL; 2115 2116 rtnl_lock(); 2117 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2118 &dev, &secy, &rx_sc, &assoc_num); 2119 if (IS_ERR(rx_sa)) { 2120 rtnl_unlock(); 2121 return PTR_ERR(rx_sa); 2122 } 2123 2124 if (rx_sa->active) { 2125 rtnl_unlock(); 2126 return -EBUSY; 2127 } 2128 2129 /* If h/w offloading is available, propagate to the device */ 2130 if 
(macsec_is_offloaded(netdev_priv(dev))) { 2131 const struct macsec_ops *ops; 2132 struct macsec_context ctx; 2133 2134 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2135 if (!ops) { 2136 ret = -EOPNOTSUPP; 2137 goto cleanup; 2138 } 2139 2140 ctx.sa.assoc_num = assoc_num; 2141 ctx.sa.rx_sa = rx_sa; 2142 ctx.secy = secy; 2143 2144 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2145 if (ret) 2146 goto cleanup; 2147 } 2148 2149 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2150 clear_rx_sa(rx_sa); 2151 2152 rtnl_unlock(); 2153 2154 return 0; 2155 2156 cleanup: 2157 rtnl_unlock(); 2158 return ret; 2159 } 2160 2161 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2162 { 2163 struct nlattr **attrs = info->attrs; 2164 struct net_device *dev; 2165 struct macsec_secy *secy; 2166 struct macsec_rx_sc *rx_sc; 2167 sci_t sci; 2168 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2169 int ret; 2170 2171 if (!attrs[MACSEC_ATTR_IFINDEX]) 2172 return -EINVAL; 2173 2174 if (parse_rxsc_config(attrs, tb_rxsc)) 2175 return -EINVAL; 2176 2177 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2178 return -EINVAL; 2179 2180 rtnl_lock(); 2181 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2182 if (IS_ERR(dev)) { 2183 rtnl_unlock(); 2184 return PTR_ERR(dev); 2185 } 2186 2187 secy = &macsec_priv(dev)->secy; 2188 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2189 2190 rx_sc = del_rx_sc(secy, sci); 2191 if (!rx_sc) { 2192 rtnl_unlock(); 2193 return -ENODEV; 2194 } 2195 2196 /* If h/w offloading is available, propagate to the device */ 2197 if (macsec_is_offloaded(netdev_priv(dev))) { 2198 const struct macsec_ops *ops; 2199 struct macsec_context ctx; 2200 2201 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2202 if (!ops) { 2203 ret = -EOPNOTSUPP; 2204 goto cleanup; 2205 } 2206 2207 ctx.rx_sc = rx_sc; 2208 ctx.secy = secy; 2209 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2210 if (ret) 2211 goto cleanup; 2212 } 2213 2214 free_rx_sc(rx_sc); 2215 rtnl_unlock(); 2216 2217 return 0; 2218 2219 cleanup: 2220 rtnl_unlock(); 2221 return ret; 2222 } 2223 2224 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2225 { 2226 struct nlattr **attrs = info->attrs; 2227 struct net_device *dev; 2228 struct macsec_secy *secy; 2229 struct macsec_tx_sc *tx_sc; 2230 struct macsec_tx_sa *tx_sa; 2231 u8 assoc_num; 2232 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2233 int ret; 2234 2235 if (!attrs[MACSEC_ATTR_IFINDEX]) 2236 return -EINVAL; 2237 2238 if (parse_sa_config(attrs, tb_sa)) 2239 return -EINVAL; 2240 2241 rtnl_lock(); 2242 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2243 &dev, &secy, &tx_sc, &assoc_num); 2244 if (IS_ERR(tx_sa)) { 2245 rtnl_unlock(); 2246 return PTR_ERR(tx_sa); 2247 } 2248 2249 if (tx_sa->active) { 2250 rtnl_unlock(); 2251 return -EBUSY; 2252 } 2253 2254 /* If h/w offloading is available, propagate to the device */ 2255 if (macsec_is_offloaded(netdev_priv(dev))) { 2256 const struct macsec_ops *ops; 2257 struct macsec_context ctx; 2258 2259 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2260 if (!ops) { 2261 ret = -EOPNOTSUPP; 2262 goto cleanup; 2263 } 2264 2265 ctx.sa.assoc_num = assoc_num; 2266 ctx.sa.tx_sa = tx_sa; 2267 ctx.secy = secy; 2268 2269 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2270 if (ret) 2271 goto cleanup; 2272 } 2273 2274 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2275 clear_tx_sa(tx_sa); 2276 2277 rtnl_unlock(); 2278 2279 return 0; 2280 2281 cleanup: 2282 rtnl_unlock(); 2283 return ret; 2284 } 2285 2286 static bool 
validate_upd_sa(struct nlattr **attrs) 2287 { 2288 if (!attrs[MACSEC_SA_ATTR_AN] || 2289 attrs[MACSEC_SA_ATTR_KEY] || 2290 attrs[MACSEC_SA_ATTR_KEYID] || 2291 attrs[MACSEC_SA_ATTR_SSCI] || 2292 attrs[MACSEC_SA_ATTR_SALT]) 2293 return false; 2294 2295 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2296 return false; 2297 2298 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 2299 return false; 2300 2301 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2302 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2303 return false; 2304 } 2305 2306 return true; 2307 } 2308 2309 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2310 { 2311 struct nlattr **attrs = info->attrs; 2312 struct net_device *dev; 2313 struct macsec_secy *secy; 2314 struct macsec_tx_sc *tx_sc; 2315 struct macsec_tx_sa *tx_sa; 2316 u8 assoc_num; 2317 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2318 bool was_operational, was_active; 2319 pn_t prev_pn; 2320 int ret = 0; 2321 2322 prev_pn.full64 = 0; 2323 2324 if (!attrs[MACSEC_ATTR_IFINDEX]) 2325 return -EINVAL; 2326 2327 if (parse_sa_config(attrs, tb_sa)) 2328 return -EINVAL; 2329 2330 if (!validate_upd_sa(tb_sa)) 2331 return -EINVAL; 2332 2333 rtnl_lock(); 2334 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2335 &dev, &secy, &tx_sc, &assoc_num); 2336 if (IS_ERR(tx_sa)) { 2337 rtnl_unlock(); 2338 return PTR_ERR(tx_sa); 2339 } 2340 2341 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2342 int pn_len; 2343 2344 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2345 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2346 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2347 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2348 rtnl_unlock(); 2349 return -EINVAL; 2350 } 2351 2352 spin_lock_bh(&tx_sa->lock); 2353 prev_pn = tx_sa->next_pn_halves; 2354 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2355 spin_unlock_bh(&tx_sa->lock); 2356 } 2357 2358 was_active = tx_sa->active; 2359 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2360 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2361 2362 was_operational = secy->operational; 2363 if (assoc_num == tx_sc->encoding_sa) 2364 secy->operational = tx_sa->active; 2365 2366 /* If h/w offloading is available, propagate to the device */ 2367 if (macsec_is_offloaded(netdev_priv(dev))) { 2368 const struct macsec_ops *ops; 2369 struct macsec_context ctx; 2370 2371 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2372 if (!ops) { 2373 ret = -EOPNOTSUPP; 2374 goto cleanup; 2375 } 2376 2377 ctx.sa.assoc_num = assoc_num; 2378 ctx.sa.tx_sa = tx_sa; 2379 ctx.secy = secy; 2380 2381 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2382 if (ret) 2383 goto cleanup; 2384 } 2385 2386 rtnl_unlock(); 2387 2388 return 0; 2389 2390 cleanup: 2391 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2392 spin_lock_bh(&tx_sa->lock); 2393 tx_sa->next_pn_halves = prev_pn; 2394 spin_unlock_bh(&tx_sa->lock); 2395 } 2396 tx_sa->active = was_active; 2397 secy->operational = was_operational; 2398 rtnl_unlock(); 2399 return ret; 2400 } 2401 2402 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2403 { 2404 struct nlattr **attrs = info->attrs; 2405 struct net_device *dev; 2406 struct macsec_secy *secy; 2407 struct macsec_rx_sc *rx_sc; 2408 struct macsec_rx_sa *rx_sa; 2409 u8 assoc_num; 2410 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2411 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2412 bool was_active; 2413 pn_t prev_pn; 2414 int ret = 0; 2415 2416 prev_pn.full64 = 0; 2417 2418 if 
(!attrs[MACSEC_ATTR_IFINDEX]) 2419 return -EINVAL; 2420 2421 if (parse_rxsc_config(attrs, tb_rxsc)) 2422 return -EINVAL; 2423 2424 if (parse_sa_config(attrs, tb_sa)) 2425 return -EINVAL; 2426 2427 if (!validate_upd_sa(tb_sa)) 2428 return -EINVAL; 2429 2430 rtnl_lock(); 2431 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2432 &dev, &secy, &rx_sc, &assoc_num); 2433 if (IS_ERR(rx_sa)) { 2434 rtnl_unlock(); 2435 return PTR_ERR(rx_sa); 2436 } 2437 2438 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2439 int pn_len; 2440 2441 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2442 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2443 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2444 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2445 rtnl_unlock(); 2446 return -EINVAL; 2447 } 2448 2449 spin_lock_bh(&rx_sa->lock); 2450 prev_pn = rx_sa->next_pn_halves; 2451 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2452 spin_unlock_bh(&rx_sa->lock); 2453 } 2454 2455 was_active = rx_sa->active; 2456 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2457 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2458 2459 /* If h/w offloading is available, propagate to the device */ 2460 if (macsec_is_offloaded(netdev_priv(dev))) { 2461 const struct macsec_ops *ops; 2462 struct macsec_context ctx; 2463 2464 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2465 if (!ops) { 2466 ret = -EOPNOTSUPP; 2467 goto cleanup; 2468 } 2469 2470 ctx.sa.assoc_num = assoc_num; 2471 ctx.sa.rx_sa = rx_sa; 2472 ctx.secy = secy; 2473 2474 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2475 if (ret) 2476 goto cleanup; 2477 } 2478 2479 rtnl_unlock(); 2480 return 0; 2481 2482 cleanup: 2483 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2484 spin_lock_bh(&rx_sa->lock); 2485 rx_sa->next_pn_halves = prev_pn; 2486 spin_unlock_bh(&rx_sa->lock); 2487 } 2488 rx_sa->active = was_active; 2489 rtnl_unlock(); 2490 return ret; 2491 } 2492 2493 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2494 { 2495 struct nlattr **attrs = info->attrs; 2496 struct net_device *dev; 2497 struct macsec_secy *secy; 2498 struct macsec_rx_sc *rx_sc; 2499 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2500 unsigned int prev_n_rx_sc; 2501 bool was_active; 2502 int ret; 2503 2504 if (!attrs[MACSEC_ATTR_IFINDEX]) 2505 return -EINVAL; 2506 2507 if (parse_rxsc_config(attrs, tb_rxsc)) 2508 return -EINVAL; 2509 2510 if (!validate_add_rxsc(tb_rxsc)) 2511 return -EINVAL; 2512 2513 rtnl_lock(); 2514 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2515 if (IS_ERR(rx_sc)) { 2516 rtnl_unlock(); 2517 return PTR_ERR(rx_sc); 2518 } 2519 2520 was_active = rx_sc->active; 2521 prev_n_rx_sc = secy->n_rx_sc; 2522 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2523 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2524 2525 if (rx_sc->active != new) 2526 secy->n_rx_sc += new ? 
1 : -1; 2527 2528 rx_sc->active = new; 2529 } 2530 2531 /* If h/w offloading is available, propagate to the device */ 2532 if (macsec_is_offloaded(netdev_priv(dev))) { 2533 const struct macsec_ops *ops; 2534 struct macsec_context ctx; 2535 2536 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2537 if (!ops) { 2538 ret = -EOPNOTSUPP; 2539 goto cleanup; 2540 } 2541 2542 ctx.rx_sc = rx_sc; 2543 ctx.secy = secy; 2544 2545 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); 2546 if (ret) 2547 goto cleanup; 2548 } 2549 2550 rtnl_unlock(); 2551 2552 return 0; 2553 2554 cleanup: 2555 secy->n_rx_sc = prev_n_rx_sc; 2556 rx_sc->active = was_active; 2557 rtnl_unlock(); 2558 return ret; 2559 } 2560 2561 static bool macsec_is_configured(struct macsec_dev *macsec) 2562 { 2563 struct macsec_secy *secy = &macsec->secy; 2564 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2565 int i; 2566 2567 if (secy->n_rx_sc > 0) 2568 return true; 2569 2570 for (i = 0; i < MACSEC_NUM_AN; i++) 2571 if (tx_sc->sa[i]) 2572 return true; 2573 2574 return false; 2575 } 2576 2577 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2578 { 2579 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2580 enum macsec_offload offload, prev_offload; 2581 int (*func)(struct macsec_context *ctx); 2582 struct nlattr **attrs = info->attrs; 2583 struct net_device *dev; 2584 const struct macsec_ops *ops; 2585 struct macsec_context ctx; 2586 struct macsec_dev *macsec; 2587 int ret; 2588 2589 if (!attrs[MACSEC_ATTR_IFINDEX]) 2590 return -EINVAL; 2591 2592 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2593 return -EINVAL; 2594 2595 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2596 attrs[MACSEC_ATTR_OFFLOAD], 2597 macsec_genl_offload_policy, NULL)) 2598 return -EINVAL; 2599 2600 dev = get_dev_from_nl(genl_info_net(info), attrs); 2601 if (IS_ERR(dev)) 2602 return PTR_ERR(dev); 2603 macsec = macsec_priv(dev); 2604 2605 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) 2606 return -EINVAL; 2607 2608 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2609 if (macsec->offload == offload) 2610 return 0; 2611 2612 /* Check if the offloading mode is supported by the underlying layers */ 2613 if (offload != MACSEC_OFFLOAD_OFF && 2614 !macsec_check_offload(offload, macsec)) 2615 return -EOPNOTSUPP; 2616 2617 /* Check if the net device is busy. */ 2618 if (netif_running(dev)) 2619 return -EBUSY; 2620 2621 rtnl_lock(); 2622 2623 prev_offload = macsec->offload; 2624 macsec->offload = offload; 2625 2626 /* Check if the device already has rules configured: we do not support 2627 * rules migration. 2628 */ 2629 if (macsec_is_configured(macsec)) { 2630 ret = -EBUSY; 2631 goto rollback; 2632 } 2633 2634 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, 2635 macsec, &ctx); 2636 if (!ops) { 2637 ret = -EOPNOTSUPP; 2638 goto rollback; 2639 } 2640 2641 if (prev_offload == MACSEC_OFFLOAD_OFF) 2642 func = ops->mdo_add_secy; 2643 else 2644 func = ops->mdo_del_secy; 2645 2646 ctx.secy = &macsec->secy; 2647 ret = macsec_offload(func, &ctx); 2648 if (ret) 2649 goto rollback; 2650 2651 /* Force features update, since they are different for SW MACSec and 2652 * HW offloading cases. 
2653 */ 2654 netdev_update_features(dev); 2655 2656 rtnl_unlock(); 2657 return 0; 2658 2659 rollback: 2660 macsec->offload = prev_offload; 2661 2662 rtnl_unlock(); 2663 return ret; 2664 } 2665 2666 static void get_tx_sa_stats(struct net_device *dev, int an, 2667 struct macsec_tx_sa *tx_sa, 2668 struct macsec_tx_sa_stats *sum) 2669 { 2670 struct macsec_dev *macsec = macsec_priv(dev); 2671 int cpu; 2672 2673 /* If h/w offloading is available, propagate to the device */ 2674 if (macsec_is_offloaded(macsec)) { 2675 const struct macsec_ops *ops; 2676 struct macsec_context ctx; 2677 2678 ops = macsec_get_ops(macsec, &ctx); 2679 if (ops) { 2680 ctx.sa.assoc_num = an; 2681 ctx.sa.tx_sa = tx_sa; 2682 ctx.stats.tx_sa_stats = sum; 2683 ctx.secy = &macsec_priv(dev)->secy; 2684 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2685 } 2686 return; 2687 } 2688 2689 for_each_possible_cpu(cpu) { 2690 const struct macsec_tx_sa_stats *stats = 2691 per_cpu_ptr(tx_sa->stats, cpu); 2692 2693 sum->OutPktsProtected += stats->OutPktsProtected; 2694 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2695 } 2696 } 2697 2698 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2699 { 2700 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2701 sum->OutPktsProtected) || 2702 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2703 sum->OutPktsEncrypted)) 2704 return -EMSGSIZE; 2705 2706 return 0; 2707 } 2708 2709 static void get_rx_sa_stats(struct net_device *dev, 2710 struct macsec_rx_sc *rx_sc, int an, 2711 struct macsec_rx_sa *rx_sa, 2712 struct macsec_rx_sa_stats *sum) 2713 { 2714 struct macsec_dev *macsec = macsec_priv(dev); 2715 int cpu; 2716 2717 /* If h/w offloading is available, propagate to the device */ 2718 if (macsec_is_offloaded(macsec)) { 2719 const struct macsec_ops *ops; 2720 struct macsec_context ctx; 2721 2722 ops = macsec_get_ops(macsec, &ctx); 2723 if (ops) { 2724 ctx.sa.assoc_num = an; 2725 ctx.sa.rx_sa = rx_sa; 2726 ctx.stats.rx_sa_stats = sum; 2727 ctx.secy = &macsec_priv(dev)->secy; 2728 ctx.rx_sc = rx_sc; 2729 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2730 } 2731 return; 2732 } 2733 2734 for_each_possible_cpu(cpu) { 2735 const struct macsec_rx_sa_stats *stats = 2736 per_cpu_ptr(rx_sa->stats, cpu); 2737 2738 sum->InPktsOK += stats->InPktsOK; 2739 sum->InPktsInvalid += stats->InPktsInvalid; 2740 sum->InPktsNotValid += stats->InPktsNotValid; 2741 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA; 2742 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2743 } 2744 } 2745 2746 static int copy_rx_sa_stats(struct sk_buff *skb, 2747 struct macsec_rx_sa_stats *sum) 2748 { 2749 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2750 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2751 sum->InPktsInvalid) || 2752 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2753 sum->InPktsNotValid) || 2754 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2755 sum->InPktsNotUsingSA) || 2756 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2757 sum->InPktsUnusedSA)) 2758 return -EMSGSIZE; 2759 2760 return 0; 2761 } 2762 2763 static void get_rx_sc_stats(struct net_device *dev, 2764 struct macsec_rx_sc *rx_sc, 2765 struct macsec_rx_sc_stats *sum) 2766 { 2767 struct macsec_dev *macsec = macsec_priv(dev); 2768 int cpu; 2769 2770 /* If h/w offloading is available, propagate to the device */ 2771 if (macsec_is_offloaded(macsec)) { 2772 const struct macsec_ops *ops; 2773 struct macsec_context ctx; 2774 2775 ops = 
macsec_get_ops(macsec, &ctx); 2776 if (ops) { 2777 ctx.stats.rx_sc_stats = sum; 2778 ctx.secy = &macsec_priv(dev)->secy; 2779 ctx.rx_sc = rx_sc; 2780 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2781 } 2782 return; 2783 } 2784 2785 for_each_possible_cpu(cpu) { 2786 const struct pcpu_rx_sc_stats *stats; 2787 struct macsec_rx_sc_stats tmp; 2788 unsigned int start; 2789 2790 stats = per_cpu_ptr(rx_sc->stats, cpu); 2791 do { 2792 start = u64_stats_fetch_begin_irq(&stats->syncp); 2793 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2794 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2795 2796 sum->InOctetsValidated += tmp.InOctetsValidated; 2797 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2798 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2799 sum->InPktsDelayed += tmp.InPktsDelayed; 2800 sum->InPktsOK += tmp.InPktsOK; 2801 sum->InPktsInvalid += tmp.InPktsInvalid; 2802 sum->InPktsLate += tmp.InPktsLate; 2803 sum->InPktsNotValid += tmp.InPktsNotValid; 2804 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2805 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2806 } 2807 } 2808 2809 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2810 { 2811 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2812 sum->InOctetsValidated, 2813 MACSEC_RXSC_STATS_ATTR_PAD) || 2814 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2815 sum->InOctetsDecrypted, 2816 MACSEC_RXSC_STATS_ATTR_PAD) || 2817 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2818 sum->InPktsUnchecked, 2819 MACSEC_RXSC_STATS_ATTR_PAD) || 2820 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2821 sum->InPktsDelayed, 2822 MACSEC_RXSC_STATS_ATTR_PAD) || 2823 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2824 sum->InPktsOK, 2825 MACSEC_RXSC_STATS_ATTR_PAD) || 2826 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2827 sum->InPktsInvalid, 2828 MACSEC_RXSC_STATS_ATTR_PAD) || 2829 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2830 sum->InPktsLate, 2831 MACSEC_RXSC_STATS_ATTR_PAD) || 2832 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2833 sum->InPktsNotValid, 2834 MACSEC_RXSC_STATS_ATTR_PAD) || 2835 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2836 sum->InPktsNotUsingSA, 2837 MACSEC_RXSC_STATS_ATTR_PAD) || 2838 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2839 sum->InPktsUnusedSA, 2840 MACSEC_RXSC_STATS_ATTR_PAD)) 2841 return -EMSGSIZE; 2842 2843 return 0; 2844 } 2845 2846 static void get_tx_sc_stats(struct net_device *dev, 2847 struct macsec_tx_sc_stats *sum) 2848 { 2849 struct macsec_dev *macsec = macsec_priv(dev); 2850 int cpu; 2851 2852 /* If h/w offloading is available, propagate to the device */ 2853 if (macsec_is_offloaded(macsec)) { 2854 const struct macsec_ops *ops; 2855 struct macsec_context ctx; 2856 2857 ops = macsec_get_ops(macsec, &ctx); 2858 if (ops) { 2859 ctx.stats.tx_sc_stats = sum; 2860 ctx.secy = &macsec_priv(dev)->secy; 2861 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2862 } 2863 return; 2864 } 2865 2866 for_each_possible_cpu(cpu) { 2867 const struct pcpu_tx_sc_stats *stats; 2868 struct macsec_tx_sc_stats tmp; 2869 unsigned int start; 2870 2871 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2872 do { 2873 start = u64_stats_fetch_begin_irq(&stats->syncp); 2874 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2875 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2876 2877 sum->OutPktsProtected += 
tmp.OutPktsProtected; 2878 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2879 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2880 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2881 } 2882 } 2883 2884 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2885 { 2886 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2887 sum->OutPktsProtected, 2888 MACSEC_TXSC_STATS_ATTR_PAD) || 2889 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2890 sum->OutPktsEncrypted, 2891 MACSEC_TXSC_STATS_ATTR_PAD) || 2892 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2893 sum->OutOctetsProtected, 2894 MACSEC_TXSC_STATS_ATTR_PAD) || 2895 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2896 sum->OutOctetsEncrypted, 2897 MACSEC_TXSC_STATS_ATTR_PAD)) 2898 return -EMSGSIZE; 2899 2900 return 0; 2901 } 2902 2903 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2904 { 2905 struct macsec_dev *macsec = macsec_priv(dev); 2906 int cpu; 2907 2908 /* If h/w offloading is available, propagate to the device */ 2909 if (macsec_is_offloaded(macsec)) { 2910 const struct macsec_ops *ops; 2911 struct macsec_context ctx; 2912 2913 ops = macsec_get_ops(macsec, &ctx); 2914 if (ops) { 2915 ctx.stats.dev_stats = sum; 2916 ctx.secy = &macsec_priv(dev)->secy; 2917 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2918 } 2919 return; 2920 } 2921 2922 for_each_possible_cpu(cpu) { 2923 const struct pcpu_secy_stats *stats; 2924 struct macsec_dev_stats tmp; 2925 unsigned int start; 2926 2927 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2928 do { 2929 start = u64_stats_fetch_begin_irq(&stats->syncp); 2930 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2931 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2932 2933 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2934 sum->InPktsUntagged += tmp.InPktsUntagged; 2935 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2936 sum->InPktsNoTag += tmp.InPktsNoTag; 2937 sum->InPktsBadTag += tmp.InPktsBadTag; 2938 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2939 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2940 sum->InPktsOverrun += tmp.InPktsOverrun; 2941 } 2942 } 2943 2944 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2945 { 2946 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2947 sum->OutPktsUntagged, 2948 MACSEC_SECY_STATS_ATTR_PAD) || 2949 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2950 sum->InPktsUntagged, 2951 MACSEC_SECY_STATS_ATTR_PAD) || 2952 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2953 sum->OutPktsTooLong, 2954 MACSEC_SECY_STATS_ATTR_PAD) || 2955 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2956 sum->InPktsNoTag, 2957 MACSEC_SECY_STATS_ATTR_PAD) || 2958 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2959 sum->InPktsBadTag, 2960 MACSEC_SECY_STATS_ATTR_PAD) || 2961 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2962 sum->InPktsUnknownSCI, 2963 MACSEC_SECY_STATS_ATTR_PAD) || 2964 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2965 sum->InPktsNoSCI, 2966 MACSEC_SECY_STATS_ATTR_PAD) || 2967 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2968 sum->InPktsOverrun, 2969 MACSEC_SECY_STATS_ATTR_PAD)) 2970 return -EMSGSIZE; 2971 2972 return 0; 2973 } 2974 2975 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2976 { 2977 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2978 
struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2979 MACSEC_ATTR_SECY); 2980 u64 csid; 2981 2982 if (!secy_nest) 2983 return 1; 2984 2985 switch (secy->key_len) { 2986 case MACSEC_GCM_AES_128_SAK_LEN: 2987 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 2988 break; 2989 case MACSEC_GCM_AES_256_SAK_LEN: 2990 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 2991 break; 2992 default: 2993 goto cancel; 2994 } 2995 2996 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2997 MACSEC_SECY_ATTR_PAD) || 2998 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2999 csid, MACSEC_SECY_ATTR_PAD) || 3000 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 3001 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 3002 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 3003 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 3004 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 3005 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 3006 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 3007 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3008 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3009 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3010 goto cancel; 3011 3012 if (secy->replay_protect) { 3013 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3014 goto cancel; 3015 } 3016 3017 nla_nest_end(skb, secy_nest); 3018 return 0; 3019 3020 cancel: 3021 nla_nest_cancel(skb, secy_nest); 3022 return 1; 3023 } 3024 3025 static noinline_for_stack int 3026 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3027 struct sk_buff *skb, struct netlink_callback *cb) 3028 { 3029 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3030 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3031 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3032 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3033 struct macsec_dev *macsec = netdev_priv(dev); 3034 struct macsec_dev_stats dev_stats = {0, }; 3035 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3036 struct nlattr *txsa_list, *rxsc_list; 3037 struct macsec_rx_sc *rx_sc; 3038 struct nlattr *attr; 3039 void *hdr; 3040 int i, j; 3041 3042 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3043 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3044 if (!hdr) 3045 return -EMSGSIZE; 3046 3047 genl_dump_check_consistent(cb, hdr); 3048 3049 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3050 goto nla_put_failure; 3051 3052 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3053 if (!attr) 3054 goto nla_put_failure; 3055 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3056 goto nla_put_failure; 3057 nla_nest_end(skb, attr); 3058 3059 if (nla_put_secy(secy, skb)) 3060 goto nla_put_failure; 3061 3062 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3063 if (!attr) 3064 goto nla_put_failure; 3065 3066 get_tx_sc_stats(dev, &tx_sc_stats); 3067 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3068 nla_nest_cancel(skb, attr); 3069 goto nla_put_failure; 3070 } 3071 nla_nest_end(skb, attr); 3072 3073 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3074 if (!attr) 3075 goto nla_put_failure; 3076 get_secy_stats(dev, &dev_stats); 3077 if (copy_secy_stats(skb, &dev_stats)) { 3078 nla_nest_cancel(skb, attr); 3079 goto nla_put_failure; 3080 } 3081 nla_nest_end(skb, attr); 3082 3083 txsa_list = nla_nest_start_noflag(skb, 
MACSEC_ATTR_TXSA_LIST); 3084 if (!txsa_list) 3085 goto nla_put_failure; 3086 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3087 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3088 struct nlattr *txsa_nest; 3089 u64 pn; 3090 int pn_len; 3091 3092 if (!tx_sa) 3093 continue; 3094 3095 txsa_nest = nla_nest_start_noflag(skb, j++); 3096 if (!txsa_nest) { 3097 nla_nest_cancel(skb, txsa_list); 3098 goto nla_put_failure; 3099 } 3100 3101 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3102 if (!attr) { 3103 nla_nest_cancel(skb, txsa_nest); 3104 nla_nest_cancel(skb, txsa_list); 3105 goto nla_put_failure; 3106 } 3107 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3108 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3109 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3110 nla_nest_cancel(skb, attr); 3111 nla_nest_cancel(skb, txsa_nest); 3112 nla_nest_cancel(skb, txsa_list); 3113 goto nla_put_failure; 3114 } 3115 nla_nest_end(skb, attr); 3116 3117 if (secy->xpn) { 3118 pn = tx_sa->next_pn; 3119 pn_len = MACSEC_XPN_PN_LEN; 3120 } else { 3121 pn = tx_sa->next_pn_halves.lower; 3122 pn_len = MACSEC_DEFAULT_PN_LEN; 3123 } 3124 3125 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3126 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3127 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3128 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3129 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3130 nla_nest_cancel(skb, txsa_nest); 3131 nla_nest_cancel(skb, txsa_list); 3132 goto nla_put_failure; 3133 } 3134 3135 nla_nest_end(skb, txsa_nest); 3136 } 3137 nla_nest_end(skb, txsa_list); 3138 3139 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3140 if (!rxsc_list) 3141 goto nla_put_failure; 3142 3143 j = 1; 3144 for_each_rxsc_rtnl(secy, rx_sc) { 3145 int k; 3146 struct nlattr *rxsa_list; 3147 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3148 3149 if (!rxsc_nest) { 3150 nla_nest_cancel(skb, rxsc_list); 3151 goto nla_put_failure; 3152 } 3153 3154 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3155 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3156 MACSEC_RXSC_ATTR_PAD)) { 3157 nla_nest_cancel(skb, rxsc_nest); 3158 nla_nest_cancel(skb, rxsc_list); 3159 goto nla_put_failure; 3160 } 3161 3162 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3163 if (!attr) { 3164 nla_nest_cancel(skb, rxsc_nest); 3165 nla_nest_cancel(skb, rxsc_list); 3166 goto nla_put_failure; 3167 } 3168 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3169 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3170 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3171 nla_nest_cancel(skb, attr); 3172 nla_nest_cancel(skb, rxsc_nest); 3173 nla_nest_cancel(skb, rxsc_list); 3174 goto nla_put_failure; 3175 } 3176 nla_nest_end(skb, attr); 3177 3178 rxsa_list = nla_nest_start_noflag(skb, 3179 MACSEC_RXSC_ATTR_SA_LIST); 3180 if (!rxsa_list) { 3181 nla_nest_cancel(skb, rxsc_nest); 3182 nla_nest_cancel(skb, rxsc_list); 3183 goto nla_put_failure; 3184 } 3185 3186 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3187 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3188 struct nlattr *rxsa_nest; 3189 u64 pn; 3190 int pn_len; 3191 3192 if (!rx_sa) 3193 continue; 3194 3195 rxsa_nest = nla_nest_start_noflag(skb, k++); 3196 if (!rxsa_nest) { 3197 nla_nest_cancel(skb, rxsa_list); 3198 nla_nest_cancel(skb, rxsc_nest); 3199 nla_nest_cancel(skb, rxsc_list); 3200 goto nla_put_failure; 3201 } 3202 3203 attr = nla_nest_start_noflag(skb, 3204 MACSEC_SA_ATTR_STATS); 
3205 if (!attr) { 3206 nla_nest_cancel(skb, rxsa_list); 3207 nla_nest_cancel(skb, rxsc_nest); 3208 nla_nest_cancel(skb, rxsc_list); 3209 goto nla_put_failure; 3210 } 3211 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3212 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3213 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3214 nla_nest_cancel(skb, attr); 3215 nla_nest_cancel(skb, rxsa_list); 3216 nla_nest_cancel(skb, rxsc_nest); 3217 nla_nest_cancel(skb, rxsc_list); 3218 goto nla_put_failure; 3219 } 3220 nla_nest_end(skb, attr); 3221 3222 if (secy->xpn) { 3223 pn = rx_sa->next_pn; 3224 pn_len = MACSEC_XPN_PN_LEN; 3225 } else { 3226 pn = rx_sa->next_pn_halves.lower; 3227 pn_len = MACSEC_DEFAULT_PN_LEN; 3228 } 3229 3230 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3231 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3232 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3233 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3234 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3235 nla_nest_cancel(skb, rxsa_nest); 3236 nla_nest_cancel(skb, rxsc_nest); 3237 nla_nest_cancel(skb, rxsc_list); 3238 goto nla_put_failure; 3239 } 3240 nla_nest_end(skb, rxsa_nest); 3241 } 3242 3243 nla_nest_end(skb, rxsa_list); 3244 nla_nest_end(skb, rxsc_nest); 3245 } 3246 3247 nla_nest_end(skb, rxsc_list); 3248 3249 genlmsg_end(skb, hdr); 3250 3251 return 0; 3252 3253 nla_put_failure: 3254 genlmsg_cancel(skb, hdr); 3255 return -EMSGSIZE; 3256 } 3257 3258 static int macsec_generation = 1; /* protected by RTNL */ 3259 3260 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3261 { 3262 struct net *net = sock_net(skb->sk); 3263 struct net_device *dev; 3264 int dev_idx, d; 3265 3266 dev_idx = cb->args[0]; 3267 3268 d = 0; 3269 rtnl_lock(); 3270 3271 cb->seq = macsec_generation; 3272 3273 for_each_netdev(net, dev) { 3274 struct macsec_secy *secy; 3275 3276 if (d < dev_idx) 3277 goto next; 3278 3279 if (!netif_is_macsec(dev)) 3280 goto next; 3281 3282 secy = &macsec_priv(dev)->secy; 3283 if (dump_secy(secy, dev, skb, cb) < 0) 3284 goto done; 3285 next: 3286 d++; 3287 } 3288 3289 done: 3290 rtnl_unlock(); 3291 cb->args[0] = d; 3292 return skb->len; 3293 } 3294 3295 static const struct genl_small_ops macsec_genl_ops[] = { 3296 { 3297 .cmd = MACSEC_CMD_GET_TXSC, 3298 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3299 .dumpit = macsec_dump_txsc, 3300 }, 3301 { 3302 .cmd = MACSEC_CMD_ADD_RXSC, 3303 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3304 .doit = macsec_add_rxsc, 3305 .flags = GENL_ADMIN_PERM, 3306 }, 3307 { 3308 .cmd = MACSEC_CMD_DEL_RXSC, 3309 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3310 .doit = macsec_del_rxsc, 3311 .flags = GENL_ADMIN_PERM, 3312 }, 3313 { 3314 .cmd = MACSEC_CMD_UPD_RXSC, 3315 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3316 .doit = macsec_upd_rxsc, 3317 .flags = GENL_ADMIN_PERM, 3318 }, 3319 { 3320 .cmd = MACSEC_CMD_ADD_TXSA, 3321 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3322 .doit = macsec_add_txsa, 3323 .flags = GENL_ADMIN_PERM, 3324 }, 3325 { 3326 .cmd = MACSEC_CMD_DEL_TXSA, 3327 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3328 .doit = macsec_del_txsa, 3329 .flags = GENL_ADMIN_PERM, 3330 }, 3331 { 3332 .cmd = MACSEC_CMD_UPD_TXSA, 3333 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3334 .doit = macsec_upd_txsa, 3335 .flags = GENL_ADMIN_PERM, 3336 }, 3337 { 3338 .cmd = 
MACSEC_CMD_ADD_RXSA, 3339 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3340 .doit = macsec_add_rxsa, 3341 .flags = GENL_ADMIN_PERM, 3342 }, 3343 { 3344 .cmd = MACSEC_CMD_DEL_RXSA, 3345 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3346 .doit = macsec_del_rxsa, 3347 .flags = GENL_ADMIN_PERM, 3348 }, 3349 { 3350 .cmd = MACSEC_CMD_UPD_RXSA, 3351 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3352 .doit = macsec_upd_rxsa, 3353 .flags = GENL_ADMIN_PERM, 3354 }, 3355 { 3356 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3357 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3358 .doit = macsec_upd_offload, 3359 .flags = GENL_ADMIN_PERM, 3360 }, 3361 }; 3362 3363 static struct genl_family macsec_fam __ro_after_init = { 3364 .name = MACSEC_GENL_NAME, 3365 .hdrsize = 0, 3366 .version = MACSEC_GENL_VERSION, 3367 .maxattr = MACSEC_ATTR_MAX, 3368 .policy = macsec_genl_policy, 3369 .netnsok = true, 3370 .module = THIS_MODULE, 3371 .small_ops = macsec_genl_ops, 3372 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3373 }; 3374 3375 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3376 struct net_device *dev) 3377 { 3378 struct macsec_dev *macsec = netdev_priv(dev); 3379 struct macsec_secy *secy = &macsec->secy; 3380 struct pcpu_secy_stats *secy_stats; 3381 int ret, len; 3382 3383 if (macsec_is_offloaded(netdev_priv(dev))) { 3384 skb->dev = macsec->real_dev; 3385 return dev_queue_xmit(skb); 3386 } 3387 3388 /* 10.5 */ 3389 if (!secy->protect_frames) { 3390 secy_stats = this_cpu_ptr(macsec->stats); 3391 u64_stats_update_begin(&secy_stats->syncp); 3392 secy_stats->stats.OutPktsUntagged++; 3393 u64_stats_update_end(&secy_stats->syncp); 3394 skb->dev = macsec->real_dev; 3395 len = skb->len; 3396 ret = dev_queue_xmit(skb); 3397 count_tx(dev, ret, len); 3398 return ret; 3399 } 3400 3401 if (!secy->operational) { 3402 kfree_skb(skb); 3403 dev->stats.tx_dropped++; 3404 return NETDEV_TX_OK; 3405 } 3406 3407 skb = macsec_encrypt(skb, dev); 3408 if (IS_ERR(skb)) { 3409 if (PTR_ERR(skb) != -EINPROGRESS) 3410 dev->stats.tx_dropped++; 3411 return NETDEV_TX_OK; 3412 } 3413 3414 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3415 3416 macsec_encrypt_finish(skb, dev); 3417 len = skb->len; 3418 ret = dev_queue_xmit(skb); 3419 count_tx(dev, ret, len); 3420 return ret; 3421 } 3422 3423 #define SW_MACSEC_FEATURES \ 3424 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3425 3426 /* If h/w offloading is enabled, use real device features save for 3427 * VLAN_FEATURES - they require additional ops 3428 * HW_MACSEC - no reason to report it 3429 */ 3430 #define REAL_DEV_FEATURES(dev) \ 3431 ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) 3432 3433 static int macsec_dev_init(struct net_device *dev) 3434 { 3435 struct macsec_dev *macsec = macsec_priv(dev); 3436 struct net_device *real_dev = macsec->real_dev; 3437 int err; 3438 3439 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 3440 if (!dev->tstats) 3441 return -ENOMEM; 3442 3443 err = gro_cells_init(&macsec->gro_cells, dev); 3444 if (err) { 3445 free_percpu(dev->tstats); 3446 return err; 3447 } 3448 3449 if (macsec_is_offloaded(macsec)) { 3450 dev->features = REAL_DEV_FEATURES(real_dev); 3451 } else { 3452 dev->features = real_dev->features & SW_MACSEC_FEATURES; 3453 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 3454 } 3455 3456 dev->needed_headroom = real_dev->needed_headroom + 3457 MACSEC_NEEDED_HEADROOM; 3458 dev->needed_tailroom = 
real_dev->needed_tailroom + 3459 MACSEC_NEEDED_TAILROOM; 3460 3461 if (is_zero_ether_addr(dev->dev_addr)) 3462 eth_hw_addr_inherit(dev, real_dev); 3463 if (is_zero_ether_addr(dev->broadcast)) 3464 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3465 3466 /* Get macsec's reference to real_dev */ 3467 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); 3468 3469 return 0; 3470 } 3471 3472 static void macsec_dev_uninit(struct net_device *dev) 3473 { 3474 struct macsec_dev *macsec = macsec_priv(dev); 3475 3476 gro_cells_destroy(&macsec->gro_cells); 3477 free_percpu(dev->tstats); 3478 } 3479 3480 static netdev_features_t macsec_fix_features(struct net_device *dev, 3481 netdev_features_t features) 3482 { 3483 struct macsec_dev *macsec = macsec_priv(dev); 3484 struct net_device *real_dev = macsec->real_dev; 3485 3486 if (macsec_is_offloaded(macsec)) 3487 return REAL_DEV_FEATURES(real_dev); 3488 3489 features &= (real_dev->features & SW_MACSEC_FEATURES) | 3490 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3491 features |= NETIF_F_LLTX; 3492 3493 return features; 3494 } 3495 3496 static int macsec_dev_open(struct net_device *dev) 3497 { 3498 struct macsec_dev *macsec = macsec_priv(dev); 3499 struct net_device *real_dev = macsec->real_dev; 3500 int err; 3501 3502 err = dev_uc_add(real_dev, dev->dev_addr); 3503 if (err < 0) 3504 return err; 3505 3506 if (dev->flags & IFF_ALLMULTI) { 3507 err = dev_set_allmulti(real_dev, 1); 3508 if (err < 0) 3509 goto del_unicast; 3510 } 3511 3512 if (dev->flags & IFF_PROMISC) { 3513 err = dev_set_promiscuity(real_dev, 1); 3514 if (err < 0) 3515 goto clear_allmulti; 3516 } 3517 3518 /* If h/w offloading is available, propagate to the device */ 3519 if (macsec_is_offloaded(macsec)) { 3520 const struct macsec_ops *ops; 3521 struct macsec_context ctx; 3522 3523 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3524 if (!ops) { 3525 err = -EOPNOTSUPP; 3526 goto clear_allmulti; 3527 } 3528 3529 ctx.secy = &macsec->secy; 3530 err = macsec_offload(ops->mdo_dev_open, &ctx); 3531 if (err) 3532 goto clear_allmulti; 3533 } 3534 3535 if (netif_carrier_ok(real_dev)) 3536 netif_carrier_on(dev); 3537 3538 return 0; 3539 clear_allmulti: 3540 if (dev->flags & IFF_ALLMULTI) 3541 dev_set_allmulti(real_dev, -1); 3542 del_unicast: 3543 dev_uc_del(real_dev, dev->dev_addr); 3544 netif_carrier_off(dev); 3545 return err; 3546 } 3547 3548 static int macsec_dev_stop(struct net_device *dev) 3549 { 3550 struct macsec_dev *macsec = macsec_priv(dev); 3551 struct net_device *real_dev = macsec->real_dev; 3552 3553 netif_carrier_off(dev); 3554 3555 /* If h/w offloading is available, propagate to the device */ 3556 if (macsec_is_offloaded(macsec)) { 3557 const struct macsec_ops *ops; 3558 struct macsec_context ctx; 3559 3560 ops = macsec_get_ops(macsec, &ctx); 3561 if (ops) { 3562 ctx.secy = &macsec->secy; 3563 macsec_offload(ops->mdo_dev_stop, &ctx); 3564 } 3565 } 3566 3567 dev_mc_unsync(real_dev, dev); 3568 dev_uc_unsync(real_dev, dev); 3569 3570 if (dev->flags & IFF_ALLMULTI) 3571 dev_set_allmulti(real_dev, -1); 3572 3573 if (dev->flags & IFF_PROMISC) 3574 dev_set_promiscuity(real_dev, -1); 3575 3576 dev_uc_del(real_dev, dev->dev_addr); 3577 3578 return 0; 3579 } 3580 3581 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3582 { 3583 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3584 3585 if (!(dev->flags & IFF_UP)) 3586 return; 3587 3588 if (change & IFF_ALLMULTI) 3589 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 
1 : -1); 3590 3591 if (change & IFF_PROMISC) 3592 dev_set_promiscuity(real_dev, 3593 dev->flags & IFF_PROMISC ? 1 : -1); 3594 } 3595 3596 static void macsec_dev_set_rx_mode(struct net_device *dev) 3597 { 3598 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3599 3600 dev_mc_sync(real_dev, dev); 3601 dev_uc_sync(real_dev, dev); 3602 } 3603 3604 static int macsec_set_mac_address(struct net_device *dev, void *p) 3605 { 3606 struct macsec_dev *macsec = macsec_priv(dev); 3607 struct net_device *real_dev = macsec->real_dev; 3608 struct sockaddr *addr = p; 3609 int err; 3610 3611 if (!is_valid_ether_addr(addr->sa_data)) 3612 return -EADDRNOTAVAIL; 3613 3614 if (!(dev->flags & IFF_UP)) 3615 goto out; 3616 3617 err = dev_uc_add(real_dev, addr->sa_data); 3618 if (err < 0) 3619 return err; 3620 3621 dev_uc_del(real_dev, dev->dev_addr); 3622 3623 out: 3624 eth_hw_addr_set(dev, addr->sa_data); 3625 macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); 3626 3627 /* If h/w offloading is available, propagate to the device */ 3628 if (macsec_is_offloaded(macsec)) { 3629 const struct macsec_ops *ops; 3630 struct macsec_context ctx; 3631 3632 ops = macsec_get_ops(macsec, &ctx); 3633 if (ops) { 3634 ctx.secy = &macsec->secy; 3635 macsec_offload(ops->mdo_upd_secy, &ctx); 3636 } 3637 } 3638 3639 return 0; 3640 } 3641 3642 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3643 { 3644 struct macsec_dev *macsec = macsec_priv(dev); 3645 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3646 3647 if (macsec->real_dev->mtu - extra < new_mtu) 3648 return -ERANGE; 3649 3650 dev->mtu = new_mtu; 3651 3652 return 0; 3653 } 3654 3655 static void macsec_get_stats64(struct net_device *dev, 3656 struct rtnl_link_stats64 *s) 3657 { 3658 if (!dev->tstats) 3659 return; 3660 3661 dev_fetch_sw_netstats(s, dev->tstats); 3662 3663 s->rx_dropped = dev->stats.rx_dropped; 3664 s->tx_dropped = dev->stats.tx_dropped; 3665 } 3666 3667 static int macsec_get_iflink(const struct net_device *dev) 3668 { 3669 return macsec_priv(dev)->real_dev->ifindex; 3670 } 3671 3672 static const struct net_device_ops macsec_netdev_ops = { 3673 .ndo_init = macsec_dev_init, 3674 .ndo_uninit = macsec_dev_uninit, 3675 .ndo_open = macsec_dev_open, 3676 .ndo_stop = macsec_dev_stop, 3677 .ndo_fix_features = macsec_fix_features, 3678 .ndo_change_mtu = macsec_change_mtu, 3679 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3680 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3681 .ndo_set_mac_address = macsec_set_mac_address, 3682 .ndo_start_xmit = macsec_start_xmit, 3683 .ndo_get_stats64 = macsec_get_stats64, 3684 .ndo_get_iflink = macsec_get_iflink, 3685 }; 3686 3687 static const struct device_type macsec_type = { 3688 .name = "macsec", 3689 }; 3690 3691 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3692 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3693 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3694 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3695 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3696 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3697 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3698 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3699 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3700 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3701 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3702 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3703 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3704 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3705 }; 3706 3707 static void macsec_free_netdev(struct net_device 
*dev) 3708 { 3709 struct macsec_dev *macsec = macsec_priv(dev); 3710 3711 free_percpu(macsec->stats); 3712 free_percpu(macsec->secy.tx_sc.stats); 3713 3714 /* Get rid of the macsec's reference to real_dev */ 3715 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3716 } 3717 3718 static void macsec_setup(struct net_device *dev) 3719 { 3720 ether_setup(dev); 3721 dev->min_mtu = 0; 3722 dev->max_mtu = ETH_MAX_MTU; 3723 dev->priv_flags |= IFF_NO_QUEUE; 3724 dev->netdev_ops = &macsec_netdev_ops; 3725 dev->needs_free_netdev = true; 3726 dev->priv_destructor = macsec_free_netdev; 3727 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3728 3729 eth_zero_addr(dev->broadcast); 3730 } 3731 3732 static int macsec_changelink_common(struct net_device *dev, 3733 struct nlattr *data[]) 3734 { 3735 struct macsec_secy *secy; 3736 struct macsec_tx_sc *tx_sc; 3737 3738 secy = &macsec_priv(dev)->secy; 3739 tx_sc = &secy->tx_sc; 3740 3741 if (data[IFLA_MACSEC_ENCODING_SA]) { 3742 struct macsec_tx_sa *tx_sa; 3743 3744 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3745 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3746 3747 secy->operational = tx_sa && tx_sa->active; 3748 } 3749 3750 if (data[IFLA_MACSEC_ENCRYPT]) 3751 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3752 3753 if (data[IFLA_MACSEC_PROTECT]) 3754 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3755 3756 if (data[IFLA_MACSEC_INC_SCI]) 3757 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3758 3759 if (data[IFLA_MACSEC_ES]) 3760 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3761 3762 if (data[IFLA_MACSEC_SCB]) 3763 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3764 3765 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3766 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3767 3768 if (data[IFLA_MACSEC_VALIDATION]) 3769 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3770 3771 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3772 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3773 case MACSEC_CIPHER_ID_GCM_AES_128: 3774 case MACSEC_DEFAULT_CIPHER_ID: 3775 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3776 secy->xpn = false; 3777 break; 3778 case MACSEC_CIPHER_ID_GCM_AES_256: 3779 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3780 secy->xpn = false; 3781 break; 3782 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3783 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3784 secy->xpn = true; 3785 break; 3786 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3787 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3788 secy->xpn = true; 3789 break; 3790 default: 3791 return -EINVAL; 3792 } 3793 } 3794 3795 if (data[IFLA_MACSEC_WINDOW]) { 3796 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3797 3798 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3799 * for XPN cipher suites */ 3800 if (secy->xpn && 3801 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3802 return -EINVAL; 3803 } 3804 3805 return 0; 3806 } 3807 3808 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3809 struct nlattr *data[], 3810 struct netlink_ext_ack *extack) 3811 { 3812 struct macsec_dev *macsec = macsec_priv(dev); 3813 struct macsec_tx_sc tx_sc; 3814 struct macsec_secy secy; 3815 int ret; 3816 3817 if (!data) 3818 return 0; 3819 3820 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3821 data[IFLA_MACSEC_ICV_LEN] || 3822 data[IFLA_MACSEC_SCI] || 3823 data[IFLA_MACSEC_PORT]) 3824 return -EINVAL; 3825 3826 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3827 * propagation 
fails, to revert macsec_changelink_common. 3828 */ 3829 memcpy(&secy, &macsec->secy, sizeof(secy)); 3830 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3831 3832 ret = macsec_changelink_common(dev, data); 3833 if (ret) 3834 goto cleanup; 3835 3836 /* If h/w offloading is available, propagate to the device */ 3837 if (macsec_is_offloaded(macsec)) { 3838 const struct macsec_ops *ops; 3839 struct macsec_context ctx; 3841 3842 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3843 if (!ops) { 3844 ret = -EOPNOTSUPP; 3845 goto cleanup; 3846 } 3847 3848 ctx.secy = &macsec->secy; 3849 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3850 if (ret) 3851 goto cleanup; 3852 } 3853 3854 return 0; 3855 3856 cleanup: 3857 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3858 memcpy(&macsec->secy, &secy, sizeof(secy)); 3859 3860 return ret; 3861 } 3862 3863 static void macsec_del_dev(struct macsec_dev *macsec) 3864 { 3865 int i; 3866 3867 while (macsec->secy.rx_sc) { 3868 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3869 3870 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3871 free_rx_sc(rx_sc); 3872 } 3873 3874 for (i = 0; i < MACSEC_NUM_AN; i++) { 3875 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3876 3877 if (sa) { 3878 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3879 clear_tx_sa(sa); 3880 } 3881 } 3882 } 3883 3884 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3885 { 3886 struct macsec_dev *macsec = macsec_priv(dev); 3887 struct net_device *real_dev = macsec->real_dev; 3888 3889 /* If h/w offloading is available, propagate to the device */ 3890 if (macsec_is_offloaded(macsec)) { 3891 const struct macsec_ops *ops; 3892 struct macsec_context ctx; 3893 3894 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3895 if (ops) { 3896 ctx.secy = &macsec->secy; 3897 macsec_offload(ops->mdo_del_secy, &ctx); 3898 } 3899 } 3900 3901 unregister_netdevice_queue(dev, head); 3902 list_del_rcu(&macsec->secys); 3903 macsec_del_dev(macsec); 3904 netdev_upper_dev_unlink(real_dev, dev); 3905 3906 macsec_generation++; 3907 } 3908 3909 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3910 { 3911 struct macsec_dev *macsec = macsec_priv(dev); 3912 struct net_device *real_dev = macsec->real_dev; 3913 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3914 3915 macsec_common_dellink(dev, head); 3916 3917 if (list_empty(&rxd->secys)) { 3918 netdev_rx_handler_unregister(real_dev); 3919 kfree(rxd); 3920 } 3921 } 3922 3923 static int register_macsec_dev(struct net_device *real_dev, 3924 struct net_device *dev) 3925 { 3926 struct macsec_dev *macsec = macsec_priv(dev); 3927 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3928 3929 if (!rxd) { 3930 int err; 3931 3932 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3933 if (!rxd) 3934 return -ENOMEM; 3935 3936 INIT_LIST_HEAD(&rxd->secys); 3937 3938 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3939 rxd); 3940 if (err < 0) { 3941 kfree(rxd); 3942 return err; 3943 } 3944 } 3945 3946 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3947 return 0; 3948 } 3949 3950 static bool sci_exists(struct net_device *dev, sci_t sci) 3951 { 3952 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3953 struct macsec_dev *macsec; 3954 3955 list_for_each_entry(macsec, &rxd->secys, secys) { 3956 if (macsec->secy.sci == sci) 3957 return true; 3958 } 3959 3960 return false; 3961 } 3962 3963 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8
icv_len) 3964 { 3965 struct macsec_dev *macsec = macsec_priv(dev); 3966 struct macsec_secy *secy = &macsec->secy; 3967 3968 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3969 if (!macsec->stats) 3970 return -ENOMEM; 3971 3972 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3973 if (!secy->tx_sc.stats) { 3974 free_percpu(macsec->stats); 3975 return -ENOMEM; 3976 } 3977 3978 if (sci == MACSEC_UNDEF_SCI) 3979 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3980 3981 secy->netdev = dev; 3982 secy->operational = true; 3983 secy->key_len = DEFAULT_SAK_LEN; 3984 secy->icv_len = icv_len; 3985 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3986 secy->protect_frames = true; 3987 secy->replay_protect = false; 3988 secy->xpn = DEFAULT_XPN; 3989 3990 secy->sci = sci; 3991 secy->tx_sc.active = true; 3992 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3993 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3994 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3995 secy->tx_sc.end_station = false; 3996 secy->tx_sc.scb = false; 3997 3998 return 0; 3999 } 4000 4001 static struct lock_class_key macsec_netdev_addr_lock_key; 4002 4003 static int macsec_newlink(struct net *net, struct net_device *dev, 4004 struct nlattr *tb[], struct nlattr *data[], 4005 struct netlink_ext_ack *extack) 4006 { 4007 struct macsec_dev *macsec = macsec_priv(dev); 4008 rx_handler_func_t *rx_handler; 4009 u8 icv_len = DEFAULT_ICV_LEN; 4010 struct net_device *real_dev; 4011 int err, mtu; 4012 sci_t sci; 4013 4014 if (!tb[IFLA_LINK]) 4015 return -EINVAL; 4016 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4017 if (!real_dev) 4018 return -ENODEV; 4019 if (real_dev->type != ARPHRD_ETHER) 4020 return -EINVAL; 4021 4022 dev->priv_flags |= IFF_MACSEC; 4023 4024 macsec->real_dev = real_dev; 4025 4026 if (data && data[IFLA_MACSEC_OFFLOAD]) 4027 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4028 else 4029 /* MACsec offloading is off by default */ 4030 macsec->offload = MACSEC_OFFLOAD_OFF; 4031 4032 /* Check if the offloading mode is supported by the underlying layers */ 4033 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4034 !macsec_check_offload(macsec->offload, macsec)) 4035 return -EOPNOTSUPP; 4036 4037 /* send_sci must be set to true when transmit sci explicitly is set */ 4038 if ((data && data[IFLA_MACSEC_SCI]) && 4039 (data && data[IFLA_MACSEC_INC_SCI])) { 4040 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4041 4042 if (!send_sci) 4043 return -EINVAL; 4044 } 4045 4046 if (data && data[IFLA_MACSEC_ICV_LEN]) 4047 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4048 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4049 if (mtu < 0) 4050 dev->mtu = 0; 4051 else 4052 dev->mtu = mtu; 4053 4054 rx_handler = rtnl_dereference(real_dev->rx_handler); 4055 if (rx_handler && rx_handler != macsec_handle_frame) 4056 return -EBUSY; 4057 4058 err = register_netdevice(dev); 4059 if (err < 0) 4060 return err; 4061 4062 netdev_lockdep_set_classes(dev); 4063 lockdep_set_class(&dev->addr_list_lock, 4064 &macsec_netdev_addr_lock_key); 4065 4066 err = netdev_upper_dev_link(real_dev, dev, extack); 4067 if (err < 0) 4068 goto unregister; 4069 4070 /* need to be already registered so that ->init has run and 4071 * the MAC addr is set 4072 */ 4073 if (data && data[IFLA_MACSEC_SCI]) 4074 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4075 else if (data && data[IFLA_MACSEC_PORT]) 4076 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4077 else 4078 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4079 4080 if 
(rx_handler && sci_exists(real_dev, sci)) { 4081 err = -EBUSY; 4082 goto unlink; 4083 } 4084 4085 err = macsec_add_dev(dev, sci, icv_len); 4086 if (err) 4087 goto unlink; 4088 4089 if (data) { 4090 err = macsec_changelink_common(dev, data); 4091 if (err) 4092 goto del_dev; 4093 } 4094 4095 /* If h/w offloading is available, propagate to the device */ 4096 if (macsec_is_offloaded(macsec)) { 4097 const struct macsec_ops *ops; 4098 struct macsec_context ctx; 4099 4100 ops = macsec_get_ops(macsec, &ctx); 4101 if (ops) { 4102 ctx.secy = &macsec->secy; 4103 err = macsec_offload(ops->mdo_add_secy, &ctx); 4104 if (err) 4105 goto del_dev; 4106 } 4107 } 4108 4109 err = register_macsec_dev(real_dev, dev); 4110 if (err < 0) 4111 goto del_dev; 4112 4113 netif_stacked_transfer_operstate(real_dev, dev); 4114 linkwatch_fire_event(dev); 4115 4116 macsec_generation++; 4117 4118 return 0; 4119 4120 del_dev: 4121 macsec_del_dev(macsec); 4122 unlink: 4123 netdev_upper_dev_unlink(real_dev, dev); 4124 unregister: 4125 unregister_netdevice(dev); 4126 return err; 4127 } 4128 4129 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4130 struct netlink_ext_ack *extack) 4131 { 4132 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4133 u8 icv_len = DEFAULT_ICV_LEN; 4134 int flag; 4135 bool es, scb, sci; 4136 4137 if (!data) 4138 return 0; 4139 4140 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4141 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4142 4143 if (data[IFLA_MACSEC_ICV_LEN]) { 4144 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4145 if (icv_len != DEFAULT_ICV_LEN) { 4146 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4147 struct crypto_aead *dummy_tfm; 4148 4149 dummy_tfm = macsec_alloc_tfm(dummy_key, 4150 DEFAULT_SAK_LEN, 4151 icv_len); 4152 if (IS_ERR(dummy_tfm)) 4153 return PTR_ERR(dummy_tfm); 4154 crypto_free_aead(dummy_tfm); 4155 } 4156 } 4157 4158 switch (csid) { 4159 case MACSEC_CIPHER_ID_GCM_AES_128: 4160 case MACSEC_CIPHER_ID_GCM_AES_256: 4161 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 4162 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 4163 case MACSEC_DEFAULT_CIPHER_ID: 4164 if (icv_len < MACSEC_MIN_ICV_LEN || 4165 icv_len > MACSEC_STD_ICV_LEN) 4166 return -EINVAL; 4167 break; 4168 default: 4169 return -EINVAL; 4170 } 4171 4172 if (data[IFLA_MACSEC_ENCODING_SA]) { 4173 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 4174 return -EINVAL; 4175 } 4176 4177 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 4178 flag < IFLA_MACSEC_VALIDATION; 4179 flag++) { 4180 if (data[flag]) { 4181 if (nla_get_u8(data[flag]) > 1) 4182 return -EINVAL; 4183 } 4184 } 4185 4186 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 4187 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 4188 scb = data[IFLA_MACSEC_SCB] ? 
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

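/* Netdevice notifier for the underlying device: mirror operstate changes
 * to the MACsec devices stacked on it, delete them all when the lower
 * device is unregistered, and clamp their MTU so it never exceeds the
 * lower MTU minus the SecTAG and ICV overhead.
 */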
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");