// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
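/* A quick reference for the two IV layouts above, derived from the struct
 * definitions: the 12-byte GCM IV is SCI(8) || PN(4) in normal mode, and
 * (SSCI ^ salt.ssci)(4) || (PN64 ^ salt.pn)(8) with XPN.  pn_same_half()
 * simply compares bit 31 of two lower-half packet numbers, i.e. whether
 * both sit in the same half of the 32-bit sequence space; the XPN code
 * uses it to decide when the upper 32 bits of the PN must have rolled
 * over.
 */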
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
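/* For illustration (addresses are examples): the SCI is the 6-byte MAC
 * address followed by the 2-byte port number, both in network order.  A
 * station at 52:54:00:12:34:56 using the default end-station port
 * (MACSEC_PORT_ES, i.e. 1) thus has SCI 52:54:00:12:34:56:00:01, which
 * iproute2 displays as "sci 5254001234560001".
 */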
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
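/* Worked numbers for the helpers above: the SecTAG is 6 bytes, or 14 with
 * the explicit SCI.  macsec_extra_len() adds sizeof(__be16) because the
 * MACsec EtherType is inserted in front of the original EtherType, which
 * itself becomes the first two bytes of the secure data; a tagged frame
 * therefore grows by 8 or 16 bytes before the ICV is appended.
 * macsec_hdr_len(true) = 14 (Ethernet) + 14 (SecTAG with SCI) = 28 bytes
 * of associated data when encrypting.  short_length encodes secure data
 * lengths 1..47; it stays 0 for MIN_NON_SHORT_LEN (48) bytes or more.
 */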
/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
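/* In the worst case (SCI present, standard 16-byte ICV) a frame grows by
 * MACSEC_NEEDED_HEADROOM (16) + MACSEC_NEEDED_TAILROOM (16) = 32 bytes,
 * which is why the MACsec netdevice's MTU is reduced below the underlying
 * device's by the SecTAG and ICV overhead.
 */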
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	/* single allocation holding the request (plus the tfm's private
	 * context), the IV and the scatterlist, in that order
	 */
	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
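/* macsec_encrypt() below protects one frame: it looks up the encoding SA,
 * makes room for the SecTAG and ICV (copying or unsharing the skb as
 * needed), takes the next PN, fills the SecTAG and IV, maps the skb into
 * a scatterlist and hands it to the AEAD.  -EINPROGRESS means an async
 * completion will finish the job in macsec_encrypt_done().
 */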
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
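/* macsec_post_decrypt() below enforces replay protection: the lowest
 * acceptable PN is next_pn - replay_window, clamped at 0.  For example,
 * with next_pn = 100 and a window of 32, PNs below 68 are counted as
 * InPktsLate.  With XPN the late check is only meaningful when both PNs
 * sit in the same 32-bit half, hence the pn_same_half() qualifier.
 */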
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* Instead of "pn >=" - to support pn overflow in xpn */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
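/* In macsec_decrypt() below, XPN has to recover the full 64-bit PN from
 * the 32 bits carried in the SecTAG.  The heuristic: keep the expected
 * upper half, and bump it when the received lower half is smaller than
 * the expected one *and* lies in the other half of the 32-bit space.
 * E.g. expecting {upper = 5, lower = 0xfffffff0} and receiving
 * hdr_pn = 0x00000004 recovers PN = (6 << 32) | 4.
 */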
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
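/* handle_not_macsec() deals with untagged traffic arriving on the real
 * device: in 802.1AE terms it is delivered to the uncontrolled port
 * (RX_HANDLER_PASS keeps the skb on the underlying device) and, unless a
 * SecY is configured for strict validation, a copy also goes up each
 * MACsec interface's controlled port, counted as InPktsUntagged.
 */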
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
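/* macsec_handle_frame() is the rx_handler for the real device.  The path
 * for a tagged frame is, roughly: unshare the skb, pull the SecTAG,
 * reject reserved TCI encodings, look up the receive SC by SCI across all
 * SecYs stacked on this device, pre-check the PN against the replay
 * window to avoid pointless decryption, decrypt/validate, run
 * macsec_post_decrypt(), then strip SecTAG+ICV and hand the frame to the
 * MACsec netdevice via GRO cells.
 */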
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}
	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
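/* The add/del/upd genl ops below are what iproute2's "ip macsec" drives.
 * A typical software-MACsec setup looks roughly like this (device names,
 * addresses and keys are only examples):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on \
 *           key 01 81818181818181818181818181818181
 *   ip macsec add macsec0 rx port 1 address 52:54:00:12:34:56
 *   ip macsec add macsec0 rx port 1 address 52:54:00:12:34:56 \
 *           sa 0 pn 1 on key 02 82828282828282828282828282828282
 */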
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	kfree(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
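/* Note the pattern shared by the configuration ops below: software state
 * is updated first, then, if the SecY is offloaded, the change is
 * mirrored to the driver; a driver failure rolls the software state back
 * (the was_active/was_operational saves, or freeing a not-yet-published
 * SA) so both sides stay consistent.
 */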
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
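/* Unlike an RX SA, whose next_pn defaults to 1, a TX SA must be created
 * with an explicit, non-zero PN: validate_add_txsa() above rejects a
 * missing or zero MACSEC_SA_ATTR_PN, since the transmit PN is where the
 * sequence space starts and PN 0 is never valid on the wire.
 */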
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	kfree(tx_sa);
	rtnl_unlock();
	return err;
}
(macsec_is_offloaded(netdev_priv(dev))) { 2124 const struct macsec_ops *ops; 2125 struct macsec_context ctx; 2126 2127 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2128 if (!ops) { 2129 ret = -EOPNOTSUPP; 2130 goto cleanup; 2131 } 2132 2133 ctx.sa.assoc_num = assoc_num; 2134 ctx.sa.rx_sa = rx_sa; 2135 ctx.secy = secy; 2136 2137 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2138 if (ret) 2139 goto cleanup; 2140 } 2141 2142 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2143 clear_rx_sa(rx_sa); 2144 2145 rtnl_unlock(); 2146 2147 return 0; 2148 2149 cleanup: 2150 rtnl_unlock(); 2151 return ret; 2152 } 2153 2154 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2155 { 2156 struct nlattr **attrs = info->attrs; 2157 struct net_device *dev; 2158 struct macsec_secy *secy; 2159 struct macsec_rx_sc *rx_sc; 2160 sci_t sci; 2161 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2162 int ret; 2163 2164 if (!attrs[MACSEC_ATTR_IFINDEX]) 2165 return -EINVAL; 2166 2167 if (parse_rxsc_config(attrs, tb_rxsc)) 2168 return -EINVAL; 2169 2170 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2171 return -EINVAL; 2172 2173 rtnl_lock(); 2174 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2175 if (IS_ERR(dev)) { 2176 rtnl_unlock(); 2177 return PTR_ERR(dev); 2178 } 2179 2180 secy = &macsec_priv(dev)->secy; 2181 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2182 2183 rx_sc = del_rx_sc(secy, sci); 2184 if (!rx_sc) { 2185 rtnl_unlock(); 2186 return -ENODEV; 2187 } 2188 2189 /* If h/w offloading is available, propagate to the device */ 2190 if (macsec_is_offloaded(netdev_priv(dev))) { 2191 const struct macsec_ops *ops; 2192 struct macsec_context ctx; 2193 2194 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2195 if (!ops) { 2196 ret = -EOPNOTSUPP; 2197 goto cleanup; 2198 } 2199 2200 ctx.rx_sc = rx_sc; 2201 ctx.secy = secy; 2202 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2203 if (ret) 2204 goto cleanup; 2205 } 2206 2207 free_rx_sc(rx_sc); 2208 rtnl_unlock(); 2209 2210 return 0; 2211 2212 cleanup: 2213 rtnl_unlock(); 2214 return ret; 2215 } 2216 2217 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2218 { 2219 struct nlattr **attrs = info->attrs; 2220 struct net_device *dev; 2221 struct macsec_secy *secy; 2222 struct macsec_tx_sc *tx_sc; 2223 struct macsec_tx_sa *tx_sa; 2224 u8 assoc_num; 2225 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2226 int ret; 2227 2228 if (!attrs[MACSEC_ATTR_IFINDEX]) 2229 return -EINVAL; 2230 2231 if (parse_sa_config(attrs, tb_sa)) 2232 return -EINVAL; 2233 2234 rtnl_lock(); 2235 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2236 &dev, &secy, &tx_sc, &assoc_num); 2237 if (IS_ERR(tx_sa)) { 2238 rtnl_unlock(); 2239 return PTR_ERR(tx_sa); 2240 } 2241 2242 if (tx_sa->active) { 2243 rtnl_unlock(); 2244 return -EBUSY; 2245 } 2246 2247 /* If h/w offloading is available, propagate to the device */ 2248 if (macsec_is_offloaded(netdev_priv(dev))) { 2249 const struct macsec_ops *ops; 2250 struct macsec_context ctx; 2251 2252 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2253 if (!ops) { 2254 ret = -EOPNOTSUPP; 2255 goto cleanup; 2256 } 2257 2258 ctx.sa.assoc_num = assoc_num; 2259 ctx.sa.tx_sa = tx_sa; 2260 ctx.secy = secy; 2261 2262 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2263 if (ret) 2264 goto cleanup; 2265 } 2266 2267 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2268 clear_tx_sa(tx_sa); 2269 2270 rtnl_unlock(); 2271 2272 return 0; 2273 2274 cleanup: 2275 rtnl_unlock(); 2276 return ret; 2277 } 2278 2279 static bool 
validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	/* As in validate_add_txsa(): read the PN as a u64, since XPN cipher
	 * suites use 64-bit packet numbers and the lower 32 bits alone may
	 * legitimately be zero.
	 */
	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if
(!attrs[MACSEC_ATTR_IFINDEX]) 2412 return -EINVAL; 2413 2414 if (parse_rxsc_config(attrs, tb_rxsc)) 2415 return -EINVAL; 2416 2417 if (parse_sa_config(attrs, tb_sa)) 2418 return -EINVAL; 2419 2420 if (!validate_upd_sa(tb_sa)) 2421 return -EINVAL; 2422 2423 rtnl_lock(); 2424 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2425 &dev, &secy, &rx_sc, &assoc_num); 2426 if (IS_ERR(rx_sa)) { 2427 rtnl_unlock(); 2428 return PTR_ERR(rx_sa); 2429 } 2430 2431 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2432 int pn_len; 2433 2434 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2435 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2436 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2437 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2438 rtnl_unlock(); 2439 return -EINVAL; 2440 } 2441 2442 spin_lock_bh(&rx_sa->lock); 2443 prev_pn = rx_sa->next_pn_halves; 2444 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2445 spin_unlock_bh(&rx_sa->lock); 2446 } 2447 2448 was_active = rx_sa->active; 2449 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2450 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2451 2452 /* If h/w offloading is available, propagate to the device */ 2453 if (macsec_is_offloaded(netdev_priv(dev))) { 2454 const struct macsec_ops *ops; 2455 struct macsec_context ctx; 2456 2457 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2458 if (!ops) { 2459 ret = -EOPNOTSUPP; 2460 goto cleanup; 2461 } 2462 2463 ctx.sa.assoc_num = assoc_num; 2464 ctx.sa.rx_sa = rx_sa; 2465 ctx.secy = secy; 2466 2467 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2468 if (ret) 2469 goto cleanup; 2470 } 2471 2472 rtnl_unlock(); 2473 return 0; 2474 2475 cleanup: 2476 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2477 spin_lock_bh(&rx_sa->lock); 2478 rx_sa->next_pn_halves = prev_pn; 2479 spin_unlock_bh(&rx_sa->lock); 2480 } 2481 rx_sa->active = was_active; 2482 rtnl_unlock(); 2483 return ret; 2484 } 2485 2486 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2487 { 2488 struct nlattr **attrs = info->attrs; 2489 struct net_device *dev; 2490 struct macsec_secy *secy; 2491 struct macsec_rx_sc *rx_sc; 2492 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2493 unsigned int prev_n_rx_sc; 2494 bool was_active; 2495 int ret; 2496 2497 if (!attrs[MACSEC_ATTR_IFINDEX]) 2498 return -EINVAL; 2499 2500 if (parse_rxsc_config(attrs, tb_rxsc)) 2501 return -EINVAL; 2502 2503 if (!validate_add_rxsc(tb_rxsc)) 2504 return -EINVAL; 2505 2506 rtnl_lock(); 2507 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2508 if (IS_ERR(rx_sc)) { 2509 rtnl_unlock(); 2510 return PTR_ERR(rx_sc); 2511 } 2512 2513 was_active = rx_sc->active; 2514 prev_n_rx_sc = secy->n_rx_sc; 2515 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2516 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2517 2518 if (rx_sc->active != new) 2519 secy->n_rx_sc += new ? 
1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->n_rx_sc > 0)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	enum macsec_offload offload, prev_offload;
	int (*func)(struct macsec_context *ctx);
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	/* get_dev_from_nl() ends up in __dev_get_by_index(), which must run
	 * with the rtnl lock held, so take it before the device lookup.
	 */
	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload) {
		ret = 0;
		goto out;
	}

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Check if the net device is busy. */
	if (netif_running(dev)) {
		ret = -EBUSY;
		goto out;
	}

	prev_offload = macsec->offload;
	macsec->offload = offload;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec)) {
		ret = -EBUSY;
		goto rollback;
	}

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops) {
		ret = -EOPNOTSUPP;
		goto rollback;
	}

	if (prev_offload == MACSEC_OFFLOAD_OFF)
		func = ops->mdo_add_secy;
	else
		func = ops->mdo_del_secy;

	ctx.secy = &macsec->secy;
	ret = macsec_offload(func, &ctx);
	if (ret)
		goto rollback;

	/* Force features update, since they are different for SW MACsec and
	 * HW offloading cases. netdev_update_features() asserts the rtnl
	 * lock, so it must run before rtnl_unlock().
	 */
	netdev_update_features(dev);

	rtnl_unlock();
	return 0;

rollback:
	macsec->offload = prev_offload;
out:
	rtnl_unlock();
	return ret;
}

static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}

static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
			sum->OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			sum->OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
			sum->InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
			sum->InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			sum->InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
			sum->InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if
(ops) { 2769 ctx.stats.rx_sc_stats = sum; 2770 ctx.secy = &macsec_priv(dev)->secy; 2771 ctx.rx_sc = rx_sc; 2772 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2773 } 2774 return; 2775 } 2776 2777 for_each_possible_cpu(cpu) { 2778 const struct pcpu_rx_sc_stats *stats; 2779 struct macsec_rx_sc_stats tmp; 2780 unsigned int start; 2781 2782 stats = per_cpu_ptr(rx_sc->stats, cpu); 2783 do { 2784 start = u64_stats_fetch_begin_irq(&stats->syncp); 2785 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2786 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2787 2788 sum->InOctetsValidated += tmp.InOctetsValidated; 2789 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2790 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2791 sum->InPktsDelayed += tmp.InPktsDelayed; 2792 sum->InPktsOK += tmp.InPktsOK; 2793 sum->InPktsInvalid += tmp.InPktsInvalid; 2794 sum->InPktsLate += tmp.InPktsLate; 2795 sum->InPktsNotValid += tmp.InPktsNotValid; 2796 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2797 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2798 } 2799 } 2800 2801 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2802 { 2803 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2804 sum->InOctetsValidated, 2805 MACSEC_RXSC_STATS_ATTR_PAD) || 2806 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2807 sum->InOctetsDecrypted, 2808 MACSEC_RXSC_STATS_ATTR_PAD) || 2809 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2810 sum->InPktsUnchecked, 2811 MACSEC_RXSC_STATS_ATTR_PAD) || 2812 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2813 sum->InPktsDelayed, 2814 MACSEC_RXSC_STATS_ATTR_PAD) || 2815 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2816 sum->InPktsOK, 2817 MACSEC_RXSC_STATS_ATTR_PAD) || 2818 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2819 sum->InPktsInvalid, 2820 MACSEC_RXSC_STATS_ATTR_PAD) || 2821 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2822 sum->InPktsLate, 2823 MACSEC_RXSC_STATS_ATTR_PAD) || 2824 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2825 sum->InPktsNotValid, 2826 MACSEC_RXSC_STATS_ATTR_PAD) || 2827 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2828 sum->InPktsNotUsingSA, 2829 MACSEC_RXSC_STATS_ATTR_PAD) || 2830 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2831 sum->InPktsUnusedSA, 2832 MACSEC_RXSC_STATS_ATTR_PAD)) 2833 return -EMSGSIZE; 2834 2835 return 0; 2836 } 2837 2838 static void get_tx_sc_stats(struct net_device *dev, 2839 struct macsec_tx_sc_stats *sum) 2840 { 2841 struct macsec_dev *macsec = macsec_priv(dev); 2842 int cpu; 2843 2844 /* If h/w offloading is available, propagate to the device */ 2845 if (macsec_is_offloaded(macsec)) { 2846 const struct macsec_ops *ops; 2847 struct macsec_context ctx; 2848 2849 ops = macsec_get_ops(macsec, &ctx); 2850 if (ops) { 2851 ctx.stats.tx_sc_stats = sum; 2852 ctx.secy = &macsec_priv(dev)->secy; 2853 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2854 } 2855 return; 2856 } 2857 2858 for_each_possible_cpu(cpu) { 2859 const struct pcpu_tx_sc_stats *stats; 2860 struct macsec_tx_sc_stats tmp; 2861 unsigned int start; 2862 2863 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2864 do { 2865 start = u64_stats_fetch_begin_irq(&stats->syncp); 2866 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2867 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2868 2869 sum->OutPktsProtected += tmp.OutPktsProtected; 2870 sum->OutPktsEncrypted += 
tmp.OutPktsEncrypted; 2871 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2872 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2873 } 2874 } 2875 2876 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2877 { 2878 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2879 sum->OutPktsProtected, 2880 MACSEC_TXSC_STATS_ATTR_PAD) || 2881 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2882 sum->OutPktsEncrypted, 2883 MACSEC_TXSC_STATS_ATTR_PAD) || 2884 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2885 sum->OutOctetsProtected, 2886 MACSEC_TXSC_STATS_ATTR_PAD) || 2887 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2888 sum->OutOctetsEncrypted, 2889 MACSEC_TXSC_STATS_ATTR_PAD)) 2890 return -EMSGSIZE; 2891 2892 return 0; 2893 } 2894 2895 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2896 { 2897 struct macsec_dev *macsec = macsec_priv(dev); 2898 int cpu; 2899 2900 /* If h/w offloading is available, propagate to the device */ 2901 if (macsec_is_offloaded(macsec)) { 2902 const struct macsec_ops *ops; 2903 struct macsec_context ctx; 2904 2905 ops = macsec_get_ops(macsec, &ctx); 2906 if (ops) { 2907 ctx.stats.dev_stats = sum; 2908 ctx.secy = &macsec_priv(dev)->secy; 2909 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2910 } 2911 return; 2912 } 2913 2914 for_each_possible_cpu(cpu) { 2915 const struct pcpu_secy_stats *stats; 2916 struct macsec_dev_stats tmp; 2917 unsigned int start; 2918 2919 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2920 do { 2921 start = u64_stats_fetch_begin_irq(&stats->syncp); 2922 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2923 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2924 2925 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2926 sum->InPktsUntagged += tmp.InPktsUntagged; 2927 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2928 sum->InPktsNoTag += tmp.InPktsNoTag; 2929 sum->InPktsBadTag += tmp.InPktsBadTag; 2930 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2931 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2932 sum->InPktsOverrun += tmp.InPktsOverrun; 2933 } 2934 } 2935 2936 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2937 { 2938 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2939 sum->OutPktsUntagged, 2940 MACSEC_SECY_STATS_ATTR_PAD) || 2941 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2942 sum->InPktsUntagged, 2943 MACSEC_SECY_STATS_ATTR_PAD) || 2944 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2945 sum->OutPktsTooLong, 2946 MACSEC_SECY_STATS_ATTR_PAD) || 2947 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2948 sum->InPktsNoTag, 2949 MACSEC_SECY_STATS_ATTR_PAD) || 2950 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2951 sum->InPktsBadTag, 2952 MACSEC_SECY_STATS_ATTR_PAD) || 2953 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2954 sum->InPktsUnknownSCI, 2955 MACSEC_SECY_STATS_ATTR_PAD) || 2956 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2957 sum->InPktsNoSCI, 2958 MACSEC_SECY_STATS_ATTR_PAD) || 2959 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2960 sum->InPktsOverrun, 2961 MACSEC_SECY_STATS_ATTR_PAD)) 2962 return -EMSGSIZE; 2963 2964 return 0; 2965 } 2966 2967 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2968 { 2969 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2970 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 
2971 MACSEC_ATTR_SECY); 2972 u64 csid; 2973 2974 if (!secy_nest) 2975 return 1; 2976 2977 switch (secy->key_len) { 2978 case MACSEC_GCM_AES_128_SAK_LEN: 2979 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 2980 break; 2981 case MACSEC_GCM_AES_256_SAK_LEN: 2982 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 2983 break; 2984 default: 2985 goto cancel; 2986 } 2987 2988 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2989 MACSEC_SECY_ATTR_PAD) || 2990 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2991 csid, MACSEC_SECY_ATTR_PAD) || 2992 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2993 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2994 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2995 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 2996 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 2997 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 2998 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 2999 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3000 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3001 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3002 goto cancel; 3003 3004 if (secy->replay_protect) { 3005 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3006 goto cancel; 3007 } 3008 3009 nla_nest_end(skb, secy_nest); 3010 return 0; 3011 3012 cancel: 3013 nla_nest_cancel(skb, secy_nest); 3014 return 1; 3015 } 3016 3017 static noinline_for_stack int 3018 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3019 struct sk_buff *skb, struct netlink_callback *cb) 3020 { 3021 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3022 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3023 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3024 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3025 struct macsec_dev *macsec = netdev_priv(dev); 3026 struct macsec_dev_stats dev_stats = {0, }; 3027 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3028 struct nlattr *txsa_list, *rxsc_list; 3029 struct macsec_rx_sc *rx_sc; 3030 struct nlattr *attr; 3031 void *hdr; 3032 int i, j; 3033 3034 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3035 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3036 if (!hdr) 3037 return -EMSGSIZE; 3038 3039 genl_dump_check_consistent(cb, hdr); 3040 3041 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3042 goto nla_put_failure; 3043 3044 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3045 if (!attr) 3046 goto nla_put_failure; 3047 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3048 goto nla_put_failure; 3049 nla_nest_end(skb, attr); 3050 3051 if (nla_put_secy(secy, skb)) 3052 goto nla_put_failure; 3053 3054 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3055 if (!attr) 3056 goto nla_put_failure; 3057 3058 get_tx_sc_stats(dev, &tx_sc_stats); 3059 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3060 nla_nest_cancel(skb, attr); 3061 goto nla_put_failure; 3062 } 3063 nla_nest_end(skb, attr); 3064 3065 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3066 if (!attr) 3067 goto nla_put_failure; 3068 get_secy_stats(dev, &dev_stats); 3069 if (copy_secy_stats(skb, &dev_stats)) { 3070 nla_nest_cancel(skb, attr); 3071 goto nla_put_failure; 3072 } 3073 nla_nest_end(skb, attr); 3074 3075 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3076 if (!txsa_list) 3077 goto 
nla_put_failure; 3078 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3079 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3080 struct nlattr *txsa_nest; 3081 u64 pn; 3082 int pn_len; 3083 3084 if (!tx_sa) 3085 continue; 3086 3087 txsa_nest = nla_nest_start_noflag(skb, j++); 3088 if (!txsa_nest) { 3089 nla_nest_cancel(skb, txsa_list); 3090 goto nla_put_failure; 3091 } 3092 3093 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3094 if (!attr) { 3095 nla_nest_cancel(skb, txsa_nest); 3096 nla_nest_cancel(skb, txsa_list); 3097 goto nla_put_failure; 3098 } 3099 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3100 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3101 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3102 nla_nest_cancel(skb, attr); 3103 nla_nest_cancel(skb, txsa_nest); 3104 nla_nest_cancel(skb, txsa_list); 3105 goto nla_put_failure; 3106 } 3107 nla_nest_end(skb, attr); 3108 3109 if (secy->xpn) { 3110 pn = tx_sa->next_pn; 3111 pn_len = MACSEC_XPN_PN_LEN; 3112 } else { 3113 pn = tx_sa->next_pn_halves.lower; 3114 pn_len = MACSEC_DEFAULT_PN_LEN; 3115 } 3116 3117 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3118 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3119 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3120 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3121 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3122 nla_nest_cancel(skb, txsa_nest); 3123 nla_nest_cancel(skb, txsa_list); 3124 goto nla_put_failure; 3125 } 3126 3127 nla_nest_end(skb, txsa_nest); 3128 } 3129 nla_nest_end(skb, txsa_list); 3130 3131 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3132 if (!rxsc_list) 3133 goto nla_put_failure; 3134 3135 j = 1; 3136 for_each_rxsc_rtnl(secy, rx_sc) { 3137 int k; 3138 struct nlattr *rxsa_list; 3139 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3140 3141 if (!rxsc_nest) { 3142 nla_nest_cancel(skb, rxsc_list); 3143 goto nla_put_failure; 3144 } 3145 3146 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3147 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3148 MACSEC_RXSC_ATTR_PAD)) { 3149 nla_nest_cancel(skb, rxsc_nest); 3150 nla_nest_cancel(skb, rxsc_list); 3151 goto nla_put_failure; 3152 } 3153 3154 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 3155 if (!attr) { 3156 nla_nest_cancel(skb, rxsc_nest); 3157 nla_nest_cancel(skb, rxsc_list); 3158 goto nla_put_failure; 3159 } 3160 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3161 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3162 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3163 nla_nest_cancel(skb, attr); 3164 nla_nest_cancel(skb, rxsc_nest); 3165 nla_nest_cancel(skb, rxsc_list); 3166 goto nla_put_failure; 3167 } 3168 nla_nest_end(skb, attr); 3169 3170 rxsa_list = nla_nest_start_noflag(skb, 3171 MACSEC_RXSC_ATTR_SA_LIST); 3172 if (!rxsa_list) { 3173 nla_nest_cancel(skb, rxsc_nest); 3174 nla_nest_cancel(skb, rxsc_list); 3175 goto nla_put_failure; 3176 } 3177 3178 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3179 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3180 struct nlattr *rxsa_nest; 3181 u64 pn; 3182 int pn_len; 3183 3184 if (!rx_sa) 3185 continue; 3186 3187 rxsa_nest = nla_nest_start_noflag(skb, k++); 3188 if (!rxsa_nest) { 3189 nla_nest_cancel(skb, rxsa_list); 3190 nla_nest_cancel(skb, rxsc_nest); 3191 nla_nest_cancel(skb, rxsc_list); 3192 goto nla_put_failure; 3193 } 3194 3195 attr = nla_nest_start_noflag(skb, 3196 MACSEC_SA_ATTR_STATS); 3197 if (!attr) { 3198 nla_nest_cancel(skb, rxsa_list); 
3199 nla_nest_cancel(skb, rxsc_nest); 3200 nla_nest_cancel(skb, rxsc_list); 3201 goto nla_put_failure; 3202 } 3203 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3204 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3205 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3206 nla_nest_cancel(skb, attr); 3207 nla_nest_cancel(skb, rxsa_list); 3208 nla_nest_cancel(skb, rxsc_nest); 3209 nla_nest_cancel(skb, rxsc_list); 3210 goto nla_put_failure; 3211 } 3212 nla_nest_end(skb, attr); 3213 3214 if (secy->xpn) { 3215 pn = rx_sa->next_pn; 3216 pn_len = MACSEC_XPN_PN_LEN; 3217 } else { 3218 pn = rx_sa->next_pn_halves.lower; 3219 pn_len = MACSEC_DEFAULT_PN_LEN; 3220 } 3221 3222 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3223 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3224 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3225 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3226 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3227 nla_nest_cancel(skb, rxsa_nest); 3228 nla_nest_cancel(skb, rxsc_nest); 3229 nla_nest_cancel(skb, rxsc_list); 3230 goto nla_put_failure; 3231 } 3232 nla_nest_end(skb, rxsa_nest); 3233 } 3234 3235 nla_nest_end(skb, rxsa_list); 3236 nla_nest_end(skb, rxsc_nest); 3237 } 3238 3239 nla_nest_end(skb, rxsc_list); 3240 3241 genlmsg_end(skb, hdr); 3242 3243 return 0; 3244 3245 nla_put_failure: 3246 genlmsg_cancel(skb, hdr); 3247 return -EMSGSIZE; 3248 } 3249 3250 static int macsec_generation = 1; /* protected by RTNL */ 3251 3252 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3253 { 3254 struct net *net = sock_net(skb->sk); 3255 struct net_device *dev; 3256 int dev_idx, d; 3257 3258 dev_idx = cb->args[0]; 3259 3260 d = 0; 3261 rtnl_lock(); 3262 3263 cb->seq = macsec_generation; 3264 3265 for_each_netdev(net, dev) { 3266 struct macsec_secy *secy; 3267 3268 if (d < dev_idx) 3269 goto next; 3270 3271 if (!netif_is_macsec(dev)) 3272 goto next; 3273 3274 secy = &macsec_priv(dev)->secy; 3275 if (dump_secy(secy, dev, skb, cb) < 0) 3276 goto done; 3277 next: 3278 d++; 3279 } 3280 3281 done: 3282 rtnl_unlock(); 3283 cb->args[0] = d; 3284 return skb->len; 3285 } 3286 3287 static const struct genl_ops macsec_genl_ops[] = { 3288 { 3289 .cmd = MACSEC_CMD_GET_TXSC, 3290 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3291 .dumpit = macsec_dump_txsc, 3292 }, 3293 { 3294 .cmd = MACSEC_CMD_ADD_RXSC, 3295 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3296 .doit = macsec_add_rxsc, 3297 .flags = GENL_ADMIN_PERM, 3298 }, 3299 { 3300 .cmd = MACSEC_CMD_DEL_RXSC, 3301 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3302 .doit = macsec_del_rxsc, 3303 .flags = GENL_ADMIN_PERM, 3304 }, 3305 { 3306 .cmd = MACSEC_CMD_UPD_RXSC, 3307 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3308 .doit = macsec_upd_rxsc, 3309 .flags = GENL_ADMIN_PERM, 3310 }, 3311 { 3312 .cmd = MACSEC_CMD_ADD_TXSA, 3313 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3314 .doit = macsec_add_txsa, 3315 .flags = GENL_ADMIN_PERM, 3316 }, 3317 { 3318 .cmd = MACSEC_CMD_DEL_TXSA, 3319 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3320 .doit = macsec_del_txsa, 3321 .flags = GENL_ADMIN_PERM, 3322 }, 3323 { 3324 .cmd = MACSEC_CMD_UPD_TXSA, 3325 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3326 .doit = macsec_upd_txsa, 3327 .flags = GENL_ADMIN_PERM, 3328 }, 3329 { 3330 .cmd = MACSEC_CMD_ADD_RXSA, 3331 .validate = GENL_DONT_VALIDATE_STRICT | 
GENL_DONT_VALIDATE_DUMP, 3332 .doit = macsec_add_rxsa, 3333 .flags = GENL_ADMIN_PERM, 3334 }, 3335 { 3336 .cmd = MACSEC_CMD_DEL_RXSA, 3337 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3338 .doit = macsec_del_rxsa, 3339 .flags = GENL_ADMIN_PERM, 3340 }, 3341 { 3342 .cmd = MACSEC_CMD_UPD_RXSA, 3343 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3344 .doit = macsec_upd_rxsa, 3345 .flags = GENL_ADMIN_PERM, 3346 }, 3347 { 3348 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3349 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3350 .doit = macsec_upd_offload, 3351 .flags = GENL_ADMIN_PERM, 3352 }, 3353 }; 3354 3355 static struct genl_family macsec_fam __ro_after_init = { 3356 .name = MACSEC_GENL_NAME, 3357 .hdrsize = 0, 3358 .version = MACSEC_GENL_VERSION, 3359 .maxattr = MACSEC_ATTR_MAX, 3360 .policy = macsec_genl_policy, 3361 .netnsok = true, 3362 .module = THIS_MODULE, 3363 .ops = macsec_genl_ops, 3364 .n_ops = ARRAY_SIZE(macsec_genl_ops), 3365 }; 3366 3367 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3368 struct net_device *dev) 3369 { 3370 struct macsec_dev *macsec = netdev_priv(dev); 3371 struct macsec_secy *secy = &macsec->secy; 3372 struct pcpu_secy_stats *secy_stats; 3373 int ret, len; 3374 3375 if (macsec_is_offloaded(netdev_priv(dev))) { 3376 skb->dev = macsec->real_dev; 3377 return dev_queue_xmit(skb); 3378 } 3379 3380 /* 10.5 */ 3381 if (!secy->protect_frames) { 3382 secy_stats = this_cpu_ptr(macsec->stats); 3383 u64_stats_update_begin(&secy_stats->syncp); 3384 secy_stats->stats.OutPktsUntagged++; 3385 u64_stats_update_end(&secy_stats->syncp); 3386 skb->dev = macsec->real_dev; 3387 len = skb->len; 3388 ret = dev_queue_xmit(skb); 3389 count_tx(dev, ret, len); 3390 return ret; 3391 } 3392 3393 if (!secy->operational) { 3394 kfree_skb(skb); 3395 dev->stats.tx_dropped++; 3396 return NETDEV_TX_OK; 3397 } 3398 3399 skb = macsec_encrypt(skb, dev); 3400 if (IS_ERR(skb)) { 3401 if (PTR_ERR(skb) != -EINPROGRESS) 3402 dev->stats.tx_dropped++; 3403 return NETDEV_TX_OK; 3404 } 3405 3406 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3407 3408 macsec_encrypt_finish(skb, dev); 3409 len = skb->len; 3410 ret = dev_queue_xmit(skb); 3411 count_tx(dev, ret, len); 3412 return ret; 3413 } 3414 3415 #define SW_MACSEC_FEATURES \ 3416 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3417 3418 /* If h/w offloading is enabled, use real device features save for 3419 * VLAN_FEATURES - they require additional ops 3420 * HW_MACSEC - no reason to report it 3421 */ 3422 #define REAL_DEV_FEATURES(dev) \ 3423 ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) 3424 3425 static int macsec_dev_init(struct net_device *dev) 3426 { 3427 struct macsec_dev *macsec = macsec_priv(dev); 3428 struct net_device *real_dev = macsec->real_dev; 3429 int err; 3430 3431 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 3432 if (!dev->tstats) 3433 return -ENOMEM; 3434 3435 err = gro_cells_init(&macsec->gro_cells, dev); 3436 if (err) { 3437 free_percpu(dev->tstats); 3438 return err; 3439 } 3440 3441 if (macsec_is_offloaded(macsec)) { 3442 dev->features = REAL_DEV_FEATURES(real_dev); 3443 } else { 3444 dev->features = real_dev->features & SW_MACSEC_FEATURES; 3445 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; 3446 } 3447 3448 dev->needed_headroom = real_dev->needed_headroom + 3449 MACSEC_NEEDED_HEADROOM; 3450 dev->needed_tailroom = real_dev->needed_tailroom + 3451 MACSEC_NEEDED_TAILROOM; 3452 3453 if 
(is_zero_ether_addr(dev->dev_addr)) 3454 eth_hw_addr_inherit(dev, real_dev); 3455 if (is_zero_ether_addr(dev->broadcast)) 3456 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3457 3458 return 0; 3459 } 3460 3461 static void macsec_dev_uninit(struct net_device *dev) 3462 { 3463 struct macsec_dev *macsec = macsec_priv(dev); 3464 3465 gro_cells_destroy(&macsec->gro_cells); 3466 free_percpu(dev->tstats); 3467 } 3468 3469 static netdev_features_t macsec_fix_features(struct net_device *dev, 3470 netdev_features_t features) 3471 { 3472 struct macsec_dev *macsec = macsec_priv(dev); 3473 struct net_device *real_dev = macsec->real_dev; 3474 3475 if (macsec_is_offloaded(macsec)) 3476 return REAL_DEV_FEATURES(real_dev); 3477 3478 features &= (real_dev->features & SW_MACSEC_FEATURES) | 3479 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3480 features |= NETIF_F_LLTX; 3481 3482 return features; 3483 } 3484 3485 static int macsec_dev_open(struct net_device *dev) 3486 { 3487 struct macsec_dev *macsec = macsec_priv(dev); 3488 struct net_device *real_dev = macsec->real_dev; 3489 int err; 3490 3491 err = dev_uc_add(real_dev, dev->dev_addr); 3492 if (err < 0) 3493 return err; 3494 3495 if (dev->flags & IFF_ALLMULTI) { 3496 err = dev_set_allmulti(real_dev, 1); 3497 if (err < 0) 3498 goto del_unicast; 3499 } 3500 3501 if (dev->flags & IFF_PROMISC) { 3502 err = dev_set_promiscuity(real_dev, 1); 3503 if (err < 0) 3504 goto clear_allmulti; 3505 } 3506 3507 /* If h/w offloading is available, propagate to the device */ 3508 if (macsec_is_offloaded(macsec)) { 3509 const struct macsec_ops *ops; 3510 struct macsec_context ctx; 3511 3512 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3513 if (!ops) { 3514 err = -EOPNOTSUPP; 3515 goto clear_allmulti; 3516 } 3517 3518 ctx.secy = &macsec->secy; 3519 err = macsec_offload(ops->mdo_dev_open, &ctx); 3520 if (err) 3521 goto clear_allmulti; 3522 } 3523 3524 if (netif_carrier_ok(real_dev)) 3525 netif_carrier_on(dev); 3526 3527 return 0; 3528 clear_allmulti: 3529 if (dev->flags & IFF_ALLMULTI) 3530 dev_set_allmulti(real_dev, -1); 3531 del_unicast: 3532 dev_uc_del(real_dev, dev->dev_addr); 3533 netif_carrier_off(dev); 3534 return err; 3535 } 3536 3537 static int macsec_dev_stop(struct net_device *dev) 3538 { 3539 struct macsec_dev *macsec = macsec_priv(dev); 3540 struct net_device *real_dev = macsec->real_dev; 3541 3542 netif_carrier_off(dev); 3543 3544 /* If h/w offloading is available, propagate to the device */ 3545 if (macsec_is_offloaded(macsec)) { 3546 const struct macsec_ops *ops; 3547 struct macsec_context ctx; 3548 3549 ops = macsec_get_ops(macsec, &ctx); 3550 if (ops) { 3551 ctx.secy = &macsec->secy; 3552 macsec_offload(ops->mdo_dev_stop, &ctx); 3553 } 3554 } 3555 3556 dev_mc_unsync(real_dev, dev); 3557 dev_uc_unsync(real_dev, dev); 3558 3559 if (dev->flags & IFF_ALLMULTI) 3560 dev_set_allmulti(real_dev, -1); 3561 3562 if (dev->flags & IFF_PROMISC) 3563 dev_set_promiscuity(real_dev, -1); 3564 3565 dev_uc_del(real_dev, dev->dev_addr); 3566 3567 return 0; 3568 } 3569 3570 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3571 { 3572 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3573 3574 if (!(dev->flags & IFF_UP)) 3575 return; 3576 3577 if (change & IFF_ALLMULTI) 3578 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3579 3580 if (change & IFF_PROMISC) 3581 dev_set_promiscuity(real_dev, 3582 dev->flags & IFF_PROMISC ? 
1 : -1); 3583 } 3584 3585 static void macsec_dev_set_rx_mode(struct net_device *dev) 3586 { 3587 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3588 3589 dev_mc_sync(real_dev, dev); 3590 dev_uc_sync(real_dev, dev); 3591 } 3592 3593 static int macsec_set_mac_address(struct net_device *dev, void *p) 3594 { 3595 struct macsec_dev *macsec = macsec_priv(dev); 3596 struct net_device *real_dev = macsec->real_dev; 3597 struct sockaddr *addr = p; 3598 int err; 3599 3600 if (!is_valid_ether_addr(addr->sa_data)) 3601 return -EADDRNOTAVAIL; 3602 3603 if (!(dev->flags & IFF_UP)) 3604 goto out; 3605 3606 err = dev_uc_add(real_dev, addr->sa_data); 3607 if (err < 0) 3608 return err; 3609 3610 dev_uc_del(real_dev, dev->dev_addr); 3611 3612 out: 3613 ether_addr_copy(dev->dev_addr, addr->sa_data); 3614 macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); 3615 3616 /* If h/w offloading is available, propagate to the device */ 3617 if (macsec_is_offloaded(macsec)) { 3618 const struct macsec_ops *ops; 3619 struct macsec_context ctx; 3620 3621 ops = macsec_get_ops(macsec, &ctx); 3622 if (ops) { 3623 ctx.secy = &macsec->secy; 3624 macsec_offload(ops->mdo_upd_secy, &ctx); 3625 } 3626 } 3627 3628 return 0; 3629 } 3630 3631 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3632 { 3633 struct macsec_dev *macsec = macsec_priv(dev); 3634 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3635 3636 if (macsec->real_dev->mtu - extra < new_mtu) 3637 return -ERANGE; 3638 3639 dev->mtu = new_mtu; 3640 3641 return 0; 3642 } 3643 3644 static void macsec_get_stats64(struct net_device *dev, 3645 struct rtnl_link_stats64 *s) 3646 { 3647 int cpu; 3648 3649 if (!dev->tstats) 3650 return; 3651 3652 for_each_possible_cpu(cpu) { 3653 struct pcpu_sw_netstats *stats; 3654 struct pcpu_sw_netstats tmp; 3655 int start; 3656 3657 stats = per_cpu_ptr(dev->tstats, cpu); 3658 do { 3659 start = u64_stats_fetch_begin_irq(&stats->syncp); 3660 tmp.rx_packets = stats->rx_packets; 3661 tmp.rx_bytes = stats->rx_bytes; 3662 tmp.tx_packets = stats->tx_packets; 3663 tmp.tx_bytes = stats->tx_bytes; 3664 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 3665 3666 s->rx_packets += tmp.rx_packets; 3667 s->rx_bytes += tmp.rx_bytes; 3668 s->tx_packets += tmp.tx_packets; 3669 s->tx_bytes += tmp.tx_bytes; 3670 } 3671 3672 s->rx_dropped = dev->stats.rx_dropped; 3673 s->tx_dropped = dev->stats.tx_dropped; 3674 } 3675 3676 static int macsec_get_iflink(const struct net_device *dev) 3677 { 3678 return macsec_priv(dev)->real_dev->ifindex; 3679 } 3680 3681 static const struct net_device_ops macsec_netdev_ops = { 3682 .ndo_init = macsec_dev_init, 3683 .ndo_uninit = macsec_dev_uninit, 3684 .ndo_open = macsec_dev_open, 3685 .ndo_stop = macsec_dev_stop, 3686 .ndo_fix_features = macsec_fix_features, 3687 .ndo_change_mtu = macsec_change_mtu, 3688 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3689 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3690 .ndo_set_mac_address = macsec_set_mac_address, 3691 .ndo_start_xmit = macsec_start_xmit, 3692 .ndo_get_stats64 = macsec_get_stats64, 3693 .ndo_get_iflink = macsec_get_iflink, 3694 }; 3695 3696 static const struct device_type macsec_type = { 3697 .name = "macsec", 3698 }; 3699 3700 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3701 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3702 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3703 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3704 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3705 [IFLA_MACSEC_WINDOW] = { 
.type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Process the replay window after a possible cipher suite change, so
	 * that the check below sees the final XPN setting.
	 */
	if (data[IFLA_MACSEC_WINDOW]) {
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window for
		 * XPN cipher suites
		 */
		if (secy->xpn && secy->replay_window > ((1 << 30) - 1))
			return -EINVAL;
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case
	 * macsec_changelink_common() or the offload propagation fails, to
	 * revert the changes.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys,
secys) { 3956 if (macsec->secy.sci == sci) 3957 return true; 3958 } 3959 3960 return false; 3961 } 3962 3963 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3964 { 3965 struct macsec_dev *macsec = macsec_priv(dev); 3966 struct macsec_secy *secy = &macsec->secy; 3967 3968 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3969 if (!macsec->stats) 3970 return -ENOMEM; 3971 3972 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3973 if (!secy->tx_sc.stats) { 3974 free_percpu(macsec->stats); 3975 return -ENOMEM; 3976 } 3977 3978 if (sci == MACSEC_UNDEF_SCI) 3979 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3980 3981 secy->netdev = dev; 3982 secy->operational = true; 3983 secy->key_len = DEFAULT_SAK_LEN; 3984 secy->icv_len = icv_len; 3985 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3986 secy->protect_frames = true; 3987 secy->replay_protect = false; 3988 secy->xpn = DEFAULT_XPN; 3989 3990 secy->sci = sci; 3991 secy->tx_sc.active = true; 3992 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3993 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3994 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3995 secy->tx_sc.end_station = false; 3996 secy->tx_sc.scb = false; 3997 3998 return 0; 3999 } 4000 4001 static int macsec_newlink(struct net *net, struct net_device *dev, 4002 struct nlattr *tb[], struct nlattr *data[], 4003 struct netlink_ext_ack *extack) 4004 { 4005 struct macsec_dev *macsec = macsec_priv(dev); 4006 rx_handler_func_t *rx_handler; 4007 u8 icv_len = DEFAULT_ICV_LEN; 4008 struct net_device *real_dev; 4009 int err, mtu; 4010 sci_t sci; 4011 4012 if (!tb[IFLA_LINK]) 4013 return -EINVAL; 4014 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4015 if (!real_dev) 4016 return -ENODEV; 4017 if (real_dev->type != ARPHRD_ETHER) 4018 return -EINVAL; 4019 4020 dev->priv_flags |= IFF_MACSEC; 4021 4022 macsec->real_dev = real_dev; 4023 4024 if (data && data[IFLA_MACSEC_OFFLOAD]) 4025 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4026 else 4027 /* MACsec offloading is off by default */ 4028 macsec->offload = MACSEC_OFFLOAD_OFF; 4029 4030 /* Check if the offloading mode is supported by the underlying layers */ 4031 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4032 !macsec_check_offload(macsec->offload, macsec)) 4033 return -EOPNOTSUPP; 4034 4035 if (data && data[IFLA_MACSEC_ICV_LEN]) 4036 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4037 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4038 if (mtu < 0) 4039 dev->mtu = 0; 4040 else 4041 dev->mtu = mtu; 4042 4043 rx_handler = rtnl_dereference(real_dev->rx_handler); 4044 if (rx_handler && rx_handler != macsec_handle_frame) 4045 return -EBUSY; 4046 4047 err = register_netdevice(dev); 4048 if (err < 0) 4049 return err; 4050 4051 err = netdev_upper_dev_link(real_dev, dev, extack); 4052 if (err < 0) 4053 goto unregister; 4054 4055 /* need to be already registered so that ->init has run and 4056 * the MAC addr is set 4057 */ 4058 if (data && data[IFLA_MACSEC_SCI]) 4059 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 4060 else if (data && data[IFLA_MACSEC_PORT]) 4061 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 4062 else 4063 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4064 4065 if (rx_handler && sci_exists(real_dev, sci)) { 4066 err = -EBUSY; 4067 goto unlink; 4068 } 4069 4070 err = macsec_add_dev(dev, sci, icv_len); 4071 if (err) 4072 goto unlink; 4073 4074 if (data) { 4075 err = macsec_changelink_common(dev, data); 4076 if (err) 4077 goto del_dev; 4078 } 4079 4080 /* If h/w 
offloading is available, propagate to the device */ 4081 if (macsec_is_offloaded(macsec)) { 4082 const struct macsec_ops *ops; 4083 struct macsec_context ctx; 4084 4085 ops = macsec_get_ops(macsec, &ctx); 4086 if (ops) { 4087 ctx.secy = &macsec->secy; 4088 err = macsec_offload(ops->mdo_add_secy, &ctx); 4089 if (err) 4090 goto del_dev; 4091 } 4092 } 4093 4094 err = register_macsec_dev(real_dev, dev); 4095 if (err < 0) 4096 goto del_dev; 4097 4098 netif_stacked_transfer_operstate(real_dev, dev); 4099 linkwatch_fire_event(dev); 4100 4101 macsec_generation++; 4102 4103 return 0; 4104 4105 del_dev: 4106 macsec_del_dev(macsec); 4107 unlink: 4108 netdev_upper_dev_unlink(real_dev, dev); 4109 unregister: 4110 unregister_netdevice(dev); 4111 return err; 4112 } 4113 4114 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 4115 struct netlink_ext_ack *extack) 4116 { 4117 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 4118 u8 icv_len = DEFAULT_ICV_LEN; 4119 int flag; 4120 bool es, scb, sci; 4121 4122 if (!data) 4123 return 0; 4124 4125 if (data[IFLA_MACSEC_CIPHER_SUITE]) 4126 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 4127 4128 if (data[IFLA_MACSEC_ICV_LEN]) { 4129 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4130 if (icv_len != DEFAULT_ICV_LEN) { 4131 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 4132 struct crypto_aead *dummy_tfm; 4133 4134 dummy_tfm = macsec_alloc_tfm(dummy_key, 4135 DEFAULT_SAK_LEN, 4136 icv_len); 4137 if (IS_ERR(dummy_tfm)) 4138 return PTR_ERR(dummy_tfm); 4139 crypto_free_aead(dummy_tfm); 4140 } 4141 } 4142 4143 switch (csid) { 4144 case MACSEC_CIPHER_ID_GCM_AES_128: 4145 case MACSEC_CIPHER_ID_GCM_AES_256: 4146 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 4147 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 4148 case MACSEC_DEFAULT_CIPHER_ID: 4149 if (icv_len < MACSEC_MIN_ICV_LEN || 4150 icv_len > MACSEC_STD_ICV_LEN) 4151 return -EINVAL; 4152 break; 4153 default: 4154 return -EINVAL; 4155 } 4156 4157 if (data[IFLA_MACSEC_ENCODING_SA]) { 4158 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 4159 return -EINVAL; 4160 } 4161 4162 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 4163 flag < IFLA_MACSEC_VALIDATION; 4164 flag++) { 4165 if (data[flag]) { 4166 if (nla_get_u8(data[flag]) > 1) 4167 return -EINVAL; 4168 } 4169 } 4170 4171 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 4172 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 4173 scb = data[IFLA_MACSEC_SCB] ? 
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
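/* Illustrative note: macsec_get_size() and macsec_fill_info() above back
 * "ip -d link show"; the output looks roughly like this (abridged, values
 * hypothetical):
 *
 *   $ ip -d link show macsec0
 *   ... macsec sci 5254001234560001 protect on cipher GCM-AES-128
 *       icvlen 16 encodingsa 0 validate strict encrypt on send_sci on
 *       end_station off scb off replay off
 *
 * IFLA_MACSEC_WINDOW is only emitted when replay protection is enabled,
 * matching the conditional at the end of macsec_fill_info().
 */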
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			/* e.g. a 1500-byte lower MTU with the default
			 * 16-byte ICV leaves 1500 - 16 - 16 = 1468 bytes
			 * of payload
			 */
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
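/* Usage sketch (illustrative; interface name and key below are made up):
 * MODULE_ALIAS_RTNL_LINK("macsec") lets "ip link add ... type macsec"
 * auto-load this module, and MODULE_ALIAS_GENL_FAMILY("macsec") does the
 * same for the genetlink commands that configure secure associations:
 *
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 81818181818181818181818181818181
 *   ip macsec add macsec0 rx port 11 address 52:54:00:aa:bb:cc
 *   ip macsec show
 */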