xref: /openbmc/linux/drivers/net/macsec.c (revision 9a20332a)
1 /*
2  * drivers/net/macsec.c - MACsec device
3  *
4  * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11 
12 #include <linux/types.h>
13 #include <linux/skbuff.h>
14 #include <linux/socket.h>
15 #include <linux/module.h>
16 #include <crypto/aead.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rtnetlink.h>
19 #include <linux/refcount.h>
20 #include <net/genetlink.h>
21 #include <net/sock.h>
22 #include <net/gro_cells.h>
23 
24 #include <uapi/linux/if_macsec.h>
25 
26 typedef u64 __bitwise sci_t;
27 
28 #define MACSEC_SCI_LEN 8
29 
30 /* SecTAG length = macsec_eth_header without the Ethernet header and the optional SCI */
31 #define MACSEC_TAG_LEN 6
32 
33 struct macsec_eth_header {
34 	struct ethhdr eth;
35 	/* SecTAG */
36 	u8  tci_an;
37 #if defined(__LITTLE_ENDIAN_BITFIELD)
38 	u8  short_length:6,
39 		  unused:2;
40 #elif defined(__BIG_ENDIAN_BITFIELD)
41 	u8        unused:2,
42 	    short_length:6;
43 #else
44 #error	"Please fix <asm/byteorder.h>"
45 #endif
46 	__be32 packet_number;
47 	u8 secure_channel_id[8]; /* optional */
48 } __packed;
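/* Editorial sketch (not part of the original driver): with the SCI present,
 * the header above spans ETH_HLEN + MACSEC_TAG_LEN + MACSEC_SCI_LEN
 * = 14 + 6 + 8 = 28 octets. A hypothetical compile-time layout check:
 */
static inline void macsec_eth_header_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct macsec_eth_header) !=
		     ETH_HLEN + MACSEC_TAG_LEN + MACSEC_SCI_LEN);
}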
49 
50 #define MACSEC_TCI_VERSION 0x80
51 #define MACSEC_TCI_ES      0x40 /* end station */
52 #define MACSEC_TCI_SC      0x20 /* SCI present */
53 #define MACSEC_TCI_SCB     0x10 /* epon */
54 #define MACSEC_TCI_E       0x08 /* encryption */
55 #define MACSEC_TCI_C       0x04 /* changed text */
56 #define MACSEC_AN_MASK     0x03 /* association number */
57 #define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
58 
59 /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
60 #define MIN_NON_SHORT_LEN 48
61 
62 #define GCM_AES_IV_LEN 12
63 #define DEFAULT_ICV_LEN 16
64 
65 #define MACSEC_NUM_AN 4 /* 2 bits for the association number */
66 
67 #define for_each_rxsc(secy, sc)				\
68 	for (sc = rcu_dereference_bh(secy->rx_sc);	\
69 	     sc;					\
70 	     sc = rcu_dereference_bh(sc->next))
71 #define for_each_rxsc_rtnl(secy, sc)			\
72 	for (sc = rtnl_dereference(secy->rx_sc);	\
73 	     sc;					\
74 	     sc = rtnl_dereference(sc->next))
75 
76 struct gcm_iv {
77 	union {
78 		u8 secure_channel_id[8];
79 		sci_t sci;
80 	};
81 	__be32 pn;
82 };
83 
84 /**
85  * struct macsec_key - SA key
86  * @id: user-provided key identifier
87  * @tfm: AEAD crypto transform; the key material itself is stored in the tfm
88  */
89 struct macsec_key {
90 	u8 id[MACSEC_KEYID_LEN];
91 	struct crypto_aead *tfm;
92 };
93 
94 struct macsec_rx_sc_stats {
95 	__u64 InOctetsValidated;
96 	__u64 InOctetsDecrypted;
97 	__u64 InPktsUnchecked;
98 	__u64 InPktsDelayed;
99 	__u64 InPktsOK;
100 	__u64 InPktsInvalid;
101 	__u64 InPktsLate;
102 	__u64 InPktsNotValid;
103 	__u64 InPktsNotUsingSA;
104 	__u64 InPktsUnusedSA;
105 };
106 
107 struct macsec_rx_sa_stats {
108 	__u32 InPktsOK;
109 	__u32 InPktsInvalid;
110 	__u32 InPktsNotValid;
111 	__u32 InPktsNotUsingSA;
112 	__u32 InPktsUnusedSA;
113 };
114 
115 struct macsec_tx_sa_stats {
116 	__u32 OutPktsProtected;
117 	__u32 OutPktsEncrypted;
118 };
119 
120 struct macsec_tx_sc_stats {
121 	__u64 OutPktsProtected;
122 	__u64 OutPktsEncrypted;
123 	__u64 OutOctetsProtected;
124 	__u64 OutOctetsEncrypted;
125 };
126 
127 struct macsec_dev_stats {
128 	__u64 OutPktsUntagged;
129 	__u64 InPktsUntagged;
130 	__u64 OutPktsTooLong;
131 	__u64 InPktsNoTag;
132 	__u64 InPktsBadTag;
133 	__u64 InPktsUnknownSCI;
134 	__u64 InPktsNoSCI;
135 	__u64 InPktsOverrun;
136 };
137 
138 /**
139  * struct macsec_rx_sa - receive secure association
140  * @active: this SA is in use
141  * @next_pn: packet number expected for the next packet
142  * @lock: protects next_pn manipulations
143  * @key: key structure
144  * @stats: per-SA stats
145  */
146 struct macsec_rx_sa {
147 	struct macsec_key key;
148 	spinlock_t lock;
149 	u32 next_pn;
150 	refcount_t refcnt;
151 	bool active;
152 	struct macsec_rx_sa_stats __percpu *stats;
153 	struct macsec_rx_sc *sc;
154 	struct rcu_head rcu;
155 };
156 
157 struct pcpu_rx_sc_stats {
158 	struct macsec_rx_sc_stats stats;
159 	struct u64_stats_sync syncp;
160 };
161 
162 /**
163  * struct macsec_rx_sc - receive secure channel
164  * @sci: secure channel identifier for this SC
165  * @active: channel is active
166  * @sa: array of secure associations
167  * @stats: per-SC stats
168  */
169 struct macsec_rx_sc {
170 	struct macsec_rx_sc __rcu *next;
171 	sci_t sci;
172 	bool active;
173 	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
174 	struct pcpu_rx_sc_stats __percpu *stats;
175 	refcount_t refcnt;
176 	struct rcu_head rcu_head;
177 };
178 
179 /**
180  * struct macsec_tx_sa - transmit secure association
181  * @active: this SA is in use
182  * @next_pn: packet number to use for the next packet
183  * @lock: protects next_pn manipulations
184  * @key: key structure
185  * @stats: per-SA stats
186  */
187 struct macsec_tx_sa {
188 	struct macsec_key key;
189 	spinlock_t lock;
190 	u32 next_pn;
191 	refcount_t refcnt;
192 	bool active;
193 	struct macsec_tx_sa_stats __percpu *stats;
194 	struct rcu_head rcu;
195 };
196 
197 struct pcpu_tx_sc_stats {
198 	struct macsec_tx_sc_stats stats;
199 	struct u64_stats_sync syncp;
200 };
201 
202 /**
203  * struct macsec_tx_sc - transmit secure channel
204  * @active: this TXSC is in use
205  * @encoding_sa: association number of the SA currently in use
206  * @encrypt: encrypt packets on transmit, or authenticate only
207  * @send_sci: always include the SCI in the SecTAG
208  * @end_station: set the ES bit in the transmitted SecTAG
209  * @scb: single copy broadcast flag
210  * @sa: array of secure associations
211  * @stats: stats for this TXSC
212  */
213 struct macsec_tx_sc {
214 	bool active;
215 	u8 encoding_sa;
216 	bool encrypt;
217 	bool send_sci;
218 	bool end_station;
219 	bool scb;
220 	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
221 	struct pcpu_tx_sc_stats __percpu *stats;
222 };
223 
224 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
225 
226 /**
227  * struct macsec_secy - MACsec Security Entity
228  * @netdev: netdevice for this SecY
229  * @n_rx_sc: number of receive secure channels configured on this SecY
230  * @sci: secure channel identifier used for tx
231  * @key_len: length of keys used by the cipher suite
232  * @icv_len: length of ICV used by the cipher suite
233  * @validate_frames: validation mode
234  * @operational: MAC_Operational flag
235  * @protect_frames: enable protection for this SecY
236  * @replay_protect: enable packet number checks on receive
237  * @replay_window: size of the replay window
238  * @tx_sc: transmit secure channel
239  * @rx_sc: linked list of receive secure channels
240  */
241 struct macsec_secy {
242 	struct net_device *netdev;
243 	unsigned int n_rx_sc;
244 	sci_t sci;
245 	u16 key_len;
246 	u16 icv_len;
247 	enum macsec_validation_type validate_frames;
248 	bool operational;
249 	bool protect_frames;
250 	bool replay_protect;
251 	u32 replay_window;
252 	struct macsec_tx_sc tx_sc;
253 	struct macsec_rx_sc __rcu *rx_sc;
254 };
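/* Usage sketch (editorial, hypothetical helper): walking the RXSC list with
 * the for_each_rxsc() iterator defined earlier. The plain variant requires
 * rcu_read_lock_bh(); for_each_rxsc_rtnl() relies on holding RTNL instead.
 * The driver itself caches this count in secy->n_rx_sc; this is purely
 * illustrative.
 */
static inline unsigned int macsec_count_active_rxsc(struct macsec_secy *secy)
{
	struct macsec_rx_sc *sc;
	unsigned int n = 0;

	rcu_read_lock_bh();
	for_each_rxsc(secy, sc) {
		if (sc->active)
			n++;
	}
	rcu_read_unlock_bh();

	return n;
}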
255 
256 struct pcpu_secy_stats {
257 	struct macsec_dev_stats stats;
258 	struct u64_stats_sync syncp;
259 };
260 
261 /**
262  * struct macsec_dev - private data
263  * @secy: SecY config
264  * @real_dev: pointer to underlying netdevice
265  * @stats: MACsec device stats
266  * @secys: linked list of SecY's on the underlying device
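 * @gro_cells: GRO cells for the receive path
 * @nest_level: lockdep nesting level relative to the underlying device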
267  */
268 struct macsec_dev {
269 	struct macsec_secy secy;
270 	struct net_device *real_dev;
271 	struct pcpu_secy_stats __percpu *stats;
272 	struct list_head secys;
273 	struct gro_cells gro_cells;
274 	unsigned int nest_level;
275 };
276 
277 /**
278  * struct macsec_rxh_data - rx_handler private argument
279  * @secys: linked list of SecY's on this underlying device
280  */
281 struct macsec_rxh_data {
282 	struct list_head secys;
283 };
284 
285 static struct macsec_dev *macsec_priv(const struct net_device *dev)
286 {
287 	return (struct macsec_dev *)netdev_priv(dev);
288 }
289 
290 static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
291 {
292 	return rcu_dereference_bh(dev->rx_handler_data);
293 }
294 
295 static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
296 {
297 	return rtnl_dereference(dev->rx_handler_data);
298 }
299 
300 struct macsec_cb {
301 	struct aead_request *req;
302 	union {
303 		struct macsec_tx_sa *tx_sa;
304 		struct macsec_rx_sa *rx_sa;
305 	};
306 	u8 assoc_num;
307 	bool valid;
308 	bool has_sci;
309 };
310 
311 static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
312 {
313 	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
314 
315 	if (!sa || !sa->active)
316 		return NULL;
317 
318 	if (!refcount_inc_not_zero(&sa->refcnt))
319 		return NULL;
320 
321 	return sa;
322 }
323 
324 static void free_rx_sc_rcu(struct rcu_head *head)
325 {
326 	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
327 
328 	free_percpu(rx_sc->stats);
329 	kfree(rx_sc);
330 }
331 
332 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
333 {
334 	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
335 }
336 
337 static void macsec_rxsc_put(struct macsec_rx_sc *sc)
338 {
339 	if (refcount_dec_and_test(&sc->refcnt))
340 		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
341 }
342 
343 static void free_rxsa(struct rcu_head *head)
344 {
345 	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
346 
347 	crypto_free_aead(sa->key.tfm);
348 	free_percpu(sa->stats);
349 	kfree(sa);
350 }
351 
352 static void macsec_rxsa_put(struct macsec_rx_sa *sa)
353 {
354 	if (refcount_dec_and_test(&sa->refcnt))
355 		call_rcu(&sa->rcu, free_rxsa);
356 }
357 
358 static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
359 {
360 	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
361 
362 	if (!sa || !sa->active)
363 		return NULL;
364 
365 	if (!refcount_inc_not_zero(&sa->refcnt))
366 		return NULL;
367 
368 	return sa;
369 }
370 
371 static void free_txsa(struct rcu_head *head)
372 {
373 	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
374 
375 	crypto_free_aead(sa->key.tfm);
376 	free_percpu(sa->stats);
377 	kfree(sa);
378 }
379 
380 static void macsec_txsa_put(struct macsec_tx_sa *sa)
381 {
382 	if (refcount_dec_and_test(&sa->refcnt))
383 		call_rcu(&sa->rcu, free_txsa);
384 }
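/* Usage sketch (editorial, hypothetical helper): the *_get()/*_put() pairs
 * above implement the usual RCU-plus-refcount pattern. A caller looks the
 * SA up under rcu_read_lock_bh(), pins it with _get() (which refuses
 * inactive or dying SAs), and may keep using it after unlocking until the
 * matching _put():
 */
static inline struct macsec_tx_sa *macsec_pin_encoding_sa(struct macsec_tx_sc *tx_sc)
{
	struct macsec_tx_sa *sa;

	rcu_read_lock_bh();
	sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	rcu_read_unlock_bh();

	/* if non-NULL, the caller owns a reference and must
	 * macsec_txsa_put(sa) when done
	 */
	return sa;
}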
385 
386 static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
387 {
388 	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
389 	return (struct macsec_cb *)skb->cb;
390 }
391 
392 #define MACSEC_PORT_ES (htons(0x0001))
393 #define MACSEC_PORT_SCB (0x0000)
394 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
395 
396 #define MACSEC_GCM_AES_128_SAK_LEN 16
397 #define MACSEC_GCM_AES_256_SAK_LEN 32
398 
399 #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
400 #define DEFAULT_SEND_SCI true
401 #define DEFAULT_ENCRYPT false
402 #define DEFAULT_ENCODING_SA 0
403 
404 static bool send_sci(const struct macsec_secy *secy)
405 {
406 	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
407 
408 	return tx_sc->send_sci ||
409 		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
410 }
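/* Editorial note: the SCI goes on the wire either because the administrator
 * asked for it, or because this SecY talks to several peers (n_rx_sc > 1)
 * and neither the ES nor the SCB encoding can identify the transmitter
 * implicitly (cf. IEEE 802.1AE-2006 9.5).
 */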
411 
412 static sci_t make_sci(u8 *addr, __be16 port)
413 {
414 	sci_t sci;
415 
416 	memcpy(&sci, addr, ETH_ALEN);
417 	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
418 
419 	return sci;
420 }
421 
422 static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
423 {
424 	sci_t sci;
425 
426 	if (sci_present)
427 		memcpy(&sci, hdr->secure_channel_id,
428 		       sizeof(hdr->secure_channel_id));
429 	else
430 		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
431 
432 	return sci;
433 }
434 
435 static unsigned int macsec_sectag_len(bool sci_present)
436 {
437 	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
438 }
439 
440 static unsigned int macsec_hdr_len(bool sci_present)
441 {
442 	return macsec_sectag_len(sci_present) + ETH_HLEN;
443 }
444 
445 static unsigned int macsec_extra_len(bool sci_present)
446 {
447 	return macsec_sectag_len(sci_present) + sizeof(__be16);
448 }
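/* Editorial note: macsec_extra_len() is the on-wire growth of the frame:
 * the SecTAG plus the 2-byte MACsec EtherType that displaces the original
 * EtherType into the (now secured) payload. macsec_hdr_len() instead counts
 * the full headers from the destination MAC onward.
 */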
449 
450 /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
451 static void macsec_fill_sectag(struct macsec_eth_header *h,
452 			       const struct macsec_secy *secy, u32 pn,
453 			       bool sci_present)
454 {
455 	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
456 
457 	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
458 	h->eth.h_proto = htons(ETH_P_MACSEC);
459 
460 	if (sci_present) {
461 		h->tci_an |= MACSEC_TCI_SC;
462 		memcpy(&h->secure_channel_id, &secy->sci,
463 		       sizeof(h->secure_channel_id));
464 	} else {
465 		if (tx_sc->end_station)
466 			h->tci_an |= MACSEC_TCI_ES;
467 		if (tx_sc->scb)
468 			h->tci_an |= MACSEC_TCI_SCB;
469 	}
470 
471 	h->packet_number = htonl(pn);
472 
473 	/* with GCM, C/E clear for !encrypt, both set for encrypt */
474 	if (tx_sc->encrypt)
475 		h->tci_an |= MACSEC_TCI_CONFID;
476 	else if (secy->icv_len != DEFAULT_ICV_LEN)
477 		h->tci_an |= MACSEC_TCI_C;
478 
479 	h->tci_an |= tx_sc->encoding_sa;
480 }
481 
482 static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
483 {
484 	if (data_len < MIN_NON_SHORT_LEN)
485 		h->short_length = data_len;
486 }
487 
488 /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
489 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
490 {
491 	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
492 	int len = skb->len - 2 * ETH_ALEN;
493 	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
494 
495 	/* a) It comprises at least 17 octets */
496 	if (skb->len <= 16)
497 		return false;
498 
499 	/* b) MACsec EtherType: already checked */
500 
501 	/* c) V bit is clear */
502 	if (h->tci_an & MACSEC_TCI_VERSION)
503 		return false;
504 
505 	/* d) ES or SCB => !SC */
506 	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
507 	    (h->tci_an & MACSEC_TCI_SC))
508 		return false;
509 
510 	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
511 	if (h->unused)
512 		return false;
513 
514 	/* rx.pn != 0 (figure 10-5) */
515 	if (!h->packet_number)
516 		return false;
517 
518 	/* length check, f) g) h) i) */
519 	if (h->short_length)
520 		return len == extra_len + h->short_length;
521 	return len >= extra_len + MIN_NON_SHORT_LEN;
522 }
523 
524 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
525 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
526 
527 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
528 {
529 	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
530 
531 	gcm_iv->sci = sci;
532 	gcm_iv->pn = htonl(pn);
533 }
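/* Editorial sanity sketch: the two stores above must exactly fill the
 * 12-byte GCM nonce. Note that sizeof(struct gcm_iv) is larger because of
 * u64 alignment padding, hence offsetofend() in this hypothetical check:
 */
static inline void macsec_gcm_iv_layout_check(void)
{
	BUILD_BUG_ON(offsetofend(struct gcm_iv, pn) != GCM_AES_IV_LEN);
}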
534 
535 static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
536 {
537 	return (struct macsec_eth_header *)skb_mac_header(skb);
538 }
539 
540 static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
541 {
542 	u32 pn;
543 
544 	spin_lock_bh(&tx_sa->lock);
545 	pn = tx_sa->next_pn;
546 
547 	tx_sa->next_pn++;
548 	if (tx_sa->next_pn == 0) {
549 		pr_debug("PN wrapped, transitioning to !oper\n");
550 		tx_sa->active = false;
551 		if (secy->protect_frames)
552 			secy->operational = false;
553 	}
554 	spin_unlock_bh(&tx_sa->lock);
555 
556 	return pn;
557 }
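/* Editorial note: the value returned above is the PN placed in this frame's
 * SecTAG; next_pn is post-incremented under the SA lock. A return of 0 means
 * the SA had already exhausted its 32-bit PN space, and the caller
 * (macsec_encrypt) drops the frame rather than reuse an IV.
 */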
558 
559 static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
560 {
561 	struct macsec_dev *macsec = netdev_priv(dev);
562 
563 	skb->dev = macsec->real_dev;
564 	skb_reset_mac_header(skb);
565 	skb->protocol = eth_hdr(skb)->h_proto;
566 }
567 
568 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
569 			    struct macsec_tx_sa *tx_sa)
570 {
571 	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
572 
573 	u64_stats_update_begin(&txsc_stats->syncp);
574 	if (tx_sc->encrypt) {
575 		txsc_stats->stats.OutOctetsEncrypted += skb->len;
576 		txsc_stats->stats.OutPktsEncrypted++;
577 		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
578 	} else {
579 		txsc_stats->stats.OutOctetsProtected += skb->len;
580 		txsc_stats->stats.OutPktsProtected++;
581 		this_cpu_inc(tx_sa->stats->OutPktsProtected);
582 	}
583 	u64_stats_update_end(&txsc_stats->syncp);
584 }
585 
586 static void count_tx(struct net_device *dev, int ret, int len)
587 {
588 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
589 		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
590 
591 		u64_stats_update_begin(&stats->syncp);
592 		stats->tx_packets++;
593 		stats->tx_bytes += len;
594 		u64_stats_update_end(&stats->syncp);
595 	}
596 }
597 
598 static void macsec_encrypt_done(struct crypto_async_request *base, int err)
599 {
600 	struct sk_buff *skb = base->data;
601 	struct net_device *dev = skb->dev;
602 	struct macsec_dev *macsec = macsec_priv(dev);
603 	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
604 	int len, ret;
605 
606 	aead_request_free(macsec_skb_cb(skb)->req);
607 
608 	rcu_read_lock_bh();
609 	macsec_encrypt_finish(skb, dev);
610 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
611 	len = skb->len;
612 	ret = dev_queue_xmit(skb);
613 	count_tx(dev, ret, len);
614 	rcu_read_unlock_bh();
615 
616 	macsec_txsa_put(sa);
617 	dev_put(dev);
618 }
619 
620 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
621 					     unsigned char **iv,
622 					     struct scatterlist **sg,
623 					     int num_frags)
624 {
625 	size_t size, iv_offset, sg_offset;
626 	struct aead_request *req;
627 	void *tmp;
628 
629 	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
630 	iv_offset = size;
631 	size += GCM_AES_IV_LEN;
632 
633 	size = ALIGN(size, __alignof__(struct scatterlist));
634 	sg_offset = size;
635 	size += sizeof(struct scatterlist) * num_frags;
636 
637 	tmp = kmalloc(size, GFP_ATOMIC);
638 	if (!tmp)
639 		return NULL;
640 
641 	*iv = (unsigned char *)(tmp + iv_offset);
642 	*sg = (struct scatterlist *)(tmp + sg_offset);
643 	req = tmp;
644 
645 	aead_request_set_tfm(req, tfm);
646 
647 	return req;
648 }
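/* Editorial note: macsec_alloc_req() carves the aead_request, the 12-byte
 * IV and the scatterlist array out of a single GFP_ATOMIC allocation (with
 * the scatterlist explicitly aligned), so the datapath pays for one kmalloc
 * per frame and aead_request_free() releases all three at once.
 */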
649 
650 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
651 				      struct net_device *dev)
652 {
653 	int ret;
654 	struct scatterlist *sg;
655 	struct sk_buff *trailer;
656 	unsigned char *iv;
657 	struct ethhdr *eth;
658 	struct macsec_eth_header *hh;
659 	size_t unprotected_len;
660 	struct aead_request *req;
661 	struct macsec_secy *secy;
662 	struct macsec_tx_sc *tx_sc;
663 	struct macsec_tx_sa *tx_sa;
664 	struct macsec_dev *macsec = macsec_priv(dev);
665 	bool sci_present;
666 	u32 pn;
667 
668 	secy = &macsec->secy;
669 	tx_sc = &secy->tx_sc;
670 
671 	/* 10.5.1 TX SA assignment */
672 	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
673 	if (!tx_sa) {
674 		secy->operational = false;
675 		kfree_skb(skb);
676 		return ERR_PTR(-EINVAL);
677 	}
678 
679 	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
680 		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
681 		struct sk_buff *nskb = skb_copy_expand(skb,
682 						       MACSEC_NEEDED_HEADROOM,
683 						       MACSEC_NEEDED_TAILROOM,
684 						       GFP_ATOMIC);
685 		if (likely(nskb)) {
686 			consume_skb(skb);
687 			skb = nskb;
688 		} else {
689 			macsec_txsa_put(tx_sa);
690 			kfree_skb(skb);
691 			return ERR_PTR(-ENOMEM);
692 		}
693 	} else {
694 		skb = skb_unshare(skb, GFP_ATOMIC);
695 		if (!skb) {
696 			macsec_txsa_put(tx_sa);
697 			return ERR_PTR(-ENOMEM);
698 		}
699 	}
700 
701 	unprotected_len = skb->len;
702 	eth = eth_hdr(skb);
703 	sci_present = send_sci(secy);
704 	hh = skb_push(skb, macsec_extra_len(sci_present));
705 	memmove(hh, eth, 2 * ETH_ALEN);
706 
707 	pn = tx_sa_update_pn(tx_sa, secy);
708 	if (pn == 0) {
709 		macsec_txsa_put(tx_sa);
710 		kfree_skb(skb);
711 		return ERR_PTR(-ENOLINK);
712 	}
713 	macsec_fill_sectag(hh, secy, pn, sci_present);
714 	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
715 
716 	skb_put(skb, secy->icv_len);
717 
718 	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
719 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
720 
721 		u64_stats_update_begin(&secy_stats->syncp);
722 		secy_stats->stats.OutPktsTooLong++;
723 		u64_stats_update_end(&secy_stats->syncp);
724 
725 		macsec_txsa_put(tx_sa);
726 		kfree_skb(skb);
727 		return ERR_PTR(-EINVAL);
728 	}
729 
730 	ret = skb_cow_data(skb, 0, &trailer);
731 	if (unlikely(ret < 0)) {
732 		macsec_txsa_put(tx_sa);
733 		kfree_skb(skb);
734 		return ERR_PTR(ret);
735 	}
736 
737 	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
738 	if (!req) {
739 		macsec_txsa_put(tx_sa);
740 		kfree_skb(skb);
741 		return ERR_PTR(-ENOMEM);
742 	}
743 
744 	macsec_fill_iv(iv, secy->sci, pn);
745 
746 	sg_init_table(sg, ret);
747 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
748 	if (unlikely(ret < 0)) {
749 		aead_request_free(req);
750 		macsec_txsa_put(tx_sa);
751 		kfree_skb(skb);
752 		return ERR_PTR(ret);
753 	}
754 
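	/* Editorial note: for GCM the associated data (authenticated but not
	 * encrypted) is the Ethernet header plus the SecTAG. With encryption
	 * on, the rest of the frame is the crypt region; in integrity-only
	 * mode everything except the ICV is associated data and the crypt
	 * region is empty, so the AEAD operation only appends the ICV.
	 */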
755 	if (tx_sc->encrypt) {
756 		int len = skb->len - macsec_hdr_len(sci_present) -
757 			  secy->icv_len;
758 		aead_request_set_crypt(req, sg, sg, len, iv);
759 		aead_request_set_ad(req, macsec_hdr_len(sci_present));
760 	} else {
761 		aead_request_set_crypt(req, sg, sg, 0, iv);
762 		aead_request_set_ad(req, skb->len - secy->icv_len);
763 	}
764 
765 	macsec_skb_cb(skb)->req = req;
766 	macsec_skb_cb(skb)->tx_sa = tx_sa;
767 	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
768 
769 	dev_hold(skb->dev);
770 	ret = crypto_aead_encrypt(req);
771 	if (ret == -EINPROGRESS) {
772 		return ERR_PTR(ret);
773 	} else if (ret != 0) {
774 		dev_put(skb->dev);
775 		kfree_skb(skb);
776 		aead_request_free(req);
777 		macsec_txsa_put(tx_sa);
778 		return ERR_PTR(-EINVAL);
779 	}
780 
781 	dev_put(skb->dev);
782 	aead_request_free(req);
783 	macsec_txsa_put(tx_sa);
784 
785 	return skb;
786 }
787 
788 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
789 {
790 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
791 	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
792 	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
793 	u32 lowest_pn = 0;
794 
795 	spin_lock(&rx_sa->lock);
796 	if (rx_sa->next_pn >= secy->replay_window)
797 		lowest_pn = rx_sa->next_pn - secy->replay_window;
798 
799 	/* Now perform replay protection check again
800 	 * (see IEEE 802.1AE-2006 figure 10-5)
801 	 */
802 	if (secy->replay_protect && pn < lowest_pn) {
803 		spin_unlock(&rx_sa->lock);
804 		u64_stats_update_begin(&rxsc_stats->syncp);
805 		rxsc_stats->stats.InPktsLate++;
806 		u64_stats_update_end(&rxsc_stats->syncp);
807 		return false;
808 	}
809 
810 	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
811 		u64_stats_update_begin(&rxsc_stats->syncp);
812 		if (hdr->tci_an & MACSEC_TCI_E)
813 			rxsc_stats->stats.InOctetsDecrypted += skb->len;
814 		else
815 			rxsc_stats->stats.InOctetsValidated += skb->len;
816 		u64_stats_update_end(&rxsc_stats->syncp);
817 	}
818 
819 	if (!macsec_skb_cb(skb)->valid) {
820 		spin_unlock(&rx_sa->lock);
821 
822 		/* 10.6.5 */
823 		if (hdr->tci_an & MACSEC_TCI_C ||
824 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
825 			u64_stats_update_begin(&rxsc_stats->syncp);
826 			rxsc_stats->stats.InPktsNotValid++;
827 			u64_stats_update_end(&rxsc_stats->syncp);
828 			return false;
829 		}
830 
831 		u64_stats_update_begin(&rxsc_stats->syncp);
832 		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
833 			rxsc_stats->stats.InPktsInvalid++;
834 			this_cpu_inc(rx_sa->stats->InPktsInvalid);
835 		} else if (pn < lowest_pn) {
836 			rxsc_stats->stats.InPktsDelayed++;
837 		} else {
838 			rxsc_stats->stats.InPktsUnchecked++;
839 		}
840 		u64_stats_update_end(&rxsc_stats->syncp);
841 	} else {
842 		u64_stats_update_begin(&rxsc_stats->syncp);
843 		if (pn < lowest_pn) {
844 			rxsc_stats->stats.InPktsDelayed++;
845 		} else {
846 			rxsc_stats->stats.InPktsOK++;
847 			this_cpu_inc(rx_sa->stats->InPktsOK);
848 		}
849 		u64_stats_update_end(&rxsc_stats->syncp);
850 
851 		if (pn >= rx_sa->next_pn)
852 			rx_sa->next_pn = pn + 1;
853 		spin_unlock(&rx_sa->lock);
854 	}
855 
856 	return true;
857 }
858 
859 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
860 {
861 	skb->pkt_type = PACKET_HOST;
862 	skb->protocol = eth_type_trans(skb, dev);
863 
864 	skb_reset_network_header(skb);
865 	if (!skb_transport_header_was_set(skb))
866 		skb_reset_transport_header(skb);
867 	skb_reset_mac_len(skb);
868 }
869 
870 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
871 {
872 	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
873 	skb_pull(skb, hdr_len);
874 	pskb_trim_unique(skb, skb->len - icv_len);
875 }
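/* Editorial note: macsec_finalize_skb() undoes the SecTAG: the two MAC
 * addresses are slid forward over the tag (hdr_len covers the SecTAG and,
 * when present, the SCI), the tag is pulled off and the trailing ICV is
 * trimmed, leaving the frame as it was before protection.
 */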
876 
877 static void count_rx(struct net_device *dev, int len)
878 {
879 	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
880 
881 	u64_stats_update_begin(&stats->syncp);
882 	stats->rx_packets++;
883 	stats->rx_bytes += len;
884 	u64_stats_update_end(&stats->syncp);
885 }
886 
887 static void macsec_decrypt_done(struct crypto_async_request *base, int err)
888 {
889 	struct sk_buff *skb = base->data;
890 	struct net_device *dev = skb->dev;
891 	struct macsec_dev *macsec = macsec_priv(dev);
892 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
893 	struct macsec_rx_sc *rx_sc = rx_sa->sc;
894 	int len;
895 	u32 pn;
896 
897 	aead_request_free(macsec_skb_cb(skb)->req);
898 
899 	if (!err)
900 		macsec_skb_cb(skb)->valid = true;
901 
902 	rcu_read_lock_bh();
903 	pn = ntohl(macsec_ethhdr(skb)->packet_number);
904 	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
905 		rcu_read_unlock_bh();
906 		kfree_skb(skb);
907 		goto out;
908 	}
909 
910 	macsec_finalize_skb(skb, macsec->secy.icv_len,
911 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
912 	macsec_reset_skb(skb, macsec->secy.netdev);
913 
914 	len = skb->len;
915 	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
916 		count_rx(dev, len);
917 
918 	rcu_read_unlock_bh();
919 
920 out:
921 	macsec_rxsa_put(rx_sa);
922 	macsec_rxsc_put(rx_sc);
923 	dev_put(dev);
924 }
925 
926 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
927 				      struct net_device *dev,
928 				      struct macsec_rx_sa *rx_sa,
929 				      sci_t sci,
930 				      struct macsec_secy *secy)
931 {
932 	int ret;
933 	struct scatterlist *sg;
934 	struct sk_buff *trailer;
935 	unsigned char *iv;
936 	struct aead_request *req;
937 	struct macsec_eth_header *hdr;
938 	u16 icv_len = secy->icv_len;
939 
940 	macsec_skb_cb(skb)->valid = false;
941 	skb = skb_share_check(skb, GFP_ATOMIC);
942 	if (!skb)
943 		return ERR_PTR(-ENOMEM);
944 
945 	ret = skb_cow_data(skb, 0, &trailer);
946 	if (unlikely(ret < 0)) {
947 		kfree_skb(skb);
948 		return ERR_PTR(ret);
949 	}
950 	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
951 	if (!req) {
952 		kfree_skb(skb);
953 		return ERR_PTR(-ENOMEM);
954 	}
955 
956 	hdr = (struct macsec_eth_header *)skb->data;
957 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
958 
959 	sg_init_table(sg, ret);
960 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
961 	if (unlikely(ret < 0)) {
962 		aead_request_free(req);
963 		kfree_skb(skb);
964 		return ERR_PTR(ret);
965 	}
966 
967 	if (hdr->tci_an & MACSEC_TCI_E) {
968 		/* confidentiality: ethernet + macsec header
969 		 * authenticated, encrypted payload
970 		 */
971 		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
972 
973 		aead_request_set_crypt(req, sg, sg, len, iv);
974 		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
975 		skb = skb_unshare(skb, GFP_ATOMIC);
976 		if (!skb) {
977 			aead_request_free(req);
978 			return ERR_PTR(-ENOMEM);
979 		}
980 	} else {
981 		/* integrity only: all headers + data authenticated */
982 		aead_request_set_crypt(req, sg, sg, icv_len, iv);
983 		aead_request_set_ad(req, skb->len - icv_len);
984 	}
985 
986 	macsec_skb_cb(skb)->req = req;
987 	skb->dev = dev;
988 	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
989 
990 	dev_hold(dev);
991 	ret = crypto_aead_decrypt(req);
992 	if (ret == -EINPROGRESS) {
993 		return ERR_PTR(ret);
994 	} else if (ret != 0) {
995 		/* decryption/authentication failed
996 		 * 10.6 if validateFrames is disabled, deliver anyway
997 		 */
998 		if (ret != -EBADMSG) {
999 			kfree_skb(skb);
1000 			skb = ERR_PTR(ret);
1001 		}
1002 	} else {
1003 		macsec_skb_cb(skb)->valid = true;
1004 	}
1005 	dev_put(dev);
1006 
1007 	aead_request_free(req);
1008 
1009 	return skb;
1010 }
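/* Editorial note: on -EBADMSG (ICV mismatch) the skb is returned with
 * ->valid still false instead of being freed; macsec_post_decrypt() then
 * applies the validate_frames policy (10.6) and decides whether to drop the
 * frame as invalid or deliver it unchecked.
 */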
1011 
1012 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
1013 {
1014 	struct macsec_rx_sc *rx_sc;
1015 
1016 	for_each_rxsc(secy, rx_sc) {
1017 		if (rx_sc->sci == sci)
1018 			return rx_sc;
1019 	}
1020 
1021 	return NULL;
1022 }
1023 
1024 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
1025 {
1026 	struct macsec_rx_sc *rx_sc;
1027 
1028 	for_each_rxsc_rtnl(secy, rx_sc) {
1029 		if (rx_sc->sci == sci)
1030 			return rx_sc;
1031 	}
1032 
1033 	return NULL;
1034 }
1035 
1036 static void handle_not_macsec(struct sk_buff *skb)
1037 {
1038 	struct macsec_rxh_data *rxd;
1039 	struct macsec_dev *macsec;
1040 
1041 	rcu_read_lock();
1042 	rxd = macsec_data_rcu(skb->dev);
1043 
1044 	/* 10.6 If the management control validateFrames is not
1045 	 * Strict, frames without a SecTAG are received, counted, and
1046 	 * delivered to the Controlled Port
1047 	 */
1048 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1049 		struct sk_buff *nskb;
1050 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1051 
1052 		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1053 			u64_stats_update_begin(&secy_stats->syncp);
1054 			secy_stats->stats.InPktsNoTag++;
1055 			u64_stats_update_end(&secy_stats->syncp);
1056 			continue;
1057 		}
1058 
1059 		/* deliver on this port */
1060 		nskb = skb_clone(skb, GFP_ATOMIC);
1061 		if (!nskb)
1062 			break;
1063 
1064 		nskb->dev = macsec->secy.netdev;
1065 
1066 		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1067 			u64_stats_update_begin(&secy_stats->syncp);
1068 			secy_stats->stats.InPktsUntagged++;
1069 			u64_stats_update_end(&secy_stats->syncp);
1070 		}
1071 	}
1072 
1073 	rcu_read_unlock();
1074 }
1075 
1076 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1077 {
1078 	struct sk_buff *skb = *pskb;
1079 	struct net_device *dev = skb->dev;
1080 	struct macsec_eth_header *hdr;
1081 	struct macsec_secy *secy = NULL;
1082 	struct macsec_rx_sc *rx_sc;
1083 	struct macsec_rx_sa *rx_sa;
1084 	struct macsec_rxh_data *rxd;
1085 	struct macsec_dev *macsec;
1086 	sci_t sci;
1087 	u32 pn;
1088 	bool cbit;
1089 	struct pcpu_rx_sc_stats *rxsc_stats;
1090 	struct pcpu_secy_stats *secy_stats;
1091 	bool pulled_sci;
1092 	int ret;
1093 
1094 	if (skb_headroom(skb) < ETH_HLEN)
1095 		goto drop_direct;
1096 
1097 	hdr = macsec_ethhdr(skb);
1098 	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
1099 		handle_not_macsec(skb);
1100 
1101 		/* and deliver to the uncontrolled port */
1102 		return RX_HANDLER_PASS;
1103 	}
1104 
1105 	skb = skb_unshare(skb, GFP_ATOMIC);
1106 	if (!skb) {
1107 		*pskb = NULL;
1108 		return RX_HANDLER_CONSUMED;
1109 	}
1110 
1111 	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1112 	if (!pulled_sci) {
1113 		if (!pskb_may_pull(skb, macsec_extra_len(false)))
1114 			goto drop_direct;
1115 	}
1116 
1117 	hdr = macsec_ethhdr(skb);
1118 
1119 	/* Frames with a SecTAG that has the TCI E bit set but the C
1120 	 * bit clear are discarded, as this reserved encoding is used
1121 	 * to identify frames with a SecTAG that are not to be
1122 	 * delivered to the Controlled Port.
1123 	 */
1124 	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1125 		return RX_HANDLER_PASS;
1126 
1127 	/* now, pull the extra length */
1128 	if (hdr->tci_an & MACSEC_TCI_SC) {
1129 		if (!pulled_sci)
1130 			goto drop_direct;
1131 	}
1132 
1133 	/* ethernet header is part of crypto processing */
1134 	skb_push(skb, ETH_HLEN);
1135 
1136 	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1137 	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1138 	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1139 
1140 	rcu_read_lock();
1141 	rxd = macsec_data_rcu(skb->dev);
1142 
1143 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1144 		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1145 
1146 		sc = sc ? macsec_rxsc_get(sc) : NULL;
1147 
1148 		if (sc) {
1149 			secy = &macsec->secy;
1150 			rx_sc = sc;
1151 			break;
1152 		}
1153 	}
1154 
1155 	if (!secy)
1156 		goto nosci;
1157 
1158 	dev = secy->netdev;
1159 	macsec = macsec_priv(dev);
1160 	secy_stats = this_cpu_ptr(macsec->stats);
1161 	rxsc_stats = this_cpu_ptr(rx_sc->stats);
1162 
1163 	if (!macsec_validate_skb(skb, secy->icv_len)) {
1164 		u64_stats_update_begin(&secy_stats->syncp);
1165 		secy_stats->stats.InPktsBadTag++;
1166 		u64_stats_update_end(&secy_stats->syncp);
1167 		goto drop_nosa;
1168 	}
1169 
1170 	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1171 	if (!rx_sa) {
1172 		/* 10.6.1 if the SA is not in use */
1173 
1174 		/* If validateFrames is Strict or the C bit in the
1175 		 * SecTAG is set, discard
1176 		 */
1177 		if (hdr->tci_an & MACSEC_TCI_C ||
1178 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1179 			u64_stats_update_begin(&rxsc_stats->syncp);
1180 			rxsc_stats->stats.InPktsNotUsingSA++;
1181 			u64_stats_update_end(&rxsc_stats->syncp);
1182 			goto drop_nosa;
1183 		}
1184 
1185 		/* not Strict, the frame (with the SecTAG and ICV
1186 		 * removed) is delivered to the Controlled Port.
1187 		 */
1188 		u64_stats_update_begin(&rxsc_stats->syncp);
1189 		rxsc_stats->stats.InPktsUnusedSA++;
1190 		u64_stats_update_end(&rxsc_stats->syncp);
1191 		goto deliver;
1192 	}
1193 
1194 	/* First, PN check to avoid decrypting obviously wrong packets */
1195 	pn = ntohl(hdr->packet_number);
1196 	if (secy->replay_protect) {
1197 		bool late;
1198 
1199 		spin_lock(&rx_sa->lock);
1200 		late = rx_sa->next_pn >= secy->replay_window &&
1201 		       pn < (rx_sa->next_pn - secy->replay_window);
1202 		spin_unlock(&rx_sa->lock);
1203 
1204 		if (late) {
1205 			u64_stats_update_begin(&rxsc_stats->syncp);
1206 			rxsc_stats->stats.InPktsLate++;
1207 			u64_stats_update_end(&rxsc_stats->syncp);
1208 			goto drop;
1209 		}
1210 	}
1211 
1212 	macsec_skb_cb(skb)->rx_sa = rx_sa;
1213 
1214 	/* Disabled && !changed text => skip validation */
1215 	if (hdr->tci_an & MACSEC_TCI_C ||
1216 	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1217 		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1218 
1219 	if (IS_ERR(skb)) {
1220 		/* the decrypt callback needs the reference */
1221 		if (PTR_ERR(skb) != -EINPROGRESS) {
1222 			macsec_rxsa_put(rx_sa);
1223 			macsec_rxsc_put(rx_sc);
1224 		}
1225 		rcu_read_unlock();
1226 		*pskb = NULL;
1227 		return RX_HANDLER_CONSUMED;
1228 	}
1229 
1230 	if (!macsec_post_decrypt(skb, secy, pn))
1231 		goto drop;
1232 
1233 deliver:
1234 	macsec_finalize_skb(skb, secy->icv_len,
1235 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1236 	macsec_reset_skb(skb, secy->netdev);
1237 
1238 	if (rx_sa)
1239 		macsec_rxsa_put(rx_sa);
1240 	macsec_rxsc_put(rx_sc);
1241 
1242 	ret = gro_cells_receive(&macsec->gro_cells, skb);
1243 	if (ret == NET_RX_SUCCESS)
1244 		count_rx(dev, skb->len);
1245 	else
1246 		macsec->secy.netdev->stats.rx_dropped++;
1247 
1248 	rcu_read_unlock();
1249 
1250 	*pskb = NULL;
1251 	return RX_HANDLER_CONSUMED;
1252 
1253 drop:
1254 	macsec_rxsa_put(rx_sa);
1255 drop_nosa:
1256 	macsec_rxsc_put(rx_sc);
1257 	rcu_read_unlock();
1258 drop_direct:
1259 	kfree_skb(skb);
1260 	*pskb = NULL;
1261 	return RX_HANDLER_CONSUMED;
1262 
1263 nosci:
1264 	/* 10.6.1 if the SC is not found */
1265 	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1266 	if (!cbit)
1267 		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1268 				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1269 
1270 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1271 		struct sk_buff *nskb;
1272 
1273 		secy_stats = this_cpu_ptr(macsec->stats);
1274 
1275 		/* If validateFrames is Strict or the C bit in the
1276 		 * SecTAG is set, discard
1277 		 */
1278 		if (cbit ||
1279 		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1280 			u64_stats_update_begin(&secy_stats->syncp);
1281 			secy_stats->stats.InPktsNoSCI++;
1282 			u64_stats_update_end(&secy_stats->syncp);
1283 			continue;
1284 		}
1285 
1286 		/* not Strict, the frame (with the SecTAG and ICV
1287 		 * removed) is delivered to the Controlled Port.
1288 		 */
1289 		nskb = skb_clone(skb, GFP_ATOMIC);
1290 		if (!nskb)
1291 			break;
1292 
1293 		macsec_reset_skb(nskb, macsec->secy.netdev);
1294 
1295 		ret = netif_rx(nskb);
1296 		if (ret == NET_RX_SUCCESS) {
1297 			u64_stats_update_begin(&secy_stats->syncp);
1298 			secy_stats->stats.InPktsUnknownSCI++;
1299 			u64_stats_update_end(&secy_stats->syncp);
1300 		} else {
1301 			macsec->secy.netdev->stats.rx_dropped++;
1302 		}
1303 	}
1304 
1305 	rcu_read_unlock();
1306 	*pskb = skb;
1307 	return RX_HANDLER_PASS;
1308 }
1309 
1310 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1311 {
1312 	struct crypto_aead *tfm;
1313 	int ret;
1314 
1315 	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1316 
1317 	if (IS_ERR(tfm))
1318 		return tfm;
1319 
1320 	ret = crypto_aead_setkey(tfm, key, key_len);
1321 	if (ret < 0)
1322 		goto fail;
1323 
1324 	ret = crypto_aead_setauthsize(tfm, icv_len);
1325 	if (ret < 0)
1326 		goto fail;
1327 
1328 	return tfm;
1329 fail:
1330 	crypto_free_aead(tfm);
1331 	return ERR_PTR(ret);
1332 }
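/* Usage sketch (editorial): the only cipher used here is gcm(aes); the key
 * length selects GCM-AES-128 vs GCM-AES-256, and icv_len becomes the AEAD
 * authsize. A hypothetical call with the defaults ("sak" is a caller-owned
 * 16-byte buffer):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = macsec_alloc_tfm(sak, MACSEC_GCM_AES_128_SAK_LEN,
 *			       DEFAULT_ICV_LEN);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */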
1333 
1334 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1335 		      int icv_len)
1336 {
1337 	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1338 	if (!rx_sa->stats)
1339 		return -ENOMEM;
1340 
1341 	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1342 	if (IS_ERR(rx_sa->key.tfm)) {
1343 		free_percpu(rx_sa->stats);
1344 		return PTR_ERR(rx_sa->key.tfm);
1345 	}
1346 
1347 	rx_sa->active = false;
1348 	rx_sa->next_pn = 1;
1349 	refcount_set(&rx_sa->refcnt, 1);
1350 	spin_lock_init(&rx_sa->lock);
1351 
1352 	return 0;
1353 }
1354 
1355 static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1356 {
1357 	rx_sa->active = false;
1358 
1359 	macsec_rxsa_put(rx_sa);
1360 }
1361 
1362 static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1363 {
1364 	int i;
1365 
1366 	for (i = 0; i < MACSEC_NUM_AN; i++) {
1367 		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1368 
1369 		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1370 		if (sa)
1371 			clear_rx_sa(sa);
1372 	}
1373 
1374 	macsec_rxsc_put(rx_sc);
1375 }
1376 
1377 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1378 {
1379 	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1380 
1381 	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1382 	     rx_sc;
1383 	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1384 		if (rx_sc->sci == sci) {
1385 			if (rx_sc->active)
1386 				secy->n_rx_sc--;
1387 			rcu_assign_pointer(*rx_scp, rx_sc->next);
1388 			return rx_sc;
1389 		}
1390 	}
1391 
1392 	return NULL;
1393 }
1394 
1395 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1396 {
1397 	struct macsec_rx_sc *rx_sc;
1398 	struct macsec_dev *macsec;
1399 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
1400 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1401 	struct macsec_secy *secy;
1402 
1403 	list_for_each_entry(macsec, &rxd->secys, secys) {
1404 		if (find_rx_sc_rtnl(&macsec->secy, sci))
1405 			return ERR_PTR(-EEXIST);
1406 	}
1407 
1408 	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1409 	if (!rx_sc)
1410 		return ERR_PTR(-ENOMEM);
1411 
1412 	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1413 	if (!rx_sc->stats) {
1414 		kfree(rx_sc);
1415 		return ERR_PTR(-ENOMEM);
1416 	}
1417 
1418 	rx_sc->sci = sci;
1419 	rx_sc->active = true;
1420 	refcount_set(&rx_sc->refcnt, 1);
1421 
1422 	secy = &macsec_priv(dev)->secy;
1423 	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1424 	rcu_assign_pointer(secy->rx_sc, rx_sc);
1425 
1426 	if (rx_sc->active)
1427 		secy->n_rx_sc++;
1428 
1429 	return rx_sc;
1430 }
1431 
1432 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1433 		      int icv_len)
1434 {
1435 	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1436 	if (!tx_sa->stats)
1437 		return -ENOMEM;
1438 
1439 	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1440 	if (IS_ERR(tx_sa->key.tfm)) {
1441 		free_percpu(tx_sa->stats);
1442 		return PTR_ERR(tx_sa->key.tfm);
1443 	}
1444 
1445 	tx_sa->active = false;
1446 	refcount_set(&tx_sa->refcnt, 1);
1447 	spin_lock_init(&tx_sa->lock);
1448 
1449 	return 0;
1450 }
1451 
1452 static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1453 {
1454 	tx_sa->active = false;
1455 
1456 	macsec_txsa_put(tx_sa);
1457 }
1458 
1459 static struct genl_family macsec_fam;
1460 
1461 static struct net_device *get_dev_from_nl(struct net *net,
1462 					  struct nlattr **attrs)
1463 {
1464 	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1465 	struct net_device *dev;
1466 
1467 	dev = __dev_get_by_index(net, ifindex);
1468 	if (!dev)
1469 		return ERR_PTR(-ENODEV);
1470 
1471 	if (!netif_is_macsec(dev))
1472 		return ERR_PTR(-ENODEV);
1473 
1474 	return dev;
1475 }
1476 
1477 static sci_t nla_get_sci(const struct nlattr *nla)
1478 {
1479 	return (__force sci_t)nla_get_u64(nla);
1480 }
1481 
1482 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1483 		       int padattr)
1484 {
1485 	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1486 }
1487 
1488 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1489 					     struct nlattr **attrs,
1490 					     struct nlattr **tb_sa,
1491 					     struct net_device **devp,
1492 					     struct macsec_secy **secyp,
1493 					     struct macsec_tx_sc **scp,
1494 					     u8 *assoc_num)
1495 {
1496 	struct net_device *dev;
1497 	struct macsec_secy *secy;
1498 	struct macsec_tx_sc *tx_sc;
1499 	struct macsec_tx_sa *tx_sa;
1500 
1501 	if (!tb_sa[MACSEC_SA_ATTR_AN])
1502 		return ERR_PTR(-EINVAL);
1503 
1504 	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1505 
1506 	dev = get_dev_from_nl(net, attrs);
1507 	if (IS_ERR(dev))
1508 		return ERR_CAST(dev);
1509 
1510 	if (*assoc_num >= MACSEC_NUM_AN)
1511 		return ERR_PTR(-EINVAL);
1512 
1513 	secy = &macsec_priv(dev)->secy;
1514 	tx_sc = &secy->tx_sc;
1515 
1516 	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1517 	if (!tx_sa)
1518 		return ERR_PTR(-ENODEV);
1519 
1520 	*devp = dev;
1521 	*scp = tx_sc;
1522 	*secyp = secy;
1523 	return tx_sa;
1524 }
1525 
1526 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1527 					     struct nlattr **attrs,
1528 					     struct nlattr **tb_rxsc,
1529 					     struct net_device **devp,
1530 					     struct macsec_secy **secyp)
1531 {
1532 	struct net_device *dev;
1533 	struct macsec_secy *secy;
1534 	struct macsec_rx_sc *rx_sc;
1535 	sci_t sci;
1536 
1537 	dev = get_dev_from_nl(net, attrs);
1538 	if (IS_ERR(dev))
1539 		return ERR_CAST(dev);
1540 
1541 	secy = &macsec_priv(dev)->secy;
1542 
1543 	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1544 		return ERR_PTR(-EINVAL);
1545 
1546 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1547 	rx_sc = find_rx_sc_rtnl(secy, sci);
1548 	if (!rx_sc)
1549 		return ERR_PTR(-ENODEV);
1550 
1551 	*secyp = secy;
1552 	*devp = dev;
1553 
1554 	return rx_sc;
1555 }
1556 
1557 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1558 					     struct nlattr **attrs,
1559 					     struct nlattr **tb_rxsc,
1560 					     struct nlattr **tb_sa,
1561 					     struct net_device **devp,
1562 					     struct macsec_secy **secyp,
1563 					     struct macsec_rx_sc **scp,
1564 					     u8 *assoc_num)
1565 {
1566 	struct macsec_rx_sc *rx_sc;
1567 	struct macsec_rx_sa *rx_sa;
1568 
1569 	if (!tb_sa[MACSEC_SA_ATTR_AN])
1570 		return ERR_PTR(-EINVAL);
1571 
1572 	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1573 	if (*assoc_num >= MACSEC_NUM_AN)
1574 		return ERR_PTR(-EINVAL);
1575 
1576 	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1577 	if (IS_ERR(rx_sc))
1578 		return ERR_CAST(rx_sc);
1579 
1580 	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1581 	if (!rx_sa)
1582 		return ERR_PTR(-ENODEV);
1583 
1584 	*scp = rx_sc;
1585 	return rx_sa;
1586 }
1587 
1588 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1589 	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1590 	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1591 	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1592 };
1593 
1594 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1595 	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1596 	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1597 };
1598 
1599 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1600 	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1601 	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1602 	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
1603 	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1604 				   .len = MACSEC_KEYID_LEN, },
1605 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1606 				 .len = MACSEC_MAX_KEY_LEN, },
1607 };
1608 
1609 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1610 {
1611 	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1612 		return -EINVAL;
1613 
1614 	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
1615 			     attrs[MACSEC_ATTR_SA_CONFIG],
1616 			     macsec_genl_sa_policy, NULL))
1617 		return -EINVAL;
1618 
1619 	return 0;
1620 }
1621 
1622 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1623 {
1624 	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1625 		return -EINVAL;
1626 
1627 	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
1628 			     attrs[MACSEC_ATTR_RXSC_CONFIG],
1629 			     macsec_genl_rxsc_policy, NULL))
1630 		return -EINVAL;
1631 
1632 	return 0;
1633 }
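/* Usage sketch (editorial): these nested attributes are normally built by
 * iproute2 rather than by hand-rolled netlink; per ip-macsec(8), something
 * like (key ids and hex keys are placeholders):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <32 hex digits>
 *   ip macsec add macsec0 rx port 1 address <peer mac>
 *   ip macsec add macsec0 rx port 1 address <peer mac> sa 0 pn 1 on \
 *		key 02 <32 hex digits>
 */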
1634 
1635 static bool validate_add_rxsa(struct nlattr **attrs)
1636 {
1637 	if (!attrs[MACSEC_SA_ATTR_AN] ||
1638 	    !attrs[MACSEC_SA_ATTR_KEY] ||
1639 	    !attrs[MACSEC_SA_ATTR_KEYID])
1640 		return false;
1641 
1642 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1643 		return false;
1644 
1645 	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1646 		return false;
1647 
1648 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1649 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1650 			return false;
1651 	}
1652 
1653 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1654 		return false;
1655 
1656 	return true;
1657 }
1658 
1659 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1660 {
1661 	struct net_device *dev;
1662 	struct nlattr **attrs = info->attrs;
1663 	struct macsec_secy *secy;
1664 	struct macsec_rx_sc *rx_sc;
1665 	struct macsec_rx_sa *rx_sa;
1666 	unsigned char assoc_num;
1667 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1668 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1669 	int err;
1670 
1671 	if (!attrs[MACSEC_ATTR_IFINDEX])
1672 		return -EINVAL;
1673 
1674 	if (parse_sa_config(attrs, tb_sa))
1675 		return -EINVAL;
1676 
1677 	if (parse_rxsc_config(attrs, tb_rxsc))
1678 		return -EINVAL;
1679 
1680 	if (!validate_add_rxsa(tb_sa))
1681 		return -EINVAL;
1682 
1683 	rtnl_lock();
1684 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1685 	if (IS_ERR(rx_sc)) {
1686 		rtnl_unlock();
1687 		return PTR_ERR(rx_sc);
1688 	}
1689 
1690 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1691 
1692 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1693 		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1694 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1695 		rtnl_unlock();
1696 		return -EINVAL;
1697 	}
1698 
1699 	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1700 	if (rx_sa) {
1701 		rtnl_unlock();
1702 		return -EBUSY;
1703 	}
1704 
1705 	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1706 	if (!rx_sa) {
1707 		rtnl_unlock();
1708 		return -ENOMEM;
1709 	}
1710 
1711 	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1712 			 secy->key_len, secy->icv_len);
1713 	if (err < 0) {
1714 		kfree(rx_sa);
1715 		rtnl_unlock();
1716 		return err;
1717 	}
1718 
1719 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1720 		spin_lock_bh(&rx_sa->lock);
1721 		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1722 		spin_unlock_bh(&rx_sa->lock);
1723 	}
1724 
1725 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1726 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1727 
1728 	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1729 	rx_sa->sc = rx_sc;
1730 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1731 
1732 	rtnl_unlock();
1733 
1734 	return 0;
1735 }
1736 
1737 static bool validate_add_rxsc(struct nlattr **attrs)
1738 {
1739 	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1740 		return false;
1741 
1742 	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1743 		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1744 			return false;
1745 	}
1746 
1747 	return true;
1748 }
1749 
1750 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1751 {
1752 	struct net_device *dev;
1753 	sci_t sci = MACSEC_UNDEF_SCI;
1754 	struct nlattr **attrs = info->attrs;
1755 	struct macsec_rx_sc *rx_sc;
1756 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1757 
1758 	if (!attrs[MACSEC_ATTR_IFINDEX])
1759 		return -EINVAL;
1760 
1761 	if (parse_rxsc_config(attrs, tb_rxsc))
1762 		return -EINVAL;
1763 
1764 	if (!validate_add_rxsc(tb_rxsc))
1765 		return -EINVAL;
1766 
1767 	rtnl_lock();
1768 	dev = get_dev_from_nl(genl_info_net(info), attrs);
1769 	if (IS_ERR(dev)) {
1770 		rtnl_unlock();
1771 		return PTR_ERR(dev);
1772 	}
1773 
1774 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1775 
1776 	rx_sc = create_rx_sc(dev, sci);
1777 	if (IS_ERR(rx_sc)) {
1778 		rtnl_unlock();
1779 		return PTR_ERR(rx_sc);
1780 	}
1781 
1782 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1783 		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1784 
1785 	rtnl_unlock();
1786 
1787 	return 0;
1788 }
1789 
1790 static bool validate_add_txsa(struct nlattr **attrs)
1791 {
1792 	if (!attrs[MACSEC_SA_ATTR_AN] ||
1793 	    !attrs[MACSEC_SA_ATTR_PN] ||
1794 	    !attrs[MACSEC_SA_ATTR_KEY] ||
1795 	    !attrs[MACSEC_SA_ATTR_KEYID])
1796 		return false;
1797 
1798 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1799 		return false;
1800 
1801 	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1802 		return false;
1803 
1804 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1805 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1806 			return false;
1807 	}
1808 
1809 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1810 		return false;
1811 
1812 	return true;
1813 }
1814 
1815 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1816 {
1817 	struct net_device *dev;
1818 	struct nlattr **attrs = info->attrs;
1819 	struct macsec_secy *secy;
1820 	struct macsec_tx_sc *tx_sc;
1821 	struct macsec_tx_sa *tx_sa;
1822 	unsigned char assoc_num;
1823 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1824 	int err;
1825 
1826 	if (!attrs[MACSEC_ATTR_IFINDEX])
1827 		return -EINVAL;
1828 
1829 	if (parse_sa_config(attrs, tb_sa))
1830 		return -EINVAL;
1831 
1832 	if (!validate_add_txsa(tb_sa))
1833 		return -EINVAL;
1834 
1835 	rtnl_lock();
1836 	dev = get_dev_from_nl(genl_info_net(info), attrs);
1837 	if (IS_ERR(dev)) {
1838 		rtnl_unlock();
1839 		return PTR_ERR(dev);
1840 	}
1841 
1842 	secy = &macsec_priv(dev)->secy;
1843 	tx_sc = &secy->tx_sc;
1844 
1845 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1846 
1847 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1848 		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1849 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1850 		rtnl_unlock();
1851 		return -EINVAL;
1852 	}
1853 
1854 	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
1855 	if (tx_sa) {
1856 		rtnl_unlock();
1857 		return -EBUSY;
1858 	}
1859 
1860 	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
1861 	if (!tx_sa) {
1862 		rtnl_unlock();
1863 		return -ENOMEM;
1864 	}
1865 
1866 	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1867 			 secy->key_len, secy->icv_len);
1868 	if (err < 0) {
1869 		kfree(tx_sa);
1870 		rtnl_unlock();
1871 		return err;
1872 	}
1873 
1874 	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1875 
1876 	spin_lock_bh(&tx_sa->lock);
1877 	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1878 	spin_unlock_bh(&tx_sa->lock);
1879 
1880 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1881 		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1882 
1883 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
1884 		secy->operational = true;
1885 
1886 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
1887 
1888 	rtnl_unlock();
1889 
1890 	return 0;
1891 }
1892 
1893 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
1894 {
1895 	struct nlattr **attrs = info->attrs;
1896 	struct net_device *dev;
1897 	struct macsec_secy *secy;
1898 	struct macsec_rx_sc *rx_sc;
1899 	struct macsec_rx_sa *rx_sa;
1900 	u8 assoc_num;
1901 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1902 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1903 
1904 	if (!attrs[MACSEC_ATTR_IFINDEX])
1905 		return -EINVAL;
1906 
1907 	if (parse_sa_config(attrs, tb_sa))
1908 		return -EINVAL;
1909 
1910 	if (parse_rxsc_config(attrs, tb_rxsc))
1911 		return -EINVAL;
1912 
1913 	rtnl_lock();
1914 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
1915 				 &dev, &secy, &rx_sc, &assoc_num);
1916 	if (IS_ERR(rx_sa)) {
1917 		rtnl_unlock();
1918 		return PTR_ERR(rx_sa);
1919 	}
1920 
1921 	if (rx_sa->active) {
1922 		rtnl_unlock();
1923 		return -EBUSY;
1924 	}
1925 
1926 	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
1927 	clear_rx_sa(rx_sa);
1928 
1929 	rtnl_unlock();
1930 
1931 	return 0;
1932 }
1933 
1934 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
1935 {
1936 	struct nlattr **attrs = info->attrs;
1937 	struct net_device *dev;
1938 	struct macsec_secy *secy;
1939 	struct macsec_rx_sc *rx_sc;
1940 	sci_t sci;
1941 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1942 
1943 	if (!attrs[MACSEC_ATTR_IFINDEX])
1944 		return -EINVAL;
1945 
1946 	if (parse_rxsc_config(attrs, tb_rxsc))
1947 		return -EINVAL;
1948 
1949 	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1950 		return -EINVAL;
1951 
1952 	rtnl_lock();
1953 	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
1954 	if (IS_ERR(dev)) {
1955 		rtnl_unlock();
1956 		return PTR_ERR(dev);
1957 	}
1958 
1959 	secy = &macsec_priv(dev)->secy;
1960 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1961 
1962 	rx_sc = del_rx_sc(secy, sci);
1963 	if (!rx_sc) {
1964 		rtnl_unlock();
1965 		return -ENODEV;
1966 	}
1967 
1968 	free_rx_sc(rx_sc);
1969 	rtnl_unlock();
1970 
1971 	return 0;
1972 }
1973 
1974 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
1975 {
1976 	struct nlattr **attrs = info->attrs;
1977 	struct net_device *dev;
1978 	struct macsec_secy *secy;
1979 	struct macsec_tx_sc *tx_sc;
1980 	struct macsec_tx_sa *tx_sa;
1981 	u8 assoc_num;
1982 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1983 
1984 	if (!attrs[MACSEC_ATTR_IFINDEX])
1985 		return -EINVAL;
1986 
1987 	if (parse_sa_config(attrs, tb_sa))
1988 		return -EINVAL;
1989 
1990 	rtnl_lock();
1991 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
1992 				 &dev, &secy, &tx_sc, &assoc_num);
1993 	if (IS_ERR(tx_sa)) {
1994 		rtnl_unlock();
1995 		return PTR_ERR(tx_sa);
1996 	}
1997 
1998 	if (tx_sa->active) {
1999 		rtnl_unlock();
2000 		return -EBUSY;
2001 	}
2002 
2003 	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2004 	clear_tx_sa(tx_sa);
2005 
2006 	rtnl_unlock();
2007 
2008 	return 0;
2009 }
2010 
2011 static bool validate_upd_sa(struct nlattr **attrs)
2012 {
2013 	if (!attrs[MACSEC_SA_ATTR_AN] ||
2014 	    attrs[MACSEC_SA_ATTR_KEY] ||
2015 	    attrs[MACSEC_SA_ATTR_KEYID])
2016 		return false;
2017 
2018 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2019 		return false;
2020 
2021 	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
2022 		return false;
2023 
2024 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2025 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2026 			return false;
2027 	}
2028 
2029 	return true;
2030 }
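/* What validate_upd_sa() permits, in short: an SA update may toggle
 * the active flag or set a new (nonzero) packet number, but it can
 * never replace key material -- MACSEC_SA_ATTR_KEY and
 * MACSEC_SA_ATTR_KEYID are rejected above, so re-keying means
 * deleting and re-adding the SA. A hypothetical iproute2 command
 * (device and SA numbers are examples only) that would reach
 * macsec_upd_txsa() below:
 *
 *	ip macsec set macsec0 tx sa 0 pn 1024 on
 */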
2031 
2032 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2033 {
2034 	struct nlattr **attrs = info->attrs;
2035 	struct net_device *dev;
2036 	struct macsec_secy *secy;
2037 	struct macsec_tx_sc *tx_sc;
2038 	struct macsec_tx_sa *tx_sa;
2039 	u8 assoc_num;
2040 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2041 
2042 	if (!attrs[MACSEC_ATTR_IFINDEX])
2043 		return -EINVAL;
2044 
2045 	if (parse_sa_config(attrs, tb_sa))
2046 		return -EINVAL;
2047 
2048 	if (!validate_upd_sa(tb_sa))
2049 		return -EINVAL;
2050 
2051 	rtnl_lock();
2052 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2053 				 &dev, &secy, &tx_sc, &assoc_num);
2054 	if (IS_ERR(tx_sa)) {
2055 		rtnl_unlock();
2056 		return PTR_ERR(tx_sa);
2057 	}
2058 
2059 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2060 		spin_lock_bh(&tx_sa->lock);
2061 		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2062 		spin_unlock_bh(&tx_sa->lock);
2063 	}
2064 
2065 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2066 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2067 
2068 	if (assoc_num == tx_sc->encoding_sa)
2069 		secy->operational = tx_sa->active;
2070 
2071 	rtnl_unlock();
2072 
2073 	return 0;
2074 }
2075 
2076 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2077 {
2078 	struct nlattr **attrs = info->attrs;
2079 	struct net_device *dev;
2080 	struct macsec_secy *secy;
2081 	struct macsec_rx_sc *rx_sc;
2082 	struct macsec_rx_sa *rx_sa;
2083 	u8 assoc_num;
2084 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2085 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2086 
2087 	if (!attrs[MACSEC_ATTR_IFINDEX])
2088 		return -EINVAL;
2089 
2090 	if (parse_rxsc_config(attrs, tb_rxsc))
2091 		return -EINVAL;
2092 
2093 	if (parse_sa_config(attrs, tb_sa))
2094 		return -EINVAL;
2095 
2096 	if (!validate_upd_sa(tb_sa))
2097 		return -EINVAL;
2098 
2099 	rtnl_lock();
2100 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2101 				 &dev, &secy, &rx_sc, &assoc_num);
2102 	if (IS_ERR(rx_sa)) {
2103 		rtnl_unlock();
2104 		return PTR_ERR(rx_sa);
2105 	}
2106 
2107 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2108 		spin_lock_bh(&rx_sa->lock);
2109 		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2110 		spin_unlock_bh(&rx_sa->lock);
2111 	}
2112 
2113 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2114 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2115 
2116 	rtnl_unlock();
2117 	return 0;
2118 }
2119 
2120 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2121 {
2122 	struct nlattr **attrs = info->attrs;
2123 	struct net_device *dev;
2124 	struct macsec_secy *secy;
2125 	struct macsec_rx_sc *rx_sc;
2126 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2127 
2128 	if (!attrs[MACSEC_ATTR_IFINDEX])
2129 		return -EINVAL;
2130 
2131 	if (parse_rxsc_config(attrs, tb_rxsc))
2132 		return -EINVAL;
2133 
2134 	if (!validate_add_rxsc(tb_rxsc))
2135 		return -EINVAL;
2136 
2137 	rtnl_lock();
2138 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2139 	if (IS_ERR(rx_sc)) {
2140 		rtnl_unlock();
2141 		return PTR_ERR(rx_sc);
2142 	}
2143 
2144 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2145 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2146 
2147 		if (rx_sc->active != new)
2148 			secy->n_rx_sc += new ? 1 : -1;
2149 
2150 		rx_sc->active = new;
2151 	}
2152 
2153 	rtnl_unlock();
2154 
2155 	return 0;
2156 }
2157 
2158 static int copy_tx_sa_stats(struct sk_buff *skb,
2159 			    struct macsec_tx_sa_stats __percpu *pstats)
2160 {
2161 	struct macsec_tx_sa_stats sum = {0, };
2162 	int cpu;
2163 
2164 	for_each_possible_cpu(cpu) {
2165 		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2166 
2167 		sum.OutPktsProtected += stats->OutPktsProtected;
2168 		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
2169 	}
2170 
2171 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
2172 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
2173 		return -EMSGSIZE;
2174 
2175 	return 0;
2176 }
2177 
2178 static int copy_rx_sa_stats(struct sk_buff *skb,
2179 			    struct macsec_rx_sa_stats __percpu *pstats)
2180 {
2181 	struct macsec_rx_sa_stats sum = {0, };
2182 	int cpu;
2183 
2184 	for_each_possible_cpu(cpu) {
2185 		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2186 
2187 		sum.InPktsOK         += stats->InPktsOK;
2188 		sum.InPktsInvalid    += stats->InPktsInvalid;
2189 		sum.InPktsNotValid   += stats->InPktsNotValid;
2190 		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
2191 		sum.InPktsUnusedSA   += stats->InPktsUnusedSA;
2192 	}
2193 
2194 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
2195 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
2196 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
2197 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
2198 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
2199 		return -EMSGSIZE;
2200 
2201 	return 0;
2202 }
2203 
2204 static int copy_rx_sc_stats(struct sk_buff *skb,
2205 			    struct pcpu_rx_sc_stats __percpu *pstats)
2206 {
2207 	struct macsec_rx_sc_stats sum = {0, };
2208 	int cpu;
2209 
2210 	for_each_possible_cpu(cpu) {
2211 		const struct pcpu_rx_sc_stats *stats;
2212 		struct macsec_rx_sc_stats tmp;
2213 		unsigned int start;
2214 
2215 		stats = per_cpu_ptr(pstats, cpu);
2216 		do {
2217 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2218 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2219 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2220 
2221 		sum.InOctetsValidated += tmp.InOctetsValidated;
2222 		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
2223 		sum.InPktsUnchecked   += tmp.InPktsUnchecked;
2224 		sum.InPktsDelayed     += tmp.InPktsDelayed;
2225 		sum.InPktsOK          += tmp.InPktsOK;
2226 		sum.InPktsInvalid     += tmp.InPktsInvalid;
2227 		sum.InPktsLate        += tmp.InPktsLate;
2228 		sum.InPktsNotValid    += tmp.InPktsNotValid;
2229 		sum.InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2230 		sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
2231 	}
2232 
2233 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2234 			      sum.InOctetsValidated,
2235 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2236 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2237 			      sum.InOctetsDecrypted,
2238 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2239 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2240 			      sum.InPktsUnchecked,
2241 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2242 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2243 			      sum.InPktsDelayed,
2244 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2245 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2246 			      sum.InPktsOK,
2247 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2248 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2249 			      sum.InPktsInvalid,
2250 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2251 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2252 			      sum.InPktsLate,
2253 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2254 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2255 			      sum.InPktsNotValid,
2256 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2257 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2258 			      sum.InPktsNotUsingSA,
2259 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2260 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2261 			      sum.InPktsUnusedSA,
2262 			      MACSEC_RXSC_STATS_ATTR_PAD))
2263 		return -EMSGSIZE;
2264 
2265 	return 0;
2266 }
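/* The loops above use the canonical u64_stats snapshot pattern: the
 * datapath brackets its updates with u64_stats_update_begin()/end()
 * on the per-cpu syncp, and readers retry until they copy a stable
 * snapshot. A minimal sketch of the reader side, as used here:
 *
 *	unsigned int start;
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		memcpy(&tmp, &stats->stats, sizeof(tmp));
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 *
 * On 64-bit kernels the seqcount typically compiles away; on 32-bit
 * it protects the two-word counters from torn reads.
 */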
2267 
2268 static int copy_tx_sc_stats(struct sk_buff *skb,
2269 			    struct pcpu_tx_sc_stats __percpu *pstats)
2270 {
2271 	struct macsec_tx_sc_stats sum = {0, };
2272 	int cpu;
2273 
2274 	for_each_possible_cpu(cpu) {
2275 		const struct pcpu_tx_sc_stats *stats;
2276 		struct macsec_tx_sc_stats tmp;
2277 		unsigned int start;
2278 
2279 		stats = per_cpu_ptr(pstats, cpu);
2280 		do {
2281 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2282 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2283 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2284 
2285 		sum.OutPktsProtected   += tmp.OutPktsProtected;
2286 		sum.OutPktsEncrypted   += tmp.OutPktsEncrypted;
2287 		sum.OutOctetsProtected += tmp.OutOctetsProtected;
2288 		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2289 	}
2290 
2291 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2292 			      sum.OutPktsProtected,
2293 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2294 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2295 			      sum.OutPktsEncrypted,
2296 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2297 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2298 			      sum.OutOctetsProtected,
2299 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2300 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2301 			      sum.OutOctetsEncrypted,
2302 			      MACSEC_TXSC_STATS_ATTR_PAD))
2303 		return -EMSGSIZE;
2304 
2305 	return 0;
2306 }
2307 
2308 static int copy_secy_stats(struct sk_buff *skb,
2309 			   struct pcpu_secy_stats __percpu *pstats)
2310 {
2311 	struct macsec_dev_stats sum = {0, };
2312 	int cpu;
2313 
2314 	for_each_possible_cpu(cpu) {
2315 		const struct pcpu_secy_stats *stats;
2316 		struct macsec_dev_stats tmp;
2317 		unsigned int start;
2318 
2319 		stats = per_cpu_ptr(pstats, cpu);
2320 		do {
2321 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2322 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2323 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2324 
2325 		sum.OutPktsUntagged  += tmp.OutPktsUntagged;
2326 		sum.InPktsUntagged   += tmp.InPktsUntagged;
2327 		sum.OutPktsTooLong   += tmp.OutPktsTooLong;
2328 		sum.InPktsNoTag      += tmp.InPktsNoTag;
2329 		sum.InPktsBadTag     += tmp.InPktsBadTag;
2330 		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2331 		sum.InPktsNoSCI      += tmp.InPktsNoSCI;
2332 		sum.InPktsOverrun    += tmp.InPktsOverrun;
2333 	}
2334 
2335 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2336 			      sum.OutPktsUntagged,
2337 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2338 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2339 			      sum.InPktsUntagged,
2340 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2341 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2342 			      sum.OutPktsTooLong,
2343 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2344 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2345 			      sum.InPktsNoTag,
2346 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2347 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2348 			      sum.InPktsBadTag,
2349 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2350 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2351 			      sum.InPktsUnknownSCI,
2352 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2353 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2354 			      sum.InPktsNoSCI,
2355 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2356 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2357 			      sum.InPktsOverrun,
2358 			      MACSEC_SECY_STATS_ATTR_PAD))
2359 		return -EMSGSIZE;
2360 
2361 	return 0;
2362 }
2363 
2364 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2365 {
2366 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2367 	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
2368 	u64 csid;
2369 
2370 	if (!secy_nest)
2371 		return 1;
2372 
2373 	switch (secy->key_len) {
2374 	case MACSEC_GCM_AES_128_SAK_LEN:
2375 		csid = MACSEC_DEFAULT_CIPHER_ID;
2376 		break;
2377 	case MACSEC_GCM_AES_256_SAK_LEN:
2378 		csid = MACSEC_CIPHER_ID_GCM_AES_256;
2379 		break;
2380 	default:
2381 		goto cancel;
2382 	}
2383 
2384 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2385 			MACSEC_SECY_ATTR_PAD) ||
2386 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2387 			      csid, MACSEC_SECY_ATTR_PAD) ||
2388 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2389 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2390 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2391 	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2392 	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
2393 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
2394 	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
2395 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
2396 	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
2397 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
2398 		goto cancel;
2399 
2400 	if (secy->replay_protect) {
2401 		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
2402 			goto cancel;
2403 	}
2404 
2405 	nla_nest_end(skb, secy_nest);
2406 	return 0;
2407 
2408 cancel:
2409 	nla_nest_cancel(skb, secy_nest);
2410 	return 1;
2411 }
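/* nla_put_secy() follows the usual nested-attribute discipline:
 * nla_nest_start() opens MACSEC_ATTR_SECY, any nla_put_*() failure
 * unwinds through nla_nest_cancel(), and only a fully populated nest
 * is committed with nla_nest_end(). The nonzero return is mapped to
 * -EMSGSIZE by the caller, dump_secy() below.
 */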
2412 
2413 static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2414 		     struct sk_buff *skb, struct netlink_callback *cb)
2415 {
2416 	struct macsec_rx_sc *rx_sc;
2417 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2418 	struct nlattr *txsa_list, *rxsc_list;
2419 	int i, j;
2420 	void *hdr;
2421 	struct nlattr *attr;
2422 
2423 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2424 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
2425 	if (!hdr)
2426 		return -EMSGSIZE;
2427 
2428 	genl_dump_check_consistent(cb, hdr);
2429 
2430 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2431 		goto nla_put_failure;
2432 
2433 	if (nla_put_secy(secy, skb))
2434 		goto nla_put_failure;
2435 
2436 	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
2437 	if (!attr)
2438 		goto nla_put_failure;
2439 	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
2440 		nla_nest_cancel(skb, attr);
2441 		goto nla_put_failure;
2442 	}
2443 	nla_nest_end(skb, attr);
2444 
2445 	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
2446 	if (!attr)
2447 		goto nla_put_failure;
2448 	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
2449 		nla_nest_cancel(skb, attr);
2450 		goto nla_put_failure;
2451 	}
2452 	nla_nest_end(skb, attr);
2453 
2454 	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
2455 	if (!txsa_list)
2456 		goto nla_put_failure;
2457 	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
2458 		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
2459 		struct nlattr *txsa_nest;
2460 
2461 		if (!tx_sa)
2462 			continue;
2463 
2464 		txsa_nest = nla_nest_start(skb, j++);
2465 		if (!txsa_nest) {
2466 			nla_nest_cancel(skb, txsa_list);
2467 			goto nla_put_failure;
2468 		}
2469 
2470 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2471 		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
2472 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
2473 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
2474 			nla_nest_cancel(skb, txsa_nest);
2475 			nla_nest_cancel(skb, txsa_list);
2476 			goto nla_put_failure;
2477 		}
2478 
2479 		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2480 		if (!attr) {
2481 			nla_nest_cancel(skb, txsa_nest);
2482 			nla_nest_cancel(skb, txsa_list);
2483 			goto nla_put_failure;
2484 		}
2485 		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
2486 			nla_nest_cancel(skb, attr);
2487 			nla_nest_cancel(skb, txsa_nest);
2488 			nla_nest_cancel(skb, txsa_list);
2489 			goto nla_put_failure;
2490 		}
2491 		nla_nest_end(skb, attr);
2492 
2493 		nla_nest_end(skb, txsa_nest);
2494 	}
2495 	nla_nest_end(skb, txsa_list);
2496 
2497 	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
2498 	if (!rxsc_list)
2499 		goto nla_put_failure;
2500 
2501 	j = 1;
2502 	for_each_rxsc_rtnl(secy, rx_sc) {
2503 		int k;
2504 		struct nlattr *rxsa_list;
2505 		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
2506 
2507 		if (!rxsc_nest) {
2508 			nla_nest_cancel(skb, rxsc_list);
2509 			goto nla_put_failure;
2510 		}
2511 
2512 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
2513 		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
2514 				MACSEC_RXSC_ATTR_PAD)) {
2515 			nla_nest_cancel(skb, rxsc_nest);
2516 			nla_nest_cancel(skb, rxsc_list);
2517 			goto nla_put_failure;
2518 		}
2519 
2520 		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
2521 		if (!attr) {
2522 			nla_nest_cancel(skb, rxsc_nest);
2523 			nla_nest_cancel(skb, rxsc_list);
2524 			goto nla_put_failure;
2525 		}
2526 		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
2527 			nla_nest_cancel(skb, attr);
2528 			nla_nest_cancel(skb, rxsc_nest);
2529 			nla_nest_cancel(skb, rxsc_list);
2530 			goto nla_put_failure;
2531 		}
2532 		nla_nest_end(skb, attr);
2533 
2534 		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
2535 		if (!rxsa_list) {
2536 			nla_nest_cancel(skb, rxsc_nest);
2537 			nla_nest_cancel(skb, rxsc_list);
2538 			goto nla_put_failure;
2539 		}
2540 
2541 		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
2542 			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
2543 			struct nlattr *rxsa_nest;
2544 
2545 			if (!rx_sa)
2546 				continue;
2547 
2548 			rxsa_nest = nla_nest_start(skb, k++);
2549 			if (!rxsa_nest) {
2550 				nla_nest_cancel(skb, rxsa_list);
2551 				nla_nest_cancel(skb, rxsc_nest);
2552 				nla_nest_cancel(skb, rxsc_list);
2553 				goto nla_put_failure;
2554 			}
2555 
2556 			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2557 			if (!attr) {
2558 				nla_nest_cancel(skb, rxsa_list);
2559 				nla_nest_cancel(skb, rxsc_nest);
2560 				nla_nest_cancel(skb, rxsc_list);
2561 				goto nla_put_failure;
2562 			}
2563 			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
2564 				nla_nest_cancel(skb, attr);
2565 				nla_nest_cancel(skb, rxsa_list);
2566 				nla_nest_cancel(skb, rxsc_nest);
2567 				nla_nest_cancel(skb, rxsc_list);
2568 				goto nla_put_failure;
2569 			}
2570 			nla_nest_end(skb, attr);
2571 
2572 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2573 			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
2574 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
2575 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
2576 				nla_nest_cancel(skb, rxsa_nest);
2577 				nla_nest_cancel(skb, rxsc_nest);
2578 				nla_nest_cancel(skb, rxsc_list);
2579 				goto nla_put_failure;
2580 			}
2581 			nla_nest_end(skb, rxsa_nest);
2582 		}
2583 
2584 		nla_nest_end(skb, rxsa_list);
2585 		nla_nest_end(skb, rxsc_nest);
2586 	}
2587 
2588 	nla_nest_end(skb, rxsc_list);
2589 
2590 	genlmsg_end(skb, hdr);
2591 
2592 	return 0;
2593 
2594 nla_put_failure:
2595 	genlmsg_cancel(skb, hdr);
2596 	return -EMSGSIZE;
2597 }
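/* dump_secy() ties each dump message to macsec_generation via
 * genl_dump_check_consistent(): writers bump the counter under RTNL
 * whenever a link is created or destroyed, so a multi-part dump that
 * races with such a change is flagged as interrupted
 * (NLM_F_DUMP_INTR) and userspace knows to retry.
 */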
2598 
2599 static int macsec_generation = 1; /* protected by RTNL */
2600 
2601 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2602 {
2603 	struct net *net = sock_net(skb->sk);
2604 	struct net_device *dev;
2605 	int dev_idx, d;
2606 
2607 	dev_idx = cb->args[0];
2608 
2609 	d = 0;
2610 	rtnl_lock();
2611 
2612 	cb->seq = macsec_generation;
2613 
2614 	for_each_netdev(net, dev) {
2615 		struct macsec_secy *secy;
2616 
2617 		if (d < dev_idx)
2618 			goto next;
2619 
2620 		if (!netif_is_macsec(dev))
2621 			goto next;
2622 
2623 		secy = &macsec_priv(dev)->secy;
2624 		if (dump_secy(secy, dev, skb, cb) < 0)
2625 			goto done;
2626 next:
2627 		d++;
2628 	}
2629 
2630 done:
2631 	rtnl_unlock();
2632 	cb->args[0] = d;
2633 	return skb->len;
2634 }
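/* Dump resumption: cb->args[0] records how many devices were walked
 * before the skb filled up. On the next invocation the loop skips the
 * first dev_idx devices (d < dev_idx) and continues, so many macsec
 * devices can be delivered across several netlink messages without
 * duplication.
 */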
2635 
2636 static const struct genl_ops macsec_genl_ops[] = {
2637 	{
2638 		.cmd = MACSEC_CMD_GET_TXSC,
2639 		.dumpit = macsec_dump_txsc,
2640 		.policy = macsec_genl_policy,
2641 	},
2642 	{
2643 		.cmd = MACSEC_CMD_ADD_RXSC,
2644 		.doit = macsec_add_rxsc,
2645 		.policy = macsec_genl_policy,
2646 		.flags = GENL_ADMIN_PERM,
2647 	},
2648 	{
2649 		.cmd = MACSEC_CMD_DEL_RXSC,
2650 		.doit = macsec_del_rxsc,
2651 		.policy = macsec_genl_policy,
2652 		.flags = GENL_ADMIN_PERM,
2653 	},
2654 	{
2655 		.cmd = MACSEC_CMD_UPD_RXSC,
2656 		.doit = macsec_upd_rxsc,
2657 		.policy = macsec_genl_policy,
2658 		.flags = GENL_ADMIN_PERM,
2659 	},
2660 	{
2661 		.cmd = MACSEC_CMD_ADD_TXSA,
2662 		.doit = macsec_add_txsa,
2663 		.policy = macsec_genl_policy,
2664 		.flags = GENL_ADMIN_PERM,
2665 	},
2666 	{
2667 		.cmd = MACSEC_CMD_DEL_TXSA,
2668 		.doit = macsec_del_txsa,
2669 		.policy = macsec_genl_policy,
2670 		.flags = GENL_ADMIN_PERM,
2671 	},
2672 	{
2673 		.cmd = MACSEC_CMD_UPD_TXSA,
2674 		.doit = macsec_upd_txsa,
2675 		.policy = macsec_genl_policy,
2676 		.flags = GENL_ADMIN_PERM,
2677 	},
2678 	{
2679 		.cmd = MACSEC_CMD_ADD_RXSA,
2680 		.doit = macsec_add_rxsa,
2681 		.policy = macsec_genl_policy,
2682 		.flags = GENL_ADMIN_PERM,
2683 	},
2684 	{
2685 		.cmd = MACSEC_CMD_DEL_RXSA,
2686 		.doit = macsec_del_rxsa,
2687 		.policy = macsec_genl_policy,
2688 		.flags = GENL_ADMIN_PERM,
2689 	},
2690 	{
2691 		.cmd = MACSEC_CMD_UPD_RXSA,
2692 		.doit = macsec_upd_rxsa,
2693 		.policy = macsec_genl_policy,
2694 		.flags = GENL_ADMIN_PERM,
2695 	},
2696 };
2697 
2698 static struct genl_family macsec_fam __ro_after_init = {
2699 	.name		= MACSEC_GENL_NAME,
2700 	.hdrsize	= 0,
2701 	.version	= MACSEC_GENL_VERSION,
2702 	.maxattr	= MACSEC_ATTR_MAX,
2703 	.netnsok	= true,
2704 	.module		= THIS_MODULE,
2705 	.ops		= macsec_genl_ops,
2706 	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
2707 };
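/* Userspace reaches these ops through the generic netlink family
 * name rather than a fixed protocol number. A minimal sketch using
 * libnl (assumed userspace API, not part of this file):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);
 *	// ...build and send a MACSEC_CMD_* message against 'fam'
 */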
2708 
2709 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2710 				     struct net_device *dev)
2711 {
2712 	struct macsec_dev *macsec = netdev_priv(dev);
2713 	struct macsec_secy *secy = &macsec->secy;
2714 	struct pcpu_secy_stats *secy_stats;
2715 	int ret, len;
2716 
2717 	/* IEEE 802.1AE-2006 10.5: protection disabled, transmit in the clear */
2718 	if (!secy->protect_frames) {
2719 		secy_stats = this_cpu_ptr(macsec->stats);
2720 		u64_stats_update_begin(&secy_stats->syncp);
2721 		secy_stats->stats.OutPktsUntagged++;
2722 		u64_stats_update_end(&secy_stats->syncp);
2723 		skb->dev = macsec->real_dev;
2724 		len = skb->len;
2725 		ret = dev_queue_xmit(skb);
2726 		count_tx(dev, ret, len);
2727 		return ret;
2728 	}
2729 
2730 	if (!secy->operational) {
2731 		kfree_skb(skb);
2732 		dev->stats.tx_dropped++;
2733 		return NETDEV_TX_OK;
2734 	}
2735 
2736 	skb = macsec_encrypt(skb, dev);
2737 	if (IS_ERR(skb)) {
2738 		if (PTR_ERR(skb) != -EINPROGRESS)
2739 			dev->stats.tx_dropped++;
2740 		return NETDEV_TX_OK;
2741 	}
2742 
2743 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
2744 
2745 	macsec_encrypt_finish(skb, dev);
2746 	len = skb->len;
2747 	ret = dev_queue_xmit(skb);
2748 	count_tx(dev, ret, len);
2749 	return ret;
2750 }
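/* Transmit summary: with protect_frames off the skb bypasses MACsec
 * entirely (counted as OutPktsUntagged); with the SecY not
 * operational it is dropped; otherwise macsec_encrypt() either
 * completes synchronously or returns -EINPROGRESS when the AEAD
 * request finishes asynchronously, in which case the crypto
 * completion callback finishes the transmission and no drop is
 * counted here.
 */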
2751 
2752 #define MACSEC_FEATURES \
2753 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2754 static struct lock_class_key macsec_netdev_addr_lock_key;
2755 
2756 static int macsec_dev_init(struct net_device *dev)
2757 {
2758 	struct macsec_dev *macsec = macsec_priv(dev);
2759 	struct net_device *real_dev = macsec->real_dev;
2760 	int err;
2761 
2762 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2763 	if (!dev->tstats)
2764 		return -ENOMEM;
2765 
2766 	err = gro_cells_init(&macsec->gro_cells, dev);
2767 	if (err) {
2768 		free_percpu(dev->tstats);
2769 		return err;
2770 	}
2771 
2772 	dev->features = real_dev->features & MACSEC_FEATURES;
2773 	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
2774 
2775 	dev->needed_headroom = real_dev->needed_headroom +
2776 			       MACSEC_NEEDED_HEADROOM;
2777 	dev->needed_tailroom = real_dev->needed_tailroom +
2778 			       MACSEC_NEEDED_TAILROOM;
2779 
2780 	if (is_zero_ether_addr(dev->dev_addr))
2781 		eth_hw_addr_inherit(dev, real_dev);
2782 	if (is_zero_ether_addr(dev->broadcast))
2783 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
2784 
2785 	return 0;
2786 }
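/* The headroom/tailroom reservations above are what let the transmit
 * path prepend the SecTAG and append the ICV without reallocating:
 * MACSEC_NEEDED_HEADROOM/TAILROOM (defined earlier in this file)
 * cover the worst-case SecTAG-with-SCI and ICV on top of whatever
 * the underlying device already requires.
 */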
2787 
2788 static void macsec_dev_uninit(struct net_device *dev)
2789 {
2790 	struct macsec_dev *macsec = macsec_priv(dev);
2791 
2792 	gro_cells_destroy(&macsec->gro_cells);
2793 	free_percpu(dev->tstats);
2794 }
2795 
2796 static netdev_features_t macsec_fix_features(struct net_device *dev,
2797 					     netdev_features_t features)
2798 {
2799 	struct macsec_dev *macsec = macsec_priv(dev);
2800 	struct net_device *real_dev = macsec->real_dev;
2801 
2802 	features &= (real_dev->features & MACSEC_FEATURES) |
2803 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
2804 	features |= NETIF_F_LLTX;
2805 
2806 	return features;
2807 }
2808 
2809 static int macsec_dev_open(struct net_device *dev)
2810 {
2811 	struct macsec_dev *macsec = macsec_priv(dev);
2812 	struct net_device *real_dev = macsec->real_dev;
2813 	int err;
2814 
2815 	err = dev_uc_add(real_dev, dev->dev_addr);
2816 	if (err < 0)
2817 		return err;
2818 
2819 	if (dev->flags & IFF_ALLMULTI) {
2820 		err = dev_set_allmulti(real_dev, 1);
2821 		if (err < 0)
2822 			goto del_unicast;
2823 	}
2824 
2825 	if (dev->flags & IFF_PROMISC) {
2826 		err = dev_set_promiscuity(real_dev, 1);
2827 		if (err < 0)
2828 			goto clear_allmulti;
2829 	}
2830 
2831 	if (netif_carrier_ok(real_dev))
2832 		netif_carrier_on(dev);
2833 
2834 	return 0;
2835 clear_allmulti:
2836 	if (dev->flags & IFF_ALLMULTI)
2837 		dev_set_allmulti(real_dev, -1);
2838 del_unicast:
2839 	dev_uc_del(real_dev, dev->dev_addr);
2840 	netif_carrier_off(dev);
2841 	return err;
2842 }
2843 
2844 static int macsec_dev_stop(struct net_device *dev)
2845 {
2846 	struct macsec_dev *macsec = macsec_priv(dev);
2847 	struct net_device *real_dev = macsec->real_dev;
2848 
2849 	netif_carrier_off(dev);
2850 
2851 	dev_mc_unsync(real_dev, dev);
2852 	dev_uc_unsync(real_dev, dev);
2853 
2854 	if (dev->flags & IFF_ALLMULTI)
2855 		dev_set_allmulti(real_dev, -1);
2856 
2857 	if (dev->flags & IFF_PROMISC)
2858 		dev_set_promiscuity(real_dev, -1);
2859 
2860 	dev_uc_del(real_dev, dev->dev_addr);
2861 
2862 	return 0;
2863 }
2864 
2865 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
2866 {
2867 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
2868 
2869 	if (!(dev->flags & IFF_UP))
2870 		return;
2871 
2872 	if (change & IFF_ALLMULTI)
2873 		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
2874 
2875 	if (change & IFF_PROMISC)
2876 		dev_set_promiscuity(real_dev,
2877 				    dev->flags & IFF_PROMISC ? 1 : -1);
2878 }
2879 
2880 static void macsec_dev_set_rx_mode(struct net_device *dev)
2881 {
2882 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
2883 
2884 	dev_mc_sync(real_dev, dev);
2885 	dev_uc_sync(real_dev, dev);
2886 }
2887 
2888 static int macsec_set_mac_address(struct net_device *dev, void *p)
2889 {
2890 	struct macsec_dev *macsec = macsec_priv(dev);
2891 	struct net_device *real_dev = macsec->real_dev;
2892 	struct sockaddr *addr = p;
2893 	int err;
2894 
2895 	if (!is_valid_ether_addr(addr->sa_data))
2896 		return -EADDRNOTAVAIL;
2897 
2898 	if (!(dev->flags & IFF_UP))
2899 		goto out;
2900 
2901 	err = dev_uc_add(real_dev, addr->sa_data);
2902 	if (err < 0)
2903 		return err;
2904 
2905 	dev_uc_del(real_dev, dev->dev_addr);
2906 
2907 out:
2908 	ether_addr_copy(dev->dev_addr, addr->sa_data);
2909 	return 0;
2910 }
2911 
2912 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
2913 {
2914 	struct macsec_dev *macsec = macsec_priv(dev);
2915 	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
2916 
2917 	if (macsec->real_dev->mtu - extra < new_mtu)
2918 		return -ERANGE;
2919 
2920 	dev->mtu = new_mtu;
2921 
2922 	return 0;
2923 }
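/* Worked example, assuming macsec_extra_len(true) covers the 6-byte
 * SecTAG plus the 8-byte SCI: on a real device with a 1500-byte MTU
 * and the default 16-byte ICV, the largest acceptable macsec MTU is
 *
 *	1500 - 16 - (6 + 8) = 1470
 *
 * Anything larger is rejected with -ERANGE.
 */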
2924 
2925 static void macsec_get_stats64(struct net_device *dev,
2926 			       struct rtnl_link_stats64 *s)
2927 {
2928 	int cpu;
2929 
2930 	if (!dev->tstats)
2931 		return;
2932 
2933 	for_each_possible_cpu(cpu) {
2934 		struct pcpu_sw_netstats *stats;
2935 		struct pcpu_sw_netstats tmp;
2936 		unsigned int start;
2937 
2938 		stats = per_cpu_ptr(dev->tstats, cpu);
2939 		do {
2940 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2941 			tmp.rx_packets = stats->rx_packets;
2942 			tmp.rx_bytes   = stats->rx_bytes;
2943 			tmp.tx_packets = stats->tx_packets;
2944 			tmp.tx_bytes   = stats->tx_bytes;
2945 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2946 
2947 		s->rx_packets += tmp.rx_packets;
2948 		s->rx_bytes   += tmp.rx_bytes;
2949 		s->tx_packets += tmp.tx_packets;
2950 		s->tx_bytes   += tmp.tx_bytes;
2951 	}
2952 
2953 	s->rx_dropped = dev->stats.rx_dropped;
2954 	s->tx_dropped = dev->stats.tx_dropped;
2955 }
2956 
2957 static int macsec_get_iflink(const struct net_device *dev)
2958 {
2959 	return macsec_priv(dev)->real_dev->ifindex;
2960 }
2961 
2962 static int macsec_get_nest_level(struct net_device *dev)
2963 {
2964 	return macsec_priv(dev)->nest_level;
2965 }
2966 
2967 static const struct net_device_ops macsec_netdev_ops = {
2968 	.ndo_init		= macsec_dev_init,
2969 	.ndo_uninit		= macsec_dev_uninit,
2970 	.ndo_open		= macsec_dev_open,
2971 	.ndo_stop		= macsec_dev_stop,
2972 	.ndo_fix_features	= macsec_fix_features,
2973 	.ndo_change_mtu		= macsec_change_mtu,
2974 	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
2975 	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
2976 	.ndo_set_mac_address	= macsec_set_mac_address,
2977 	.ndo_start_xmit		= macsec_start_xmit,
2978 	.ndo_get_stats64	= macsec_get_stats64,
2979 	.ndo_get_iflink		= macsec_get_iflink,
2980 	.ndo_get_lock_subclass  = macsec_get_nest_level,
2981 };
2982 
2983 static const struct device_type macsec_type = {
2984 	.name = "macsec",
2985 };
2986 
2987 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
2988 	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
2989 	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
2990 	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
2991 	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
2992 	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
2993 	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
2994 	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
2995 	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
2996 	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
2997 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
2998 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
2999 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3000 };
3001 
3002 static void macsec_free_netdev(struct net_device *dev)
3003 {
3004 	struct macsec_dev *macsec = macsec_priv(dev);
3005 	struct net_device *real_dev = macsec->real_dev;
3006 
3007 	free_percpu(macsec->stats);
3008 	free_percpu(macsec->secy.tx_sc.stats);
3009 
3010 	dev_put(real_dev);
3011 }
3012 
3013 static void macsec_setup(struct net_device *dev)
3014 {
3015 	ether_setup(dev);
3016 	dev->min_mtu = 0;
3017 	dev->max_mtu = ETH_MAX_MTU;
3018 	dev->priv_flags |= IFF_NO_QUEUE;
3019 	dev->netdev_ops = &macsec_netdev_ops;
3020 	dev->needs_free_netdev = true;
3021 	dev->priv_destructor = macsec_free_netdev;
3022 	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3023 
3024 	eth_zero_addr(dev->broadcast);
3025 }
3026 
3027 static int macsec_changelink_common(struct net_device *dev,
3028 				    struct nlattr *data[])
3029 {
3030 	struct macsec_secy *secy;
3031 	struct macsec_tx_sc *tx_sc;
3032 
3033 	secy = &macsec_priv(dev)->secy;
3034 	tx_sc = &secy->tx_sc;
3035 
3036 	if (data[IFLA_MACSEC_ENCODING_SA]) {
3037 		struct macsec_tx_sa *tx_sa;
3038 
3039 		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3040 		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3041 
3042 		secy->operational = tx_sa && tx_sa->active;
3043 	}
3044 
3045 	if (data[IFLA_MACSEC_WINDOW])
3046 		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3047 
3048 	if (data[IFLA_MACSEC_ENCRYPT])
3049 		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3050 
3051 	if (data[IFLA_MACSEC_PROTECT])
3052 		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3053 
3054 	if (data[IFLA_MACSEC_INC_SCI])
3055 		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3056 
3057 	if (data[IFLA_MACSEC_ES])
3058 		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3059 
3060 	if (data[IFLA_MACSEC_SCB])
3061 		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3062 
3063 	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3064 		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3065 
3066 	if (data[IFLA_MACSEC_VALIDATION])
3067 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3068 
3069 	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3070 		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3071 		case MACSEC_CIPHER_ID_GCM_AES_128:
3072 		case MACSEC_DEFAULT_CIPHER_ID:
3073 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3074 			break;
3075 		case MACSEC_CIPHER_ID_GCM_AES_256:
3076 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3077 			break;
3078 		default:
3079 			return -EINVAL;
3080 		}
3081 	}
3082 
3083 	return 0;
3084 }
3085 
3086 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3087 			     struct nlattr *data[],
3088 			     struct netlink_ext_ack *extack)
3089 {
3090 	if (!data)
3091 		return 0;
3092 
3093 	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3094 	    data[IFLA_MACSEC_ICV_LEN] ||
3095 	    data[IFLA_MACSEC_SCI] ||
3096 	    data[IFLA_MACSEC_PORT])
3097 		return -EINVAL;
3098 
3099 	return macsec_changelink_common(dev, data);
3100 }
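/* Once a link exists, its identity-defining parameters are frozen:
 * changelink rejects cipher suite, ICV length, SCI and port outright,
 * and only the attributes handled by macsec_changelink_common()
 * (encoding SA, replay window, encrypt/protect/validation flags and
 * so on) may change at runtime. The cipher-suite branch in
 * macsec_changelink_common() is therefore only exercised from
 * macsec_newlink().
 */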
3101 
3102 static void macsec_del_dev(struct macsec_dev *macsec)
3103 {
3104 	int i;
3105 
3106 	while (macsec->secy.rx_sc) {
3107 		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3108 
3109 		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3110 		free_rx_sc(rx_sc);
3111 	}
3112 
3113 	for (i = 0; i < MACSEC_NUM_AN; i++) {
3114 		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3115 
3116 		if (sa) {
3117 			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3118 			clear_tx_sa(sa);
3119 		}
3120 	}
3121 }
3122 
3123 static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3124 {
3125 	struct macsec_dev *macsec = macsec_priv(dev);
3126 	struct net_device *real_dev = macsec->real_dev;
3127 
3128 	unregister_netdevice_queue(dev, head);
3129 	list_del_rcu(&macsec->secys);
3130 	macsec_del_dev(macsec);
3131 	netdev_upper_dev_unlink(real_dev, dev);
3132 
3133 	macsec_generation++;
3134 }
3135 
3136 static void macsec_dellink(struct net_device *dev, struct list_head *head)
3137 {
3138 	struct macsec_dev *macsec = macsec_priv(dev);
3139 	struct net_device *real_dev = macsec->real_dev;
3140 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3141 
3142 	macsec_common_dellink(dev, head);
3143 
3144 	if (list_empty(&rxd->secys)) {
3145 		netdev_rx_handler_unregister(real_dev);
3146 		kfree(rxd);
3147 	}
3148 }
3149 
3150 static int register_macsec_dev(struct net_device *real_dev,
3151 			       struct net_device *dev)
3152 {
3153 	struct macsec_dev *macsec = macsec_priv(dev);
3154 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3155 
3156 	if (!rxd) {
3157 		int err;
3158 
3159 		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3160 		if (!rxd)
3161 			return -ENOMEM;
3162 
3163 		INIT_LIST_HEAD(&rxd->secys);
3164 
3165 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3166 						 rxd);
3167 		if (err < 0) {
3168 			kfree(rxd);
3169 			return err;
3170 		}
3171 	}
3172 
3173 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3174 	return 0;
3175 }
3176 
3177 static bool sci_exists(struct net_device *dev, sci_t sci)
3178 {
3179 	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3180 	struct macsec_dev *macsec;
3181 
3182 	list_for_each_entry(macsec, &rxd->secys, secys) {
3183 		if (macsec->secy.sci == sci)
3184 			return true;
3185 	}
3186 
3187 	return false;
3188 }
3189 
3190 static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3191 {
3192 	return make_sci(dev->dev_addr, port);
3193 }
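/* The SCI is the 64-bit concatenation of the device's 6-byte MAC
 * address and a 2-byte port number, built by make_sci() (defined
 * earlier in this file); MACSEC_PORT_ES is the fallback port when
 * neither an explicit SCI nor a port attribute is supplied.
 */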
3194 
3195 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3196 {
3197 	struct macsec_dev *macsec = macsec_priv(dev);
3198 	struct macsec_secy *secy = &macsec->secy;
3199 
3200 	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3201 	if (!macsec->stats)
3202 		return -ENOMEM;
3203 
3204 	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3205 	if (!secy->tx_sc.stats) {
3206 		free_percpu(macsec->stats);
3207 		return -ENOMEM;
3208 	}
3209 
3210 	if (sci == MACSEC_UNDEF_SCI)
3211 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3212 
3213 	secy->netdev = dev;
3214 	secy->operational = true;
3215 	secy->key_len = DEFAULT_SAK_LEN;
3216 	secy->icv_len = icv_len;
3217 	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3218 	secy->protect_frames = true;
3219 	secy->replay_protect = false;
3220 
3221 	secy->sci = sci;
3222 	secy->tx_sc.active = true;
3223 	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3224 	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3225 	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3226 	secy->tx_sc.end_station = false;
3227 	secy->tx_sc.scb = false;
3228 
3229 	return 0;
3230 }
3231 
3232 static int macsec_newlink(struct net *net, struct net_device *dev,
3233 			  struct nlattr *tb[], struct nlattr *data[],
3234 			  struct netlink_ext_ack *extack)
3235 {
3236 	struct macsec_dev *macsec = macsec_priv(dev);
3237 	struct net_device *real_dev;
3238 	int err;
3239 	sci_t sci;
3240 	u8 icv_len = DEFAULT_ICV_LEN;
3241 	rx_handler_func_t *rx_handler;
3242 
3243 	if (!tb[IFLA_LINK])
3244 		return -EINVAL;
3245 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
3246 	if (!real_dev)
3247 		return -ENODEV;
3248 
3249 	dev->priv_flags |= IFF_MACSEC;
3250 
3251 	macsec->real_dev = real_dev;
3252 
3253 	if (data && data[IFLA_MACSEC_ICV_LEN])
3254 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3255 	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
3256 
3257 	rx_handler = rtnl_dereference(real_dev->rx_handler);
3258 	if (rx_handler && rx_handler != macsec_handle_frame)
3259 		return -EBUSY;
3260 
3261 	err = register_netdevice(dev);
3262 	if (err < 0)
3263 		return err;
3264 
3265 	dev_hold(real_dev);
3266 
3267 	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
3268 	netdev_lockdep_set_classes(dev);
3269 	lockdep_set_class_and_subclass(&dev->addr_list_lock,
3270 				       &macsec_netdev_addr_lock_key,
3271 				       macsec_get_nest_level(dev));
3272 
3273 	err = netdev_upper_dev_link(real_dev, dev, extack);
3274 	if (err < 0)
3275 		goto unregister;
3276 
3277 	/* The device must already be registered so that ->init has run
3278 	 * and the MAC address is set before deriving the default SCI.
3279 	 */
3280 	if (data && data[IFLA_MACSEC_SCI])
3281 		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
3282 	else if (data && data[IFLA_MACSEC_PORT])
3283 		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
3284 	else
3285 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3286 
3287 	if (rx_handler && sci_exists(real_dev, sci)) {
3288 		err = -EBUSY;
3289 		goto unlink;
3290 	}
3291 
3292 	err = macsec_add_dev(dev, sci, icv_len);
3293 	if (err)
3294 		goto unlink;
3295 
3296 	if (data) {
3297 		err = macsec_changelink_common(dev, data);
3298 		if (err)
3299 			goto del_dev;
3300 	}
3301 
3302 	err = register_macsec_dev(real_dev, dev);
3303 	if (err < 0)
3304 		goto del_dev;
3305 
3306 	netif_stacked_transfer_operstate(real_dev, dev);
3307 	linkwatch_fire_event(dev);
3308 
3309 	macsec_generation++;
3310 
3311 	return 0;
3312 
3313 del_dev:
3314 	macsec_del_dev(macsec);
3315 unlink:
3316 	netdev_upper_dev_unlink(real_dev, dev);
3317 unregister:
3318 	unregister_netdevice(dev);
3319 	return err;
3320 }
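/* A typical creation request, as issued by iproute2 (illustrative
 * command line; device names are examples only):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *
 * IFLA_LINK carries eth0's ifindex, and with neither IFLA_MACSEC_SCI
 * nor IFLA_MACSEC_PORT present, the SCI defaults to
 * dev_to_sci(dev, MACSEC_PORT_ES) as above.
 */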
3321 
3322 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
3323 				struct netlink_ext_ack *extack)
3324 {
3325 	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3326 	u8 icv_len = DEFAULT_ICV_LEN;
3327 	int flag;
3328 	bool es, scb, sci;
3329 
3330 	if (!data)
3331 		return 0;
3332 
3333 	if (data[IFLA_MACSEC_CIPHER_SUITE])
3334 		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
3335 
3336 	if (data[IFLA_MACSEC_ICV_LEN]) {
3337 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3338 		if (icv_len != DEFAULT_ICV_LEN) {
3339 			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
3340 			struct crypto_aead *dummy_tfm;
3341 
3342 			dummy_tfm = macsec_alloc_tfm(dummy_key,
3343 						     DEFAULT_SAK_LEN,
3344 						     icv_len);
3345 			if (IS_ERR(dummy_tfm))
3346 				return PTR_ERR(dummy_tfm);
3347 			crypto_free_aead(dummy_tfm);
3348 		}
3349 	}
3350 
3351 	switch (csid) {
3352 	case MACSEC_CIPHER_ID_GCM_AES_128:
3353 	case MACSEC_CIPHER_ID_GCM_AES_256:
3354 	case MACSEC_DEFAULT_CIPHER_ID:
3355 		if (icv_len < MACSEC_MIN_ICV_LEN ||
3356 		    icv_len > MACSEC_STD_ICV_LEN)
3357 			return -EINVAL;
3358 		break;
3359 	default:
3360 		return -EINVAL;
3361 	}
3362 
3363 	if (data[IFLA_MACSEC_ENCODING_SA]) {
3364 		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
3365 			return -EINVAL;
3366 	}
3367 
3368 	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
3369 	     flag < IFLA_MACSEC_VALIDATION;
3370 	     flag++) {
3371 		if (data[flag]) {
3372 			if (nla_get_u8(data[flag]) > 1)
3373 				return -EINVAL;
3374 		}
3375 	}
3376 
3377 	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
3378 	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
3379 	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
3380 
3381 	if ((sci && (scb || es)) || (scb && es))
3382 		return -EINVAL;
3383 
3384 	if (data[IFLA_MACSEC_VALIDATION] &&
3385 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3386 		return -EINVAL;
3387 
3388 	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3389 	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3390 	    !data[IFLA_MACSEC_WINDOW])
3391 		return -EINVAL;
3392 
3393 	return 0;
3394 }
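/* The es/scb/sci exclusivity check above mirrors the SecTAG TCI
 * rules: the ES and SCB bits are only meaningful when no explicit
 * SCI is carried in the tag, so at most one of the three may be
 * requested. Enabling replay protection is likewise only accepted
 * together with an explicit replay window.
 */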
3395 
3396 static struct net *macsec_get_link_net(const struct net_device *dev)
3397 {
3398 	return dev_net(macsec_priv(dev)->real_dev);
3399 }
3400 
3401 static size_t macsec_get_size(const struct net_device *dev)
3402 {
3403 	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
3404 		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
3405 		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
3406 		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
3407 		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
3408 		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
3409 		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
3410 		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
3411 		nla_total_size(1) + /* IFLA_MACSEC_ES */
3412 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
3413 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
3414 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
3415 		0;
3416 }
3417 
3418 static int macsec_fill_info(struct sk_buff *skb,
3419 			    const struct net_device *dev)
3420 {
3421 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
3422 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3423 	u64 csid;
3424 
3425 	switch (secy->key_len) {
3426 	case MACSEC_GCM_AES_128_SAK_LEN:
3427 		csid = MACSEC_DEFAULT_CIPHER_ID;
3428 		break;
3429 	case MACSEC_GCM_AES_256_SAK_LEN:
3430 		csid = MACSEC_CIPHER_ID_GCM_AES_256;
3431 		break;
3432 	default:
3433 		goto nla_put_failure;
3434 	}
3435 
3436 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
3437 			IFLA_MACSEC_PAD) ||
3438 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3439 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
3440 			      csid, IFLA_MACSEC_PAD) ||
3441 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3442 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3443 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
3444 	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
3445 	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
3446 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
3447 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
3448 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
3449 	    0)
3450 		goto nla_put_failure;
3451 
3452 	if (secy->replay_protect) {
3453 		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
3454 			goto nla_put_failure;
3455 	}
3456 
3457 	return 0;
3458 
3459 nla_put_failure:
3460 	return -EMSGSIZE;
3461 }
3462 
3463 static struct rtnl_link_ops macsec_link_ops __read_mostly = {
3464 	.kind		= "macsec",
3465 	.priv_size	= sizeof(struct macsec_dev),
3466 	.maxtype	= IFLA_MACSEC_MAX,
3467 	.policy		= macsec_rtnl_policy,
3468 	.setup		= macsec_setup,
3469 	.validate	= macsec_validate_attr,
3470 	.newlink	= macsec_newlink,
3471 	.changelink	= macsec_changelink,
3472 	.dellink	= macsec_dellink,
3473 	.get_size	= macsec_get_size,
3474 	.fill_info	= macsec_fill_info,
3475 	.get_link_net	= macsec_get_link_net,
3476 };
3477 
3478 static bool is_macsec_master(struct net_device *dev)
3479 {
3480 	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
3481 }
3482 
3483 static int macsec_notify(struct notifier_block *this, unsigned long event,
3484 			 void *ptr)
3485 {
3486 	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
3487 	LIST_HEAD(head);
3488 
3489 	if (!is_macsec_master(real_dev))
3490 		return NOTIFY_DONE;
3491 
3492 	switch (event) {
3493 	case NETDEV_DOWN:
3494 	case NETDEV_UP:
3495 	case NETDEV_CHANGE: {
3496 		struct macsec_dev *m, *n;
3497 		struct macsec_rxh_data *rxd;
3498 
3499 		rxd = macsec_data_rtnl(real_dev);
3500 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3501 			struct net_device *dev = m->secy.netdev;
3502 
3503 			netif_stacked_transfer_operstate(real_dev, dev);
3504 		}
3505 		break;
3506 	}
3507 	case NETDEV_UNREGISTER: {
3508 		struct macsec_dev *m, *n;
3509 		struct macsec_rxh_data *rxd;
3510 
3511 		rxd = macsec_data_rtnl(real_dev);
3512 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3513 			macsec_common_dellink(m->secy.netdev, &head);
3514 		}
3515 
3516 		netdev_rx_handler_unregister(real_dev);
3517 		kfree(rxd);
3518 
3519 		unregister_netdevice_many(&head);
3520 		break;
3521 	}
3522 	case NETDEV_CHANGEMTU: {
3523 		struct macsec_dev *m;
3524 		struct macsec_rxh_data *rxd;
3525 
3526 		rxd = macsec_data_rtnl(real_dev);
3527 		list_for_each_entry(m, &rxd->secys, secys) {
3528 			struct net_device *dev = m->secy.netdev;
3529 			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
3530 							    macsec_extra_len(true));
3531 
3532 			if (dev->mtu > mtu)
3533 				dev_set_mtu(dev, mtu);
3534 		}
3535 	}
3536 	}
3537 
3538 	return NOTIFY_OK;
3539 }
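/* The notifier keeps every macsec device consistent with its lower
 * device: carrier/operstate changes are propagated, unregistering
 * the real device tears down all stacked SecYs in one batch, and an
 * MTU reduction on the real device clamps any child whose MTU no
 * longer fits within the SecTAG + ICV overhead.
 */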
3540 
3541 static struct notifier_block macsec_notifier = {
3542 	.notifier_call = macsec_notify,
3543 };
3544 
3545 static int __init macsec_init(void)
3546 {
3547 	int err;
3548 
3549 	pr_info("MACsec IEEE 802.1AE\n");
3550 	err = register_netdevice_notifier(&macsec_notifier);
3551 	if (err)
3552 		return err;
3553 
3554 	err = rtnl_link_register(&macsec_link_ops);
3555 	if (err)
3556 		goto notifier;
3557 
3558 	err = genl_register_family(&macsec_fam);
3559 	if (err)
3560 		goto rtnl;
3561 
3562 	return 0;
3563 
3564 rtnl:
3565 	rtnl_link_unregister(&macsec_link_ops);
3566 notifier:
3567 	unregister_netdevice_notifier(&macsec_notifier);
3568 	return err;
3569 }
3570 
3571 static void __exit macsec_exit(void)
3572 {
3573 	genl_unregister_family(&macsec_fam);
3574 	rtnl_link_unregister(&macsec_link_ops);
3575 	unregister_netdevice_notifier(&macsec_notifier);
3576 	rcu_barrier();
3577 }
3578 
3579 module_init(macsec_init);
3580 module_exit(macsec_exit);
3581 
3582 MODULE_ALIAS_RTNL_LINK("macsec");
3583 MODULE_ALIAS_GENL_FAMILY("macsec");
3584 
3585 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
3586 MODULE_LICENSE("GPL v2");
3587