// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

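/* Example (illustrative values): pn_same_half(0x7fffffff, 0x80000000) is
 * false, since the two packet numbers sit on opposite sides of the 2^31
 * boundary, while pn_same_half(0x80000000, 0xffffffff) is true.
 */
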
struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++)	{
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

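/* Worked example (hypothetical values): for a port with MAC address
 * 52:54:00:12:34:56 and the End Station port number 0x0001, make_sci()
 * yields the 8-byte SCI 52-54-00-12-34-56-00-01, i.e. the MAC address
 * followed by the port number in network byte order.
 */
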
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

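/* Worked example for the three helpers above: with the SCI present, the
 * SecTAG is 6 + 8 = 14 bytes, macsec_hdr_len() is 14 + ETH_HLEN = 28, and
 * macsec_extra_len() is 14 + 2 = 16, the overhead added to a frame (the
 * SecTAG plus the MACsec EtherType that precedes the original one).
 * Without the SCI these become 6, 20 and 8 bytes respectively.
 */
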
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

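/* Illustration (hypothetical configuration): an encrypting SecY that sends
 * the SCI and encodes on AN 1 produces a TCI/AN octet of
 * MACSEC_TCI_SC | MACSEC_TCI_CONFID | 1 = 0x20 | 0x0c | 0x01 = 0x2d.
 */
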
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

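/* Both layouts above fill the GCM_AES_IV_LEN = 12 bytes that gcm(aes)
 * expects: normally the 8-byte SCI followed by the 32-bit PN, and in XPN
 * mode (SSCI XOR salt) followed by (64-bit PN XOR salt), per the 802.1AEbw
 * amendment. As a sketch, an all-zero salt (hypothetical, not a sane key
 * choice) would reduce the XPN IV to simply SSCI || PN.
 */
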
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

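/* Wrap example for tx_sa_update_pn(): in non-XPN mode, once
 * next_pn_halves.lower reaches 0xffffffff that value is still handed out
 * for one final frame, the increment wraps the 64-bit next_pn to 0, and
 * __macsec_pn_wrapped() takes the SA (and, when protect_frames is set, the
 * SecY) out of service so that no packet number is ever reused.
 */
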
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, len);
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

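/* macsec_alloc_req() packs everything the crypto request needs into a
 * single GFP_ATOMIC allocation, laid out roughly as
 *
 *   [ aead_request + tfm context | 12-byte IV | aligned scatterlist[] ]
 *
 * so the hot path pays for one allocation per skb. The caller receives
 * pointers into the buffer via *iv and *sg, and freeing the request frees
 * the IV and scatterlist along with it.
 */
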
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

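/* A note on the AAD/crypt split in macsec_encrypt() above: when encrypting,
 * the Ethernet addresses plus SecTAG are passed as associated data and the
 * payload as plaintext, so GCM encrypts the payload and writes the ICV into
 * the icv_len bytes reserved by skb_put(). In integrity-only mode the whole
 * frame except the ICV area is associated data with a zero-length
 * plaintext, so GCM emits only the authentication tag.
 */
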
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		secy->netdev->stats.rx_dropped++;
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			secy->netdev->stats.rx_errors++;
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* use "pn + 1 >" rather than "pn >=" to support PN wraparound in XPN */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

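/* Replay window example for macsec_post_decrypt() (illustrative numbers):
 * with next_pn_halves.lower at 1000 and replay_window at 64, lowest_pn is
 * 936. With replay protection on, a frame with PN 900 is counted as
 * InPktsLate and dropped; PN 950 falls inside the window and, if it
 * validates, is counted as InPktsOK; any accepted PN >= 1000 advances
 * next_pn to PN + 1.
 */
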
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				__netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		secy->netdev->stats.rx_errors++;
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			secy->netdev->stats.rx_errors++;
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_errors++;
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

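/* Usage sketch (hypothetical values): for GCM-AES-128 with the default ICV,
 * macsec_alloc_tfm(key, MACSEC_GCM_AES_128_SAK_LEN, DEFAULT_ICV_LEN)
 * returns a synchronous gcm(aes) transform keyed with the 16-byte SAK and
 * a full 16-byte tag; passing a 32-byte SAK instead selects GCM-AES-256 on
 * the same transform.
 */
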
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
1683 	ctx->prepare = true;
1684 	ret = (*func)(ctx);
1685 	if (ret)
1686 		goto phy_unlock;
1687 
1688 	/* Phase II: commit. This step cannot fail. */
1689 	ctx->prepare = false;
1690 	ret = (*func)(ctx);
1691 	/* This should never happen: commit is not allowed to fail */
1692 	if (unlikely(ret))
1693 		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
1694 
1695 phy_unlock:
1696 	if (ctx->offload == MACSEC_OFFLOAD_PHY)
1697 		mutex_unlock(&ctx->phydev->lock);
1698 
1699 	return ret;
1700 }
1701 
1702 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1703 {
1704 	if (!attrs[MACSEC_ATTR_SA_CONFIG])
1705 		return -EINVAL;
1706 
1707 	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
1708 		return -EINVAL;
1709 
1710 	return 0;
1711 }
1712 
1713 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1714 {
1715 	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1716 		return -EINVAL;
1717 
1718 	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
1719 		return -EINVAL;
1720 
1721 	return 0;
1722 }
1723 
1724 static bool validate_add_rxsa(struct nlattr **attrs)
1725 {
1726 	if (!attrs[MACSEC_SA_ATTR_AN] ||
1727 	    !attrs[MACSEC_SA_ATTR_KEY] ||
1728 	    !attrs[MACSEC_SA_ATTR_KEYID])
1729 		return false;
1730 
1731 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1732 		return false;
1733 
1734 	if (attrs[MACSEC_SA_ATTR_PN] &&
1735 	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1736 		return false;
1737 
1738 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1739 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1740 			return false;
1741 	}
1742 
1743 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1744 		return false;
1745 
1746 	return true;
1747 }
1748 
1749 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1750 {
1751 	struct net_device *dev;
1752 	struct nlattr **attrs = info->attrs;
1753 	struct macsec_secy *secy;
1754 	struct macsec_rx_sc *rx_sc;
1755 	struct macsec_rx_sa *rx_sa;
1756 	unsigned char assoc_num;
1757 	int pn_len;
1758 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1759 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1760 	int err;
1761 
1762 	if (!attrs[MACSEC_ATTR_IFINDEX])
1763 		return -EINVAL;
1764 
1765 	if (parse_sa_config(attrs, tb_sa))
1766 		return -EINVAL;
1767 
1768 	if (parse_rxsc_config(attrs, tb_rxsc))
1769 		return -EINVAL;
1770 
1771 	if (!validate_add_rxsa(tb_sa))
1772 		return -EINVAL;
1773 
1774 	rtnl_lock();
1775 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1776 	if (IS_ERR(rx_sc)) {
1777 		rtnl_unlock();
1778 		return PTR_ERR(rx_sc);
1779 	}
1780 
1781 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1782 
1783 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1784 		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1785 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1786 		rtnl_unlock();
1787 		return -EINVAL;
1788 	}
1789 
1790 	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1791 	if (tb_sa[MACSEC_SA_ATTR_PN] &&
1792 	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1793 		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
1794 			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1795 		rtnl_unlock();
1796 		return -EINVAL;
1797 	}
1798 
1799 	if (secy->xpn) {
1800 		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
1801 			rtnl_unlock();
1802 			return -EINVAL;
1803 		}
1804 
1805 		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
1806 			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
1807 				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
1808 				  MACSEC_SALT_LEN);
1809 			rtnl_unlock();
1810 			return -EINVAL;
1811 		}
1812 	}
1813 
1814 	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1815 	if (rx_sa) {
1816 		rtnl_unlock();
1817 		return -EBUSY;
1818 	}
1819 
1820 	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1821 	if (!rx_sa) {
1822 		rtnl_unlock();
1823 		return -ENOMEM;
1824 	}
1825 
1826 	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1827 			 secy->key_len, secy->icv_len);
1828 	if (err < 0) {
1829 		kfree(rx_sa);
1830 		rtnl_unlock();
1831 		return err;
1832 	}
1833 
1834 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
1835 		spin_lock_bh(&rx_sa->lock);
1836 		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
1837 		spin_unlock_bh(&rx_sa->lock);
1838 	}
1839 
1840 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1841 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1842 
1843 	rx_sa->sc = rx_sc;
1844 
1845 	/* If h/w offloading is available, propagate to the device */
1846 	if (macsec_is_offloaded(netdev_priv(dev))) {
1847 		const struct macsec_ops *ops;
1848 		struct macsec_context ctx;
1849 
1850 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1851 		if (!ops) {
1852 			err = -EOPNOTSUPP;
1853 			goto cleanup;
1854 		}
1855 
1856 		ctx.sa.assoc_num = assoc_num;
1857 		ctx.sa.rx_sa = rx_sa;
1858 		ctx.secy = secy;
1859 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1860 		       secy->key_len);
1861 
1862 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
1863 		if (err)
1864 			goto cleanup;
1865 	}
1866 
1867 	if (secy->xpn) {
1868 		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
1869 		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
1870 			   MACSEC_SALT_LEN);
1871 	}
1872 
1873 	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1874 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1875 
1876 	rtnl_unlock();
1877 
1878 	return 0;
1879 
1880 cleanup:
1881 	macsec_rxsa_put(rx_sa);
1882 	rtnl_unlock();
1883 	return err;
1884 }
1885 
1886 static bool validate_add_rxsc(struct nlattr **attrs)
1887 {
1888 	if (!attrs[MACSEC_RXSC_ATTR_SCI])
1889 		return false;
1890 
1891 	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1892 		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1893 			return false;
1894 	}
1895 
1896 	return true;
1897 }
1898 
1899 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1900 {
1901 	struct net_device *dev;
1902 	sci_t sci = MACSEC_UNDEF_SCI;
1903 	struct nlattr **attrs = info->attrs;
1904 	struct macsec_rx_sc *rx_sc;
1905 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1906 	struct macsec_secy *secy;
1907 	bool was_active;
1908 	int ret;
1909 
1910 	if (!attrs[MACSEC_ATTR_IFINDEX])
1911 		return -EINVAL;
1912 
1913 	if (parse_rxsc_config(attrs, tb_rxsc))
1914 		return -EINVAL;
1915 
1916 	if (!validate_add_rxsc(tb_rxsc))
1917 		return -EINVAL;
1918 
1919 	rtnl_lock();
1920 	dev = get_dev_from_nl(genl_info_net(info), attrs);
1921 	if (IS_ERR(dev)) {
1922 		rtnl_unlock();
1923 		return PTR_ERR(dev);
1924 	}
1925 
1926 	secy = &macsec_priv(dev)->secy;
1927 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1928 
1929 	rx_sc = create_rx_sc(dev, sci);
1930 	if (IS_ERR(rx_sc)) {
1931 		rtnl_unlock();
1932 		return PTR_ERR(rx_sc);
1933 	}
1934 
1935 	was_active = rx_sc->active;
1936 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1937 		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1938 
1939 	if (macsec_is_offloaded(netdev_priv(dev))) {
1940 		const struct macsec_ops *ops;
1941 		struct macsec_context ctx;
1942 
1943 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
1944 		if (!ops) {
1945 			ret = -EOPNOTSUPP;
1946 			goto cleanup;
1947 		}
1948 
1949 		ctx.rx_sc = rx_sc;
1950 		ctx.secy = secy;
1951 
1952 		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1953 		if (ret)
1954 			goto cleanup;
1955 	}
1956 
1957 	rtnl_unlock();
1958 
1959 	return 0;
1960 
1961 cleanup:
1962 	rx_sc->active = was_active;
1963 	rtnl_unlock();
1964 	return ret;
1965 }
1966 
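/* A new transmit SA must carry an association number, a non-zero
 * initial packet number, the key itself and a key identifier of the
 * expected length; ACTIVE, if present, must be 0 or 1.
 */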
1967 static bool validate_add_txsa(struct nlattr **attrs)
1968 {
1969 	if (!attrs[MACSEC_SA_ATTR_AN] ||
1970 	    !attrs[MACSEC_SA_ATTR_PN] ||
1971 	    !attrs[MACSEC_SA_ATTR_KEY] ||
1972 	    !attrs[MACSEC_SA_ATTR_KEYID])
1973 		return false;
1974 
1975 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1976 		return false;
1977 
1978 	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1979 		return false;
1980 
1981 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1982 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1983 			return false;
1984 	}
1985 
1986 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1987 		return false;
1988 
1989 	return true;
1990 }
1991 
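/* MACSEC_CMD_ADD_TXSA: install transmit SA "assoc_num" on the SecY's
 * single transmit channel. Key, PN and (for XPN) salt lengths are
 * checked here because they depend on the negotiated cipher suite.
 * Installing an active SA in the encoding slot makes the SecY
 * operational; both that flag and the SA are rolled back if the
 * offload call fails.
 */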
1992 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1993 {
1994 	struct net_device *dev;
1995 	struct nlattr **attrs = info->attrs;
1996 	struct macsec_secy *secy;
1997 	struct macsec_tx_sc *tx_sc;
1998 	struct macsec_tx_sa *tx_sa;
1999 	unsigned char assoc_num;
2000 	int pn_len;
2001 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2002 	bool was_operational;
2003 	int err;
2004 
2005 	if (!attrs[MACSEC_ATTR_IFINDEX])
2006 		return -EINVAL;
2007 
2008 	if (parse_sa_config(attrs, tb_sa))
2009 		return -EINVAL;
2010 
2011 	if (!validate_add_txsa(tb_sa))
2012 		return -EINVAL;
2013 
2014 	rtnl_lock();
2015 	dev = get_dev_from_nl(genl_info_net(info), attrs);
2016 	if (IS_ERR(dev)) {
2017 		rtnl_unlock();
2018 		return PTR_ERR(dev);
2019 	}
2020 
2021 	secy = &macsec_priv(dev)->secy;
2022 	tx_sc = &secy->tx_sc;
2023 
2024 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
2025 
2026 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
2027 		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
2028 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
2029 		rtnl_unlock();
2030 		return -EINVAL;
2031 	}
2032 
2033 	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2034 	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2035 		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2036 			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2037 		rtnl_unlock();
2038 		return -EINVAL;
2039 	}
2040 
2041 	if (secy->xpn) {
2042 		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2043 			rtnl_unlock();
2044 			return -EINVAL;
2045 		}
2046 
2047 		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2048 			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2049 				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2050 				  MACSEC_SALT_LEN);
2051 			rtnl_unlock();
2052 			return -EINVAL;
2053 		}
2054 	}
2055 
2056 	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2057 	if (tx_sa) {
2058 		rtnl_unlock();
2059 		return -EBUSY;
2060 	}
2061 
2062 	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2063 	if (!tx_sa) {
2064 		rtnl_unlock();
2065 		return -ENOMEM;
2066 	}
2067 
2068 	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2069 			 secy->key_len, secy->icv_len);
2070 	if (err < 0) {
2071 		kfree(tx_sa);
2072 		rtnl_unlock();
2073 		return err;
2074 	}
2075 
2076 	spin_lock_bh(&tx_sa->lock);
2077 	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2078 	spin_unlock_bh(&tx_sa->lock);
2079 
2080 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2081 		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2082 
2083 	was_operational = secy->operational;
2084 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2085 		secy->operational = true;
2086 
2087 	/* If h/w offloading is available, propagate to the device */
2088 	if (macsec_is_offloaded(netdev_priv(dev))) {
2089 		const struct macsec_ops *ops;
2090 		struct macsec_context ctx;
2091 
2092 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2093 		if (!ops) {
2094 			err = -EOPNOTSUPP;
2095 			goto cleanup;
2096 		}
2097 
2098 		ctx.sa.assoc_num = assoc_num;
2099 		ctx.sa.tx_sa = tx_sa;
2100 		ctx.secy = secy;
2101 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2102 		       secy->key_len);
2103 
2104 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2105 		if (err)
2106 			goto cleanup;
2107 	}
2108 
2109 	if (secy->xpn) {
2110 		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2111 		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2112 			   MACSEC_SALT_LEN);
2113 	}
2114 
2115 	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2116 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2117 
2118 	rtnl_unlock();
2119 
2120 	return 0;
2121 
2122 cleanup:
2123 	secy->operational = was_operational;
2124 	macsec_txsa_put(tx_sa);
2125 	rtnl_unlock();
2126 	return err;
2127 }
2128 
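/* MACSEC_CMD_DEL_RXSA: an SA that is still active cannot be deleted
 * (-EBUSY). The published pointer is cleared first, so RCU readers
 * drain before clear_rx_sa() releases the SA.
 */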
2129 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2130 {
2131 	struct nlattr **attrs = info->attrs;
2132 	struct net_device *dev;
2133 	struct macsec_secy *secy;
2134 	struct macsec_rx_sc *rx_sc;
2135 	struct macsec_rx_sa *rx_sa;
2136 	u8 assoc_num;
2137 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2138 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2139 	int ret;
2140 
2141 	if (!attrs[MACSEC_ATTR_IFINDEX])
2142 		return -EINVAL;
2143 
2144 	if (parse_sa_config(attrs, tb_sa))
2145 		return -EINVAL;
2146 
2147 	if (parse_rxsc_config(attrs, tb_rxsc))
2148 		return -EINVAL;
2149 
2150 	rtnl_lock();
2151 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2152 				 &dev, &secy, &rx_sc, &assoc_num);
2153 	if (IS_ERR(rx_sa)) {
2154 		rtnl_unlock();
2155 		return PTR_ERR(rx_sa);
2156 	}
2157 
2158 	if (rx_sa->active) {
2159 		rtnl_unlock();
2160 		return -EBUSY;
2161 	}
2162 
2163 	/* If h/w offloading is available, propagate to the device */
2164 	if (macsec_is_offloaded(netdev_priv(dev))) {
2165 		const struct macsec_ops *ops;
2166 		struct macsec_context ctx;
2167 
2168 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2169 		if (!ops) {
2170 			ret = -EOPNOTSUPP;
2171 			goto cleanup;
2172 		}
2173 
2174 		ctx.sa.assoc_num = assoc_num;
2175 		ctx.sa.rx_sa = rx_sa;
2176 		ctx.secy = secy;
2177 
2178 		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2179 		if (ret)
2180 			goto cleanup;
2181 	}
2182 
2183 	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2184 	clear_rx_sa(rx_sa);
2185 
2186 	rtnl_unlock();
2187 
2188 	return 0;
2189 
2190 cleanup:
2191 	rtnl_unlock();
2192 	return ret;
2193 }
2194 
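/* MACSEC_CMD_DEL_RXSC: look the channel up by SCI, unlink it from the
 * SecY (del_rx_sc), notify the offload provider if any, then free it;
 * -ENODEV if no channel with that SCI exists.
 */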
2195 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2196 {
2197 	struct nlattr **attrs = info->attrs;
2198 	struct net_device *dev;
2199 	struct macsec_secy *secy;
2200 	struct macsec_rx_sc *rx_sc;
2201 	sci_t sci;
2202 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2203 	int ret;
2204 
2205 	if (!attrs[MACSEC_ATTR_IFINDEX])
2206 		return -EINVAL;
2207 
2208 	if (parse_rxsc_config(attrs, tb_rxsc))
2209 		return -EINVAL;
2210 
2211 	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2212 		return -EINVAL;
2213 
2214 	rtnl_lock();
2215 	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2216 	if (IS_ERR(dev)) {
2217 		rtnl_unlock();
2218 		return PTR_ERR(dev);
2219 	}
2220 
2221 	secy = &macsec_priv(dev)->secy;
2222 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2223 
2224 	rx_sc = del_rx_sc(secy, sci);
2225 	if (!rx_sc) {
2226 		rtnl_unlock();
2227 		return -ENODEV;
2228 	}
2229 
2230 	/* If h/w offloading is available, propagate to the device */
2231 	if (macsec_is_offloaded(netdev_priv(dev))) {
2232 		const struct macsec_ops *ops;
2233 		struct macsec_context ctx;
2234 
2235 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2236 		if (!ops) {
2237 			ret = -EOPNOTSUPP;
2238 			goto cleanup;
2239 		}
2240 
2241 		ctx.rx_sc = rx_sc;
2242 		ctx.secy = secy;
2243 		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2244 		if (ret)
2245 			goto cleanup;
2246 	}
2247 
2248 	free_rx_sc(rx_sc);
2249 	rtnl_unlock();
2250 
2251 	return 0;
2252 
2253 cleanup:
2254 	rtnl_unlock();
2255 	return ret;
2256 }
2257 
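/* MACSEC_CMD_DEL_TXSA: transmit-side counterpart of macsec_del_rxsa();
 * the SA must already be inactive.
 */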
2258 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2259 {
2260 	struct nlattr **attrs = info->attrs;
2261 	struct net_device *dev;
2262 	struct macsec_secy *secy;
2263 	struct macsec_tx_sc *tx_sc;
2264 	struct macsec_tx_sa *tx_sa;
2265 	u8 assoc_num;
2266 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2267 	int ret;
2268 
2269 	if (!attrs[MACSEC_ATTR_IFINDEX])
2270 		return -EINVAL;
2271 
2272 	if (parse_sa_config(attrs, tb_sa))
2273 		return -EINVAL;
2274 
2275 	rtnl_lock();
2276 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2277 				 &dev, &secy, &tx_sc, &assoc_num);
2278 	if (IS_ERR(tx_sa)) {
2279 		rtnl_unlock();
2280 		return PTR_ERR(tx_sa);
2281 	}
2282 
2283 	if (tx_sa->active) {
2284 		rtnl_unlock();
2285 		return -EBUSY;
2286 	}
2287 
2288 	/* If h/w offloading is available, propagate to the device */
2289 	if (macsec_is_offloaded(netdev_priv(dev))) {
2290 		const struct macsec_ops *ops;
2291 		struct macsec_context ctx;
2292 
2293 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2294 		if (!ops) {
2295 			ret = -EOPNOTSUPP;
2296 			goto cleanup;
2297 		}
2298 
2299 		ctx.sa.assoc_num = assoc_num;
2300 		ctx.sa.tx_sa = tx_sa;
2301 		ctx.secy = secy;
2302 
2303 		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2304 		if (ret)
2305 			goto cleanup;
2306 	}
2307 
2308 	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2309 	clear_tx_sa(tx_sa);
2310 
2311 	rtnl_unlock();
2312 
2313 	return 0;
2314 
2315 cleanup:
2316 	rtnl_unlock();
2317 	return ret;
2318 }
2319 
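/* SA updates may toggle ACTIVE or rewrite the PN, but never change key
 * material: KEY, KEYID, SSCI and SALT are all rejected here.
 */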
2320 static bool validate_upd_sa(struct nlattr **attrs)
2321 {
2322 	if (!attrs[MACSEC_SA_ATTR_AN] ||
2323 	    attrs[MACSEC_SA_ATTR_KEY] ||
2324 	    attrs[MACSEC_SA_ATTR_KEYID] ||
2325 	    attrs[MACSEC_SA_ATTR_SSCI] ||
2326 	    attrs[MACSEC_SA_ATTR_SALT])
2327 		return false;
2328 
2329 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2330 		return false;
2331 
2332 	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2333 		return false;
2334 
2335 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2336 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2337 			return false;
2338 	}
2339 
2340 	return true;
2341 }
2342 
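/* MACSEC_CMD_UPD_TXSA: update the PN and/or active flag of an existing
 * transmit SA. The previous PN halves, active state and the SecY's
 * operational flag are saved up front so a failed offload call can
 * restore them.
 */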
2343 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2344 {
2345 	struct nlattr **attrs = info->attrs;
2346 	struct net_device *dev;
2347 	struct macsec_secy *secy;
2348 	struct macsec_tx_sc *tx_sc;
2349 	struct macsec_tx_sa *tx_sa;
2350 	u8 assoc_num;
2351 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2352 	bool was_operational, was_active;
2353 	pn_t prev_pn;
2354 	int ret = 0;
2355 
2356 	prev_pn.full64 = 0;
2357 
2358 	if (!attrs[MACSEC_ATTR_IFINDEX])
2359 		return -EINVAL;
2360 
2361 	if (parse_sa_config(attrs, tb_sa))
2362 		return -EINVAL;
2363 
2364 	if (!validate_upd_sa(tb_sa))
2365 		return -EINVAL;
2366 
2367 	rtnl_lock();
2368 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2369 				 &dev, &secy, &tx_sc, &assoc_num);
2370 	if (IS_ERR(tx_sa)) {
2371 		rtnl_unlock();
2372 		return PTR_ERR(tx_sa);
2373 	}
2374 
2375 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2376 		int pn_len;
2377 
2378 		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2379 		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2380 			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2381 				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2382 			rtnl_unlock();
2383 			return -EINVAL;
2384 		}
2385 
2386 		spin_lock_bh(&tx_sa->lock);
2387 		prev_pn = tx_sa->next_pn_halves;
2388 		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2389 		spin_unlock_bh(&tx_sa->lock);
2390 	}
2391 
2392 	was_active = tx_sa->active;
2393 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2394 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2395 
2396 	was_operational = secy->operational;
2397 	if (assoc_num == tx_sc->encoding_sa)
2398 		secy->operational = tx_sa->active;
2399 
2400 	/* If h/w offloading is available, propagate to the device */
2401 	if (macsec_is_offloaded(netdev_priv(dev))) {
2402 		const struct macsec_ops *ops;
2403 		struct macsec_context ctx;
2404 
2405 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2406 		if (!ops) {
2407 			ret = -EOPNOTSUPP;
2408 			goto cleanup;
2409 		}
2410 
2411 		ctx.sa.assoc_num = assoc_num;
2412 		ctx.sa.tx_sa = tx_sa;
2413 		ctx.secy = secy;
2414 
2415 		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2416 		if (ret)
2417 			goto cleanup;
2418 	}
2419 
2420 	rtnl_unlock();
2421 
2422 	return 0;
2423 
2424 cleanup:
2425 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2426 		spin_lock_bh(&tx_sa->lock);
2427 		tx_sa->next_pn_halves = prev_pn;
2428 		spin_unlock_bh(&tx_sa->lock);
2429 	}
2430 	tx_sa->active = was_active;
2431 	secy->operational = was_operational;
2432 	rtnl_unlock();
2433 	return ret;
2434 }
2435 
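/* MACSEC_CMD_UPD_RXSA: receive-side counterpart of macsec_upd_txsa(),
 * with the same save-and-restore scheme around the offload call.
 */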
2436 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2437 {
2438 	struct nlattr **attrs = info->attrs;
2439 	struct net_device *dev;
2440 	struct macsec_secy *secy;
2441 	struct macsec_rx_sc *rx_sc;
2442 	struct macsec_rx_sa *rx_sa;
2443 	u8 assoc_num;
2444 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2445 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2446 	bool was_active;
2447 	pn_t prev_pn;
2448 	int ret = 0;
2449 
2450 	prev_pn.full64 = 0;
2451 
2452 	if (!attrs[MACSEC_ATTR_IFINDEX])
2453 		return -EINVAL;
2454 
2455 	if (parse_rxsc_config(attrs, tb_rxsc))
2456 		return -EINVAL;
2457 
2458 	if (parse_sa_config(attrs, tb_sa))
2459 		return -EINVAL;
2460 
2461 	if (!validate_upd_sa(tb_sa))
2462 		return -EINVAL;
2463 
2464 	rtnl_lock();
2465 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2466 				 &dev, &secy, &rx_sc, &assoc_num);
2467 	if (IS_ERR(rx_sa)) {
2468 		rtnl_unlock();
2469 		return PTR_ERR(rx_sa);
2470 	}
2471 
2472 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2473 		int pn_len;
2474 
2475 		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2476 		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2477 			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2478 				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2479 			rtnl_unlock();
2480 			return -EINVAL;
2481 		}
2482 
2483 		spin_lock_bh(&rx_sa->lock);
2484 		prev_pn = rx_sa->next_pn_halves;
2485 		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2486 		spin_unlock_bh(&rx_sa->lock);
2487 	}
2488 
2489 	was_active = rx_sa->active;
2490 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2491 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2492 
2493 	/* If h/w offloading is available, propagate to the device */
2494 	if (macsec_is_offloaded(netdev_priv(dev))) {
2495 		const struct macsec_ops *ops;
2496 		struct macsec_context ctx;
2497 
2498 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2499 		if (!ops) {
2500 			ret = -EOPNOTSUPP;
2501 			goto cleanup;
2502 		}
2503 
2504 		ctx.sa.assoc_num = assoc_num;
2505 		ctx.sa.rx_sa = rx_sa;
2506 		ctx.secy = secy;
2507 
2508 		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2509 		if (ret)
2510 			goto cleanup;
2511 	}
2512 
2513 	rtnl_unlock();
2514 	return 0;
2515 
2516 cleanup:
2517 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2518 		spin_lock_bh(&rx_sa->lock);
2519 		rx_sa->next_pn_halves = prev_pn;
2520 		spin_unlock_bh(&rx_sa->lock);
2521 	}
2522 	rx_sa->active = was_active;
2523 	rtnl_unlock();
2524 	return ret;
2525 }
2526 
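/* MACSEC_CMD_UPD_RXSC: toggling a channel's active flag also keeps
 * secy->n_rx_sc, the count of active receive channels, in sync; both
 * are restored if the offload call fails.
 */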
2527 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2528 {
2529 	struct nlattr **attrs = info->attrs;
2530 	struct net_device *dev;
2531 	struct macsec_secy *secy;
2532 	struct macsec_rx_sc *rx_sc;
2533 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2534 	unsigned int prev_n_rx_sc;
2535 	bool was_active;
2536 	int ret;
2537 
2538 	if (!attrs[MACSEC_ATTR_IFINDEX])
2539 		return -EINVAL;
2540 
2541 	if (parse_rxsc_config(attrs, tb_rxsc))
2542 		return -EINVAL;
2543 
2544 	if (!validate_add_rxsc(tb_rxsc))
2545 		return -EINVAL;
2546 
2547 	rtnl_lock();
2548 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2549 	if (IS_ERR(rx_sc)) {
2550 		rtnl_unlock();
2551 		return PTR_ERR(rx_sc);
2552 	}
2553 
2554 	was_active = rx_sc->active;
2555 	prev_n_rx_sc = secy->n_rx_sc;
2556 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2557 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2558 
2559 		if (rx_sc->active != new)
2560 			secy->n_rx_sc += new ? 1 : -1;
2561 
2562 		rx_sc->active = new;
2563 	}
2564 
2565 	/* If h/w offloading is available, propagate to the device */
2566 	if (macsec_is_offloaded(netdev_priv(dev))) {
2567 		const struct macsec_ops *ops;
2568 		struct macsec_context ctx;
2569 
2570 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2571 		if (!ops) {
2572 			ret = -EOPNOTSUPP;
2573 			goto cleanup;
2574 		}
2575 
2576 		ctx.rx_sc = rx_sc;
2577 		ctx.secy = secy;
2578 
2579 		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2580 		if (ret)
2581 			goto cleanup;
2582 	}
2583 
2584 	rtnl_unlock();
2585 
2586 	return 0;
2587 
2588 cleanup:
2589 	secy->n_rx_sc = prev_n_rx_sc;
2590 	rx_sc->active = was_active;
2591 	rtnl_unlock();
2592 	return ret;
2593 }
2594 
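/* A SecY counts as configured once it has an active receive channel or
 * any transmit SA installed; the offload mode can only be changed
 * before that point.
 */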
2595 static bool macsec_is_configured(struct macsec_dev *macsec)
2596 {
2597 	struct macsec_secy *secy = &macsec->secy;
2598 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2599 	int i;
2600 
2601 	if (secy->n_rx_sc > 0)
2602 		return true;
2603 
2604 	for (i = 0; i < MACSEC_NUM_AN; i++)
2605 		if (tx_sc->sa[i])
2606 			return true;
2607 
2608 	return false;
2609 }
2610 
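/* MACSEC_CMD_UPD_OFFLOAD: switch a SecY between software and offloaded
 * operation (exercised by e.g. "ip macsec offload macsec0 mac" in
 * iproute2; syntax illustrative). Rules cannot be migrated, so the
 * device must be down and not yet configured; the new provider gets
 * mdo_add_secy(), or the previous one mdo_del_secy() when turning
 * offload off.
 */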
2611 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2612 {
2613 	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2614 	enum macsec_offload offload, prev_offload;
2615 	int (*func)(struct macsec_context *ctx);
2616 	struct nlattr **attrs = info->attrs;
2617 	struct net_device *dev;
2618 	const struct macsec_ops *ops;
2619 	struct macsec_context ctx;
2620 	struct macsec_dev *macsec;
2621 	int ret;
2622 
2623 	if (!attrs[MACSEC_ATTR_IFINDEX])
2624 		return -EINVAL;
2625 
2626 	if (!attrs[MACSEC_ATTR_OFFLOAD])
2627 		return -EINVAL;
2628 
2629 	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2630 					attrs[MACSEC_ATTR_OFFLOAD],
2631 					macsec_genl_offload_policy, NULL))
2632 		return -EINVAL;
2633 
	/* The device lookup and the checks below are only stable under the
	 * rtnl lock (every other handler in this file calls
	 * get_dev_from_nl() with it held), so take it first and route all
	 * exits through a common unlock.
	 */
	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload) {
		ret = 0;
		goto out;
	}

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Check if the net device is busy. */
	if (netif_running(dev)) {
		ret = -EBUSY;
		goto out;
	}
2656 
2657 	prev_offload = macsec->offload;
2658 	macsec->offload = offload;
2659 
2660 	/* Check if the device already has rules configured: we do not support
2661 	 * rules migration.
2662 	 */
2663 	if (macsec_is_configured(macsec)) {
2664 		ret = -EBUSY;
2665 		goto rollback;
2666 	}
2667 
2668 	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2669 			       macsec, &ctx);
2670 	if (!ops) {
2671 		ret = -EOPNOTSUPP;
2672 		goto rollback;
2673 	}
2674 
2675 	if (prev_offload == MACSEC_OFFLOAD_OFF)
2676 		func = ops->mdo_add_secy;
2677 	else
2678 		func = ops->mdo_del_secy;
2679 
2680 	ctx.secy = &macsec->secy;
2681 	ret = macsec_offload(func, &ctx);
2682 	if (ret)
2683 		goto rollback;
2684 
2685 	/* Force features update, since they are different for SW MACSec and
2686 	 * HW offloading cases.
2687 	 */
2688 	netdev_update_features(dev);
2689 
	ret = 0;
	goto out;

rollback:
	macsec->offload = prev_offload;
out:
	rtnl_unlock();
	return ret;
2698 }
2699 
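/* The get_*_stats() helpers below share one pattern: when the SecY is
 * offloaded, counters live in hardware and are fetched through the
 * provider's mdo_get_*_stats hook; otherwise they are summed across the
 * per-cpu software counters, with the 64-bit SC/SecY counters read
 * under the u64_stats syncp so 32-bit machines see consistent values.
 */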
2700 static void get_tx_sa_stats(struct net_device *dev, int an,
2701 			    struct macsec_tx_sa *tx_sa,
2702 			    struct macsec_tx_sa_stats *sum)
2703 {
2704 	struct macsec_dev *macsec = macsec_priv(dev);
2705 	int cpu;
2706 
2707 	/* If h/w offloading is available, propagate to the device */
2708 	if (macsec_is_offloaded(macsec)) {
2709 		const struct macsec_ops *ops;
2710 		struct macsec_context ctx;
2711 
2712 		ops = macsec_get_ops(macsec, &ctx);
2713 		if (ops) {
2714 			ctx.sa.assoc_num = an;
2715 			ctx.sa.tx_sa = tx_sa;
2716 			ctx.stats.tx_sa_stats = sum;
2717 			ctx.secy = &macsec_priv(dev)->secy;
2718 			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2719 		}
2720 		return;
2721 	}
2722 
2723 	for_each_possible_cpu(cpu) {
2724 		const struct macsec_tx_sa_stats *stats =
2725 			per_cpu_ptr(tx_sa->stats, cpu);
2726 
2727 		sum->OutPktsProtected += stats->OutPktsProtected;
2728 		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2729 	}
2730 }
2731 
2732 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2733 {
2734 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2735 			sum->OutPktsProtected) ||
2736 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2737 			sum->OutPktsEncrypted))
2738 		return -EMSGSIZE;
2739 
2740 	return 0;
2741 }
2742 
2743 static void get_rx_sa_stats(struct net_device *dev,
2744 			    struct macsec_rx_sc *rx_sc, int an,
2745 			    struct macsec_rx_sa *rx_sa,
2746 			    struct macsec_rx_sa_stats *sum)
2747 {
2748 	struct macsec_dev *macsec = macsec_priv(dev);
2749 	int cpu;
2750 
2751 	/* If h/w offloading is available, propagate to the device */
2752 	if (macsec_is_offloaded(macsec)) {
2753 		const struct macsec_ops *ops;
2754 		struct macsec_context ctx;
2755 
2756 		ops = macsec_get_ops(macsec, &ctx);
2757 		if (ops) {
2758 			ctx.sa.assoc_num = an;
2759 			ctx.sa.rx_sa = rx_sa;
2760 			ctx.stats.rx_sa_stats = sum;
2761 			ctx.secy = &macsec_priv(dev)->secy;
2762 			ctx.rx_sc = rx_sc;
2763 			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2764 		}
2765 		return;
2766 	}
2767 
2768 	for_each_possible_cpu(cpu) {
2769 		const struct macsec_rx_sa_stats *stats =
2770 			per_cpu_ptr(rx_sa->stats, cpu);
2771 
2772 		sum->InPktsOK         += stats->InPktsOK;
2773 		sum->InPktsInvalid    += stats->InPktsInvalid;
2774 		sum->InPktsNotValid   += stats->InPktsNotValid;
2775 		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2776 		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2777 	}
2778 }
2779 
2780 static int copy_rx_sa_stats(struct sk_buff *skb,
2781 			    struct macsec_rx_sa_stats *sum)
2782 {
2783 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2784 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2785 			sum->InPktsInvalid) ||
2786 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2787 			sum->InPktsNotValid) ||
2788 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2789 			sum->InPktsNotUsingSA) ||
2790 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2791 			sum->InPktsUnusedSA))
2792 		return -EMSGSIZE;
2793 
2794 	return 0;
2795 }
2796 
2797 static void get_rx_sc_stats(struct net_device *dev,
2798 			    struct macsec_rx_sc *rx_sc,
2799 			    struct macsec_rx_sc_stats *sum)
2800 {
2801 	struct macsec_dev *macsec = macsec_priv(dev);
2802 	int cpu;
2803 
2804 	/* If h/w offloading is available, propagate to the device */
2805 	if (macsec_is_offloaded(macsec)) {
2806 		const struct macsec_ops *ops;
2807 		struct macsec_context ctx;
2808 
2809 		ops = macsec_get_ops(macsec, &ctx);
2810 		if (ops) {
2811 			ctx.stats.rx_sc_stats = sum;
2812 			ctx.secy = &macsec_priv(dev)->secy;
2813 			ctx.rx_sc = rx_sc;
2814 			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2815 		}
2816 		return;
2817 	}
2818 
2819 	for_each_possible_cpu(cpu) {
2820 		const struct pcpu_rx_sc_stats *stats;
2821 		struct macsec_rx_sc_stats tmp;
2822 		unsigned int start;
2823 
2824 		stats = per_cpu_ptr(rx_sc->stats, cpu);
2825 		do {
2826 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2827 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2828 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2829 
2830 		sum->InOctetsValidated += tmp.InOctetsValidated;
2831 		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2832 		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2833 		sum->InPktsDelayed     += tmp.InPktsDelayed;
2834 		sum->InPktsOK          += tmp.InPktsOK;
2835 		sum->InPktsInvalid     += tmp.InPktsInvalid;
2836 		sum->InPktsLate        += tmp.InPktsLate;
2837 		sum->InPktsNotValid    += tmp.InPktsNotValid;
2838 		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2839 		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2840 	}
2841 }
2842 
2843 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2844 {
2845 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2846 			      sum->InOctetsValidated,
2847 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2848 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2849 			      sum->InOctetsDecrypted,
2850 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2851 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2852 			      sum->InPktsUnchecked,
2853 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2854 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2855 			      sum->InPktsDelayed,
2856 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2857 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2858 			      sum->InPktsOK,
2859 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2860 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2861 			      sum->InPktsInvalid,
2862 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2863 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2864 			      sum->InPktsLate,
2865 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2866 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2867 			      sum->InPktsNotValid,
2868 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2869 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2870 			      sum->InPktsNotUsingSA,
2871 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2872 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2873 			      sum->InPktsUnusedSA,
2874 			      MACSEC_RXSC_STATS_ATTR_PAD))
2875 		return -EMSGSIZE;
2876 
2877 	return 0;
2878 }
2879 
2880 static void get_tx_sc_stats(struct net_device *dev,
2881 			    struct macsec_tx_sc_stats *sum)
2882 {
2883 	struct macsec_dev *macsec = macsec_priv(dev);
2884 	int cpu;
2885 
2886 	/* If h/w offloading is available, propagate to the device */
2887 	if (macsec_is_offloaded(macsec)) {
2888 		const struct macsec_ops *ops;
2889 		struct macsec_context ctx;
2890 
2891 		ops = macsec_get_ops(macsec, &ctx);
2892 		if (ops) {
2893 			ctx.stats.tx_sc_stats = sum;
2894 			ctx.secy = &macsec_priv(dev)->secy;
2895 			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2896 		}
2897 		return;
2898 	}
2899 
2900 	for_each_possible_cpu(cpu) {
2901 		const struct pcpu_tx_sc_stats *stats;
2902 		struct macsec_tx_sc_stats tmp;
2903 		unsigned int start;
2904 
2905 		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2906 		do {
2907 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2908 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2909 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2910 
2911 		sum->OutPktsProtected   += tmp.OutPktsProtected;
2912 		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2913 		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2914 		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2915 	}
2916 }
2917 
2918 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2919 {
2920 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2921 			      sum->OutPktsProtected,
2922 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2923 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2924 			      sum->OutPktsEncrypted,
2925 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2926 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2927 			      sum->OutOctetsProtected,
2928 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2929 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2930 			      sum->OutOctetsEncrypted,
2931 			      MACSEC_TXSC_STATS_ATTR_PAD))
2932 		return -EMSGSIZE;
2933 
2934 	return 0;
2935 }
2936 
2937 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2938 {
2939 	struct macsec_dev *macsec = macsec_priv(dev);
2940 	int cpu;
2941 
2942 	/* If h/w offloading is available, propagate to the device */
2943 	if (macsec_is_offloaded(macsec)) {
2944 		const struct macsec_ops *ops;
2945 		struct macsec_context ctx;
2946 
2947 		ops = macsec_get_ops(macsec, &ctx);
2948 		if (ops) {
2949 			ctx.stats.dev_stats = sum;
2950 			ctx.secy = &macsec_priv(dev)->secy;
2951 			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2952 		}
2953 		return;
2954 	}
2955 
2956 	for_each_possible_cpu(cpu) {
2957 		const struct pcpu_secy_stats *stats;
2958 		struct macsec_dev_stats tmp;
2959 		unsigned int start;
2960 
2961 		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2962 		do {
2963 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2964 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2965 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2966 
2967 		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2968 		sum->InPktsUntagged   += tmp.InPktsUntagged;
2969 		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2970 		sum->InPktsNoTag      += tmp.InPktsNoTag;
2971 		sum->InPktsBadTag     += tmp.InPktsBadTag;
2972 		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2973 		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2974 		sum->InPktsOverrun    += tmp.InPktsOverrun;
2975 	}
2976 }
2977 
2978 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2979 {
2980 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2981 			      sum->OutPktsUntagged,
2982 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2983 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2984 			      sum->InPktsUntagged,
2985 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2986 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2987 			      sum->OutPktsTooLong,
2988 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2989 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2990 			      sum->InPktsNoTag,
2991 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2992 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2993 			      sum->InPktsBadTag,
2994 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2995 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2996 			      sum->InPktsUnknownSCI,
2997 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2998 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2999 			      sum->InPktsNoSCI,
3000 			      MACSEC_SECY_STATS_ATTR_PAD) ||
3001 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3002 			      sum->InPktsOverrun,
3003 			      MACSEC_SECY_STATS_ATTR_PAD))
3004 		return -EMSGSIZE;
3005 
3006 	return 0;
3007 }
3008 
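/* Fill the MACSEC_ATTR_SECY nest. The cipher suite ID is reconstructed
 * from the key length plus the XPN flag; returns nonzero on overflow so
 * the caller can cancel the enclosing message.
 */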
3009 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3010 {
3011 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3012 	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3013 							 MACSEC_ATTR_SECY);
3014 	u64 csid;
3015 
3016 	if (!secy_nest)
3017 		return 1;
3018 
3019 	switch (secy->key_len) {
3020 	case MACSEC_GCM_AES_128_SAK_LEN:
3021 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3022 		break;
3023 	case MACSEC_GCM_AES_256_SAK_LEN:
3024 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3025 		break;
3026 	default:
3027 		goto cancel;
3028 	}
3029 
3030 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3031 			MACSEC_SECY_ATTR_PAD) ||
3032 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3033 			      csid, MACSEC_SECY_ATTR_PAD) ||
3034 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3035 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3036 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3037 	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3038 	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3039 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3040 	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3041 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3042 	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3043 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3044 		goto cancel;
3045 
3046 	if (secy->replay_protect) {
3047 		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3048 			goto cancel;
3049 	}
3050 
3051 	nla_nest_end(skb, secy_nest);
3052 	return 0;
3053 
3054 cancel:
3055 	nla_nest_cancel(skb, secy_nest);
3056 	return 1;
3057 }
3058 
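/* Emit one complete MACSEC_CMD_GET_TXSC message for a SecY: offload
 * state, SecY parameters, SecY/TXSC statistics, then the nested list of
 * transmit SAs and the nested list of receive channels with their SAs.
 * noinline_for_stack keeps the on-stack stats buffers out of the dump
 * loop's frame.
 */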
3059 static noinline_for_stack int
3060 dump_secy(struct macsec_secy *secy, struct net_device *dev,
3061 	  struct sk_buff *skb, struct netlink_callback *cb)
3062 {
3063 	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3064 	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3065 	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3066 	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3067 	struct macsec_dev *macsec = netdev_priv(dev);
3068 	struct macsec_dev_stats dev_stats = {0, };
3069 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3070 	struct nlattr *txsa_list, *rxsc_list;
3071 	struct macsec_rx_sc *rx_sc;
3072 	struct nlattr *attr;
3073 	void *hdr;
3074 	int i, j;
3075 
3076 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3077 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3078 	if (!hdr)
3079 		return -EMSGSIZE;
3080 
3081 	genl_dump_check_consistent(cb, hdr);
3082 
3083 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3084 		goto nla_put_failure;
3085 
3086 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3087 	if (!attr)
3088 		goto nla_put_failure;
3089 	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3090 		goto nla_put_failure;
3091 	nla_nest_end(skb, attr);
3092 
3093 	if (nla_put_secy(secy, skb))
3094 		goto nla_put_failure;
3095 
3096 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3097 	if (!attr)
3098 		goto nla_put_failure;
3099 
3100 	get_tx_sc_stats(dev, &tx_sc_stats);
3101 	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3102 		nla_nest_cancel(skb, attr);
3103 		goto nla_put_failure;
3104 	}
3105 	nla_nest_end(skb, attr);
3106 
3107 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3108 	if (!attr)
3109 		goto nla_put_failure;
3110 	get_secy_stats(dev, &dev_stats);
3111 	if (copy_secy_stats(skb, &dev_stats)) {
3112 		nla_nest_cancel(skb, attr);
3113 		goto nla_put_failure;
3114 	}
3115 	nla_nest_end(skb, attr);
3116 
3117 	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3118 	if (!txsa_list)
3119 		goto nla_put_failure;
3120 	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3121 		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3122 		struct nlattr *txsa_nest;
3123 		u64 pn;
3124 		int pn_len;
3125 
3126 		if (!tx_sa)
3127 			continue;
3128 
3129 		txsa_nest = nla_nest_start_noflag(skb, j++);
3130 		if (!txsa_nest) {
3131 			nla_nest_cancel(skb, txsa_list);
3132 			goto nla_put_failure;
3133 		}
3134 
3135 		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3136 		if (!attr) {
3137 			nla_nest_cancel(skb, txsa_nest);
3138 			nla_nest_cancel(skb, txsa_list);
3139 			goto nla_put_failure;
3140 		}
3141 		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3142 		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3143 		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3144 			nla_nest_cancel(skb, attr);
3145 			nla_nest_cancel(skb, txsa_nest);
3146 			nla_nest_cancel(skb, txsa_list);
3147 			goto nla_put_failure;
3148 		}
3149 		nla_nest_end(skb, attr);
3150 
3151 		if (secy->xpn) {
3152 			pn = tx_sa->next_pn;
3153 			pn_len = MACSEC_XPN_PN_LEN;
3154 		} else {
3155 			pn = tx_sa->next_pn_halves.lower;
3156 			pn_len = MACSEC_DEFAULT_PN_LEN;
3157 		}
3158 
3159 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3160 		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3161 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3162 		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3163 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3164 			nla_nest_cancel(skb, txsa_nest);
3165 			nla_nest_cancel(skb, txsa_list);
3166 			goto nla_put_failure;
3167 		}
3168 
3169 		nla_nest_end(skb, txsa_nest);
3170 	}
3171 	nla_nest_end(skb, txsa_list);
3172 
3173 	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3174 	if (!rxsc_list)
3175 		goto nla_put_failure;
3176 
3177 	j = 1;
3178 	for_each_rxsc_rtnl(secy, rx_sc) {
3179 		int k;
3180 		struct nlattr *rxsa_list;
3181 		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3182 
3183 		if (!rxsc_nest) {
3184 			nla_nest_cancel(skb, rxsc_list);
3185 			goto nla_put_failure;
3186 		}
3187 
3188 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3189 		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3190 				MACSEC_RXSC_ATTR_PAD)) {
3191 			nla_nest_cancel(skb, rxsc_nest);
3192 			nla_nest_cancel(skb, rxsc_list);
3193 			goto nla_put_failure;
3194 		}
3195 
3196 		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3197 		if (!attr) {
3198 			nla_nest_cancel(skb, rxsc_nest);
3199 			nla_nest_cancel(skb, rxsc_list);
3200 			goto nla_put_failure;
3201 		}
3202 		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3203 		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3204 		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3205 			nla_nest_cancel(skb, attr);
3206 			nla_nest_cancel(skb, rxsc_nest);
3207 			nla_nest_cancel(skb, rxsc_list);
3208 			goto nla_put_failure;
3209 		}
3210 		nla_nest_end(skb, attr);
3211 
3212 		rxsa_list = nla_nest_start_noflag(skb,
3213 						  MACSEC_RXSC_ATTR_SA_LIST);
3214 		if (!rxsa_list) {
3215 			nla_nest_cancel(skb, rxsc_nest);
3216 			nla_nest_cancel(skb, rxsc_list);
3217 			goto nla_put_failure;
3218 		}
3219 
3220 		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3221 			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3222 			struct nlattr *rxsa_nest;
3223 			u64 pn;
3224 			int pn_len;
3225 
3226 			if (!rx_sa)
3227 				continue;
3228 
3229 			rxsa_nest = nla_nest_start_noflag(skb, k++);
3230 			if (!rxsa_nest) {
3231 				nla_nest_cancel(skb, rxsa_list);
3232 				nla_nest_cancel(skb, rxsc_nest);
3233 				nla_nest_cancel(skb, rxsc_list);
3234 				goto nla_put_failure;
3235 			}
3236 
3237 			attr = nla_nest_start_noflag(skb,
3238 						     MACSEC_SA_ATTR_STATS);
3239 			if (!attr) {
3240 				nla_nest_cancel(skb, rxsa_list);
3241 				nla_nest_cancel(skb, rxsc_nest);
3242 				nla_nest_cancel(skb, rxsc_list);
3243 				goto nla_put_failure;
3244 			}
3245 			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3246 			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3247 			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3248 				nla_nest_cancel(skb, attr);
3249 				nla_nest_cancel(skb, rxsa_list);
3250 				nla_nest_cancel(skb, rxsc_nest);
3251 				nla_nest_cancel(skb, rxsc_list);
3252 				goto nla_put_failure;
3253 			}
3254 			nla_nest_end(skb, attr);
3255 
3256 			if (secy->xpn) {
3257 				pn = rx_sa->next_pn;
3258 				pn_len = MACSEC_XPN_PN_LEN;
3259 			} else {
3260 				pn = rx_sa->next_pn_halves.lower;
3261 				pn_len = MACSEC_DEFAULT_PN_LEN;
3262 			}
3263 
3264 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3265 			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3266 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3267 			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3268 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3269 				nla_nest_cancel(skb, rxsa_nest);
3270 				nla_nest_cancel(skb, rxsc_nest);
3271 				nla_nest_cancel(skb, rxsc_list);
3272 				goto nla_put_failure;
3273 			}
3274 			nla_nest_end(skb, rxsa_nest);
3275 		}
3276 
3277 		nla_nest_end(skb, rxsa_list);
3278 		nla_nest_end(skb, rxsc_nest);
3279 	}
3280 
3281 	nla_nest_end(skb, rxsc_list);
3282 
3283 	genlmsg_end(skb, hdr);
3284 
3285 	return 0;
3286 
3287 nla_put_failure:
3288 	genlmsg_cancel(skb, hdr);
3289 	return -EMSGSIZE;
3290 }
3291 
3292 static int macsec_generation = 1; /* protected by RTNL */
3293 
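/* Dump every MACsec device in the netns. cb->seq is set to
 * macsec_generation so genl_dump_check_consistent() can mark dumps that
 * raced with configuration changes (NLM_F_DUMP_INTR).
 */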
3294 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3295 {
3296 	struct net *net = sock_net(skb->sk);
3297 	struct net_device *dev;
3298 	int dev_idx, d;
3299 
3300 	dev_idx = cb->args[0];
3301 
3302 	d = 0;
3303 	rtnl_lock();
3304 
3305 	cb->seq = macsec_generation;
3306 
3307 	for_each_netdev(net, dev) {
3308 		struct macsec_secy *secy;
3309 
3310 		if (d < dev_idx)
3311 			goto next;
3312 
3313 		if (!netif_is_macsec(dev))
3314 			goto next;
3315 
3316 		secy = &macsec_priv(dev)->secy;
3317 		if (dump_secy(secy, dev, skb, cb) < 0)
3318 			goto done;
3319 next:
3320 		d++;
3321 	}
3322 
3323 done:
3324 	rtnl_unlock();
3325 	cb->args[0] = d;
3326 	return skb->len;
3327 }
3328 
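/* Genetlink command table. For orientation, iproute2's "ip macsec"
 * subcommands map onto these (syntax illustrative, not verified against
 * a particular iproute2 version):
 *   ip macsec add macsec0 rx sci <sci>                 -> MACSEC_CMD_ADD_RXSC
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <hex> -> MACSEC_CMD_ADD_TXSA
 *   ip macsec show                                     -> MACSEC_CMD_GET_TXSC
 */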
3329 static const struct genl_small_ops macsec_genl_ops[] = {
3330 	{
3331 		.cmd = MACSEC_CMD_GET_TXSC,
3332 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3333 		.dumpit = macsec_dump_txsc,
3334 	},
3335 	{
3336 		.cmd = MACSEC_CMD_ADD_RXSC,
3337 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3338 		.doit = macsec_add_rxsc,
3339 		.flags = GENL_ADMIN_PERM,
3340 	},
3341 	{
3342 		.cmd = MACSEC_CMD_DEL_RXSC,
3343 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3344 		.doit = macsec_del_rxsc,
3345 		.flags = GENL_ADMIN_PERM,
3346 	},
3347 	{
3348 		.cmd = MACSEC_CMD_UPD_RXSC,
3349 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3350 		.doit = macsec_upd_rxsc,
3351 		.flags = GENL_ADMIN_PERM,
3352 	},
3353 	{
3354 		.cmd = MACSEC_CMD_ADD_TXSA,
3355 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3356 		.doit = macsec_add_txsa,
3357 		.flags = GENL_ADMIN_PERM,
3358 	},
3359 	{
3360 		.cmd = MACSEC_CMD_DEL_TXSA,
3361 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3362 		.doit = macsec_del_txsa,
3363 		.flags = GENL_ADMIN_PERM,
3364 	},
3365 	{
3366 		.cmd = MACSEC_CMD_UPD_TXSA,
3367 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3368 		.doit = macsec_upd_txsa,
3369 		.flags = GENL_ADMIN_PERM,
3370 	},
3371 	{
3372 		.cmd = MACSEC_CMD_ADD_RXSA,
3373 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3374 		.doit = macsec_add_rxsa,
3375 		.flags = GENL_ADMIN_PERM,
3376 	},
3377 	{
3378 		.cmd = MACSEC_CMD_DEL_RXSA,
3379 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3380 		.doit = macsec_del_rxsa,
3381 		.flags = GENL_ADMIN_PERM,
3382 	},
3383 	{
3384 		.cmd = MACSEC_CMD_UPD_RXSA,
3385 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3386 		.doit = macsec_upd_rxsa,
3387 		.flags = GENL_ADMIN_PERM,
3388 	},
3389 	{
3390 		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3391 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3392 		.doit = macsec_upd_offload,
3393 		.flags = GENL_ADMIN_PERM,
3394 	},
3395 };
3396 
3397 static struct genl_family macsec_fam __ro_after_init = {
3398 	.name		= MACSEC_GENL_NAME,
3399 	.hdrsize	= 0,
3400 	.version	= MACSEC_GENL_VERSION,
3401 	.maxattr	= MACSEC_ATTR_MAX,
3402 	.policy = macsec_genl_policy,
3403 	.netnsok	= true,
3404 	.module		= THIS_MODULE,
3405 	.small_ops	= macsec_genl_ops,
3406 	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3407 };
3408 
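/* Transmit path. Offloaded SecYs hand the skb straight to the real
 * device. Otherwise: untagged passthrough when frame protection is off
 * (IEEE 802.1AE 10.5), drop while the SecY is not operational, and
 * macsec_encrypt() for everything else; encryption may complete
 * asynchronously, hence the -EINPROGRESS case that must not be counted
 * as a drop.
 */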
3409 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3410 				     struct net_device *dev)
3411 {
3412 	struct macsec_dev *macsec = netdev_priv(dev);
3413 	struct macsec_secy *secy = &macsec->secy;
3414 	struct pcpu_secy_stats *secy_stats;
3415 	int ret, len;
3416 
3417 	if (macsec_is_offloaded(netdev_priv(dev))) {
3418 		skb->dev = macsec->real_dev;
3419 		return dev_queue_xmit(skb);
3420 	}
3421 
3422 	/* 10.5 */
3423 	if (!secy->protect_frames) {
3424 		secy_stats = this_cpu_ptr(macsec->stats);
3425 		u64_stats_update_begin(&secy_stats->syncp);
3426 		secy_stats->stats.OutPktsUntagged++;
3427 		u64_stats_update_end(&secy_stats->syncp);
3428 		skb->dev = macsec->real_dev;
3429 		len = skb->len;
3430 		ret = dev_queue_xmit(skb);
3431 		count_tx(dev, ret, len);
3432 		return ret;
3433 	}
3434 
3435 	if (!secy->operational) {
3436 		kfree_skb(skb);
3437 		dev->stats.tx_dropped++;
3438 		return NETDEV_TX_OK;
3439 	}
3440 
3441 	len = skb->len;
3442 	skb = macsec_encrypt(skb, dev);
3443 	if (IS_ERR(skb)) {
3444 		if (PTR_ERR(skb) != -EINPROGRESS)
3445 			dev->stats.tx_dropped++;
3446 		return NETDEV_TX_OK;
3447 	}
3448 
3449 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3450 
3451 	macsec_encrypt_finish(skb, dev);
3452 	ret = dev_queue_xmit(skb);
3453 	count_tx(dev, ret, len);
3454 	return ret;
3455 }
3456 
3457 #define SW_MACSEC_FEATURES \
3458 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3459 
3460 /* If h/w offloading is enabled, use real device features save for
3461  *   VLAN_FEATURES - they require additional ops
3462  *   HW_MACSEC - no reason to report it
3463  */
3464 #define REAL_DEV_FEATURES(dev) \
3465 	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3466 
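/* ndo_init: allocate per-cpu tstats and GRO cells, derive the feature
 * set from the real device (full passthrough when offloaded), inherit
 * headroom/tailroom and, if still unset, MAC and broadcast addresses,
 * and take a tracked reference on the real device.
 */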
3467 static int macsec_dev_init(struct net_device *dev)
3468 {
3469 	struct macsec_dev *macsec = macsec_priv(dev);
3470 	struct net_device *real_dev = macsec->real_dev;
3471 	int err;
3472 
3473 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3474 	if (!dev->tstats)
3475 		return -ENOMEM;
3476 
3477 	err = gro_cells_init(&macsec->gro_cells, dev);
3478 	if (err) {
3479 		free_percpu(dev->tstats);
3480 		return err;
3481 	}
3482 
3483 	if (macsec_is_offloaded(macsec)) {
3484 		dev->features = REAL_DEV_FEATURES(real_dev);
3485 	} else {
3486 		dev->features = real_dev->features & SW_MACSEC_FEATURES;
3487 		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3488 	}
3489 
3490 	dev->needed_headroom = real_dev->needed_headroom +
3491 			       MACSEC_NEEDED_HEADROOM;
3492 	dev->needed_tailroom = real_dev->needed_tailroom +
3493 			       MACSEC_NEEDED_TAILROOM;
3494 
3495 	if (is_zero_ether_addr(dev->dev_addr))
3496 		eth_hw_addr_inherit(dev, real_dev);
3497 	if (is_zero_ether_addr(dev->broadcast))
3498 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3499 
3500 	/* Get macsec's reference to real_dev */
3501 	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
3502 
3503 	return 0;
3504 }
3505 
3506 static void macsec_dev_uninit(struct net_device *dev)
3507 {
3508 	struct macsec_dev *macsec = macsec_priv(dev);
3509 
3510 	gro_cells_destroy(&macsec->gro_cells);
3511 	free_percpu(dev->tstats);
3512 }
3513 
3514 static netdev_features_t macsec_fix_features(struct net_device *dev,
3515 					     netdev_features_t features)
3516 {
3517 	struct macsec_dev *macsec = macsec_priv(dev);
3518 	struct net_device *real_dev = macsec->real_dev;
3519 
3520 	if (macsec_is_offloaded(macsec))
3521 		return REAL_DEV_FEATURES(real_dev);
3522 
3523 	features &= (real_dev->features & SW_MACSEC_FEATURES) |
3524 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3525 	features |= NETIF_F_LLTX;
3526 
3527 	return features;
3528 }
3529 
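/* ndo_open: mirror our unicast address and rx-mode flags onto the real
 * device, then open the offload provider if one is in use; each step is
 * unwound in reverse order on failure.
 */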
3530 static int macsec_dev_open(struct net_device *dev)
3531 {
3532 	struct macsec_dev *macsec = macsec_priv(dev);
3533 	struct net_device *real_dev = macsec->real_dev;
3534 	int err;
3535 
3536 	err = dev_uc_add(real_dev, dev->dev_addr);
3537 	if (err < 0)
3538 		return err;
3539 
3540 	if (dev->flags & IFF_ALLMULTI) {
3541 		err = dev_set_allmulti(real_dev, 1);
3542 		if (err < 0)
3543 			goto del_unicast;
3544 	}
3545 
3546 	if (dev->flags & IFF_PROMISC) {
3547 		err = dev_set_promiscuity(real_dev, 1);
3548 		if (err < 0)
3549 			goto clear_allmulti;
3550 	}
3551 
3552 	/* If h/w offloading is available, propagate to the device */
3553 	if (macsec_is_offloaded(macsec)) {
3554 		const struct macsec_ops *ops;
3555 		struct macsec_context ctx;
3556 
3557 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3558 		if (!ops) {
3559 			err = -EOPNOTSUPP;
			goto clear_promisc;
3561 		}
3562 
3563 		ctx.secy = &macsec->secy;
3564 		err = macsec_offload(ops->mdo_dev_open, &ctx);
3565 		if (err)
			goto clear_promisc;
3567 	}
3568 
3569 	if (netif_carrier_ok(real_dev))
3570 		netif_carrier_on(dev);
3571 
3572 	return 0;
clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);
clear_allmulti:
3574 	if (dev->flags & IFF_ALLMULTI)
3575 		dev_set_allmulti(real_dev, -1);
3576 del_unicast:
3577 	dev_uc_del(real_dev, dev->dev_addr);
3578 	netif_carrier_off(dev);
3579 	return err;
3580 }
3581 
3582 static int macsec_dev_stop(struct net_device *dev)
3583 {
3584 	struct macsec_dev *macsec = macsec_priv(dev);
3585 	struct net_device *real_dev = macsec->real_dev;
3586 
3587 	netif_carrier_off(dev);
3588 
3589 	/* If h/w offloading is available, propagate to the device */
3590 	if (macsec_is_offloaded(macsec)) {
3591 		const struct macsec_ops *ops;
3592 		struct macsec_context ctx;
3593 
3594 		ops = macsec_get_ops(macsec, &ctx);
3595 		if (ops) {
3596 			ctx.secy = &macsec->secy;
3597 			macsec_offload(ops->mdo_dev_stop, &ctx);
3598 		}
3599 	}
3600 
3601 	dev_mc_unsync(real_dev, dev);
3602 	dev_uc_unsync(real_dev, dev);
3603 
3604 	if (dev->flags & IFF_ALLMULTI)
3605 		dev_set_allmulti(real_dev, -1);
3606 
3607 	if (dev->flags & IFF_PROMISC)
3608 		dev_set_promiscuity(real_dev, -1);
3609 
3610 	dev_uc_del(real_dev, dev->dev_addr);
3611 
3612 	return 0;
3613 }
3614 
3615 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3616 {
3617 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3618 
3619 	if (!(dev->flags & IFF_UP))
3620 		return;
3621 
3622 	if (change & IFF_ALLMULTI)
3623 		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3624 
3625 	if (change & IFF_PROMISC)
3626 		dev_set_promiscuity(real_dev,
3627 				    dev->flags & IFF_PROMISC ? 1 : -1);
3628 }
3629 
3630 static void macsec_dev_set_rx_mode(struct net_device *dev)
3631 {
3632 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3633 
3634 	dev_mc_sync(real_dev, dev);
3635 	dev_uc_sync(real_dev, dev);
3636 }
3637 
3638 static int macsec_set_mac_address(struct net_device *dev, void *p)
3639 {
3640 	struct macsec_dev *macsec = macsec_priv(dev);
3641 	struct net_device *real_dev = macsec->real_dev;
3642 	struct sockaddr *addr = p;
3643 	int err;
3644 
3645 	if (!is_valid_ether_addr(addr->sa_data))
3646 		return -EADDRNOTAVAIL;
3647 
3648 	if (!(dev->flags & IFF_UP))
3649 		goto out;
3650 
3651 	err = dev_uc_add(real_dev, addr->sa_data);
3652 	if (err < 0)
3653 		return err;
3654 
3655 	dev_uc_del(real_dev, dev->dev_addr);
3656 
3657 out:
3658 	eth_hw_addr_set(dev, addr->sa_data);
3659 
3660 	/* If h/w offloading is available, propagate to the device */
3661 	if (macsec_is_offloaded(macsec)) {
3662 		const struct macsec_ops *ops;
3663 		struct macsec_context ctx;
3664 
3665 		ops = macsec_get_ops(macsec, &ctx);
3666 		if (ops) {
3667 			ctx.secy = &macsec->secy;
3668 			macsec_offload(ops->mdo_upd_secy, &ctx);
3669 		}
3670 	}
3671 
3672 	return 0;
3673 }
3674 
3675 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3676 {
3677 	struct macsec_dev *macsec = macsec_priv(dev);
3678 	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3679 
3680 	if (macsec->real_dev->mtu - extra < new_mtu)
3681 		return -ERANGE;
3682 
3683 	dev->mtu = new_mtu;
3684 
3685 	return 0;
3686 }
3687 
3688 static void macsec_get_stats64(struct net_device *dev,
3689 			       struct rtnl_link_stats64 *s)
3690 {
3691 	if (!dev->tstats)
3692 		return;
3693 
3694 	dev_fetch_sw_netstats(s, dev->tstats);
3695 
3696 	s->rx_dropped = dev->stats.rx_dropped;
3697 	s->tx_dropped = dev->stats.tx_dropped;
3698 	s->rx_errors = dev->stats.rx_errors;
3699 }
3700 
3701 static int macsec_get_iflink(const struct net_device *dev)
3702 {
3703 	return macsec_priv(dev)->real_dev->ifindex;
3704 }
3705 
3706 static const struct net_device_ops macsec_netdev_ops = {
3707 	.ndo_init		= macsec_dev_init,
3708 	.ndo_uninit		= macsec_dev_uninit,
3709 	.ndo_open		= macsec_dev_open,
3710 	.ndo_stop		= macsec_dev_stop,
3711 	.ndo_fix_features	= macsec_fix_features,
3712 	.ndo_change_mtu		= macsec_change_mtu,
3713 	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3714 	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3715 	.ndo_set_mac_address	= macsec_set_mac_address,
3716 	.ndo_start_xmit		= macsec_start_xmit,
3717 	.ndo_get_stats64	= macsec_get_stats64,
3718 	.ndo_get_iflink		= macsec_get_iflink,
3719 };
3720 
3721 static const struct device_type macsec_type = {
3722 	.name = "macsec",
3723 };
3724 
3725 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3726 	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3727 	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3728 	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3729 	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3730 	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3731 	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3732 	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3733 	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3734 	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3735 	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3736 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3737 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3738 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3739 };
3740 
3741 static void macsec_free_netdev(struct net_device *dev)
3742 {
3743 	struct macsec_dev *macsec = macsec_priv(dev);
3744 
3745 	free_percpu(macsec->stats);
3746 	free_percpu(macsec->secy.tx_sc.stats);
3747 
3748 	/* Get rid of the macsec's reference to real_dev */
	/* Drop the macsec device's reference to real_dev */
3750 }
3751 
3752 static void macsec_setup(struct net_device *dev)
3753 {
3754 	ether_setup(dev);
3755 	dev->min_mtu = 0;
3756 	dev->max_mtu = ETH_MAX_MTU;
3757 	dev->priv_flags |= IFF_NO_QUEUE;
3758 	dev->netdev_ops = &macsec_netdev_ops;
3759 	dev->needs_free_netdev = true;
3760 	dev->priv_destructor = macsec_free_netdev;
3761 	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3762 
3763 	eth_zero_addr(dev->broadcast);
3764 }
3765 
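/* Apply the rtnetlink attributes shared by newlink and changelink
 * (e.g. "ip link set macsec0 type macsec encrypt on"; syntax
 * illustrative). Moving the encoding SA recomputes whether the SecY is
 * operational, and selecting a cipher suite also sets key_len and the
 * XPN flag that later validations depend on.
 */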
3766 static int macsec_changelink_common(struct net_device *dev,
3767 				    struct nlattr *data[])
3768 {
3769 	struct macsec_secy *secy;
3770 	struct macsec_tx_sc *tx_sc;
3771 
3772 	secy = &macsec_priv(dev)->secy;
3773 	tx_sc = &secy->tx_sc;
3774 
3775 	if (data[IFLA_MACSEC_ENCODING_SA]) {
3776 		struct macsec_tx_sa *tx_sa;
3777 
3778 		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3779 		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3780 
3781 		secy->operational = tx_sa && tx_sa->active;
3782 	}
3783 
3784 	if (data[IFLA_MACSEC_ENCRYPT])
3785 		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3786 
3787 	if (data[IFLA_MACSEC_PROTECT])
3788 		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3789 
3790 	if (data[IFLA_MACSEC_INC_SCI])
3791 		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3792 
3793 	if (data[IFLA_MACSEC_ES])
3794 		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3795 
3796 	if (data[IFLA_MACSEC_SCB])
3797 		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3798 
3799 	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3800 		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3801 
3802 	if (data[IFLA_MACSEC_VALIDATION])
3803 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3804 
3805 	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3806 		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3807 		case MACSEC_CIPHER_ID_GCM_AES_128:
3808 		case MACSEC_DEFAULT_CIPHER_ID:
3809 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3810 			secy->xpn = false;
3811 			break;
3812 		case MACSEC_CIPHER_ID_GCM_AES_256:
3813 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3814 			secy->xpn = false;
3815 			break;
3816 		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3817 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3818 			secy->xpn = true;
3819 			break;
3820 		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3821 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3822 			secy->xpn = true;
3823 			break;
3824 		default:
3825 			return -EINVAL;
3826 		}
3827 	}
3828 
3829 	if (data[IFLA_MACSEC_WINDOW]) {
3830 		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3831 
3832 		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3833 		 * for XPN cipher suites */
3834 		if (secy->xpn &&
3835 		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3836 			return -EINVAL;
3837 	}
3838 
3839 	return 0;
3840 }
3841 
3842 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3843 			     struct nlattr *data[],
3844 			     struct netlink_ext_ack *extack)
3845 {
3846 	struct macsec_dev *macsec = macsec_priv(dev);
3847 	struct macsec_tx_sc tx_sc;
3848 	struct macsec_secy secy;
3849 	int ret;
3850 
3851 	if (!data)
3852 		return 0;
3853 
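	/* cipher suite, ICV length, SCI and port are fixed at link
	 * creation time and cannot be changed afterwards
	 */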
3854 	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3855 	    data[IFLA_MACSEC_ICV_LEN] ||
3856 	    data[IFLA_MACSEC_SCI] ||
3857 	    data[IFLA_MACSEC_PORT])
3858 		return -EINVAL;
3859 
3860 	/* Keep a copy of the unmodified secy and tx_sc so that we can
3861 	 * revert macsec_changelink_common() if offload propagation fails.
3862 	 */
3863 	memcpy(&secy, &macsec->secy, sizeof(secy));
3864 	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3865 
3866 	ret = macsec_changelink_common(dev, data);
3867 	if (ret)
3868 		goto cleanup;
3869 
3870 	/* If h/w offloading is available, propagate to the device */
3871 	if (macsec_is_offloaded(macsec)) {
3872 		const struct macsec_ops *ops;
3873 		struct macsec_context ctx;
3875 
3876 		ops = macsec_get_ops(macsec, &ctx);
3877 		if (!ops) {
3878 			ret = -EOPNOTSUPP;
3879 			goto cleanup;
3880 		}
3881 
3882 		ctx.secy = &macsec->secy;
3883 		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3884 		if (ret)
3885 			goto cleanup;
3886 	}
3887 
3888 	return 0;
3889 
3890 cleanup:
3891 	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3892 	memcpy(&macsec->secy, &secy, sizeof(secy));
3893 
3894 	return ret;
3895 }
3896 
3897 static void macsec_del_dev(struct macsec_dev *macsec)
3898 {
3899 	int i;
3900 
3901 	while (macsec->secy.rx_sc) {
3902 		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3903 
3904 		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3905 		free_rx_sc(rx_sc);
3906 	}
3907 
3908 	for (i = 0; i < MACSEC_NUM_AN; i++) {
3909 		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3910 
3911 		if (sa) {
3912 			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3913 			clear_tx_sa(sa);
3914 		}
3915 	}
3916 }
3917 
3918 static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3919 {
3920 	struct macsec_dev *macsec = macsec_priv(dev);
3921 	struct net_device *real_dev = macsec->real_dev;
3922 
3923 	/* If h/w offloading is available, propagate to the device */
3924 	if (macsec_is_offloaded(macsec)) {
3925 		const struct macsec_ops *ops;
3926 		struct macsec_context ctx;
3927 
3928 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3929 		if (ops) {
3930 			ctx.secy = &macsec->secy;
3931 			macsec_offload(ops->mdo_del_secy, &ctx);
3932 		}
3933 	}
3934 
3935 	unregister_netdevice_queue(dev, head);
3936 	list_del_rcu(&macsec->secys);
3937 	macsec_del_dev(macsec);
3938 	netdev_upper_dev_unlink(real_dev, dev);
3939 
3940 	macsec_generation++;
3941 }
3942 
3943 static void macsec_dellink(struct net_device *dev, struct list_head *head)
3944 {
3945 	struct macsec_dev *macsec = macsec_priv(dev);
3946 	struct net_device *real_dev = macsec->real_dev;
3947 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3948 
3949 	macsec_common_dellink(dev, head);
3950 
3951 	if (list_empty(&rxd->secys)) {
3952 		netdev_rx_handler_unregister(real_dev);
3953 		kfree(rxd);
3954 	}
3955 }
3956 
3957 static int register_macsec_dev(struct net_device *real_dev,
3958 			       struct net_device *dev)
3959 {
3960 	struct macsec_dev *macsec = macsec_priv(dev);
3961 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3962 
3963 	if (!rxd) {
3964 		int err;
3965 
3966 		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3967 		if (!rxd)
3968 			return -ENOMEM;
3969 
3970 		INIT_LIST_HEAD(&rxd->secys);
3971 
3972 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3973 						 rxd);
3974 		if (err < 0) {
3975 			kfree(rxd);
3976 			return err;
3977 		}
3978 	}
3979 
3980 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3981 	return 0;
3982 }
3983 
3984 static bool sci_exists(struct net_device *dev, sci_t sci)
3985 {
3986 	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3987 	struct macsec_dev *macsec;
3988 
3989 	list_for_each_entry(macsec, &rxd->secys, secys) {
3990 		if (macsec->secy.sci == sci)
3991 			return true;
3992 	}
3993 
3994 	return false;
3995 }
3996 
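/* When no explicit SCI is supplied, derive one from the device: per
 * IEEE 802.1AE, the 64-bit SCI is the device's 48-bit MAC address
 * followed by a 16-bit port number.
 */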
3997 static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3998 {
3999 	return make_sci(dev->dev_addr, port);
4000 }
4001 
4002 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
4003 {
4004 	struct macsec_dev *macsec = macsec_priv(dev);
4005 	struct macsec_secy *secy = &macsec->secy;
4006 
4007 	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
4008 	if (!macsec->stats)
4009 		return -ENOMEM;
4010 
4011 	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
4012 	if (!secy->tx_sc.stats) {
4013 		free_percpu(macsec->stats);
4014 		return -ENOMEM;
4015 	}
4016 
4017 	if (sci == MACSEC_UNDEF_SCI)
4018 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4019 
4020 	secy->netdev = dev;
4021 	secy->operational = true;
4022 	secy->key_len = DEFAULT_SAK_LEN;
4023 	secy->icv_len = icv_len;
4024 	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
4025 	secy->protect_frames = true;
4026 	secy->replay_protect = false;
4027 	secy->xpn = DEFAULT_XPN;
4028 
4029 	secy->sci = sci;
4030 	secy->tx_sc.active = true;
4031 	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
4032 	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
4033 	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
4034 	secy->tx_sc.end_station = false;
4035 	secy->tx_sc.scb = false;
4036 
4037 	return 0;
4038 }
4039 
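/* separate lockdep class for addr_list_lock: macsec is stacked on a
 * lower device, and without its own class lockdep could report
 * false-positive lock recursion against the lower device
 */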
4040 static struct lock_class_key macsec_netdev_addr_lock_key;
4041 
4042 static int macsec_newlink(struct net *net, struct net_device *dev,
4043 			  struct nlattr *tb[], struct nlattr *data[],
4044 			  struct netlink_ext_ack *extack)
4045 {
4046 	struct macsec_dev *macsec = macsec_priv(dev);
4047 	rx_handler_func_t *rx_handler;
4048 	u8 icv_len = DEFAULT_ICV_LEN;
4049 	struct net_device *real_dev;
4050 	int err, mtu;
4051 	sci_t sci;
4052 
4053 	if (!tb[IFLA_LINK])
4054 		return -EINVAL;
4055 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
4056 	if (!real_dev)
4057 		return -ENODEV;
4058 	if (real_dev->type != ARPHRD_ETHER)
4059 		return -EINVAL;
4060 
4061 	dev->priv_flags |= IFF_MACSEC;
4062 
4063 	macsec->real_dev = real_dev;
4064 
4065 	if (data && data[IFLA_MACSEC_OFFLOAD])
4066 		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4067 	else
4068 		/* MACsec offloading is off by default */
4069 		macsec->offload = MACSEC_OFFLOAD_OFF;
4070 
4071 	/* Check if the offloading mode is supported by the underlying layers */
4072 	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4073 	    !macsec_check_offload(macsec->offload, macsec))
4074 		return -EOPNOTSUPP;
4075 
4076 	/* send_sci must be true when an SCI is explicitly set for transmit */
4077 	if (data && data[IFLA_MACSEC_SCI] && data[IFLA_MACSEC_INC_SCI]) {
4079 		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
4080 
4081 		if (!send_sci)
4082 			return -EINVAL;
4083 	}
4084 
4085 	if (data && data[IFLA_MACSEC_ICV_LEN])
4086 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
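	/* leave room on the lower device for the SecTAG (including the
	 * optional SCI) and the ICV
	 */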
4087 	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4088 	if (mtu < 0)
4089 		dev->mtu = 0;
4090 	else
4091 		dev->mtu = mtu;
4092 
4093 	rx_handler = rtnl_dereference(real_dev->rx_handler);
4094 	if (rx_handler && rx_handler != macsec_handle_frame)
4095 		return -EBUSY;
4096 
4097 	err = register_netdevice(dev);
4098 	if (err < 0)
4099 		return err;
4100 
4101 	netdev_lockdep_set_classes(dev);
4102 	lockdep_set_class(&dev->addr_list_lock,
4103 			  &macsec_netdev_addr_lock_key);
4104 
4105 	err = netdev_upper_dev_link(real_dev, dev, extack);
4106 	if (err < 0)
4107 		goto unregister;
4108 
4109 	/* the device must already be registered so that ->init has run
4110 	 * and the MAC address is set
4111 	 */
4112 	if (data && data[IFLA_MACSEC_SCI])
4113 		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4114 	else if (data && data[IFLA_MACSEC_PORT])
4115 		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4116 	else
4117 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
4118 
4119 	if (rx_handler && sci_exists(real_dev, sci)) {
4120 		err = -EBUSY;
4121 		goto unlink;
4122 	}
4123 
4124 	err = macsec_add_dev(dev, sci, icv_len);
4125 	if (err)
4126 		goto unlink;
4127 
4128 	if (data) {
4129 		err = macsec_changelink_common(dev, data);
4130 		if (err)
4131 			goto del_dev;
4132 	}
4133 
4134 	/* If h/w offloading is available, propagate to the device */
4135 	if (macsec_is_offloaded(macsec)) {
4136 		const struct macsec_ops *ops;
4137 		struct macsec_context ctx;
4138 
4139 		ops = macsec_get_ops(macsec, &ctx);
4140 		if (ops) {
4141 			ctx.secy = &macsec->secy;
4142 			err = macsec_offload(ops->mdo_add_secy, &ctx);
4143 			if (err)
4144 				goto del_dev;
4145 		}
4146 	}
4147 
4148 	err = register_macsec_dev(real_dev, dev);
4149 	if (err < 0)
4150 		goto del_dev;
4151 
4152 	netif_stacked_transfer_operstate(real_dev, dev);
4153 	linkwatch_fire_event(dev);
4154 
4155 	macsec_generation++;
4156 
4157 	return 0;
4158 
4159 del_dev:
4160 	macsec_del_dev(macsec);
4161 unlink:
4162 	netdev_upper_dev_unlink(real_dev, dev);
4163 unregister:
4164 	unregister_netdevice(dev);
4165 	return err;
4166 }
4167 
4168 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4169 				struct netlink_ext_ack *extack)
4170 {
4171 	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
4172 	u8 icv_len = DEFAULT_ICV_LEN;
4173 	int flag;
4174 	bool es, scb, sci;
4175 
4176 	if (!data)
4177 		return 0;
4178 
4179 	if (data[IFLA_MACSEC_CIPHER_SUITE])
4180 		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4181 
4182 	if (data[IFLA_MACSEC_ICV_LEN]) {
4183 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
4184 		if (icv_len != DEFAULT_ICV_LEN) {
4185 			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4186 			struct crypto_aead *dummy_tfm;
4187 
4188 			dummy_tfm = macsec_alloc_tfm(dummy_key,
4189 						     DEFAULT_SAK_LEN,
4190 						     icv_len);
4191 			if (IS_ERR(dummy_tfm))
4192 				return PTR_ERR(dummy_tfm);
4193 			crypto_free_aead(dummy_tfm);
4194 		}
4195 	}
4196 
4197 	switch (csid) {
4198 	case MACSEC_CIPHER_ID_GCM_AES_128:
4199 	case MACSEC_CIPHER_ID_GCM_AES_256:
4200 	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4201 	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
4202 	case MACSEC_DEFAULT_CIPHER_ID:
4203 		if (icv_len < MACSEC_MIN_ICV_LEN ||
4204 		    icv_len > MACSEC_STD_ICV_LEN)
4205 			return -EINVAL;
4206 		break;
4207 	default:
4208 		return -EINVAL;
4209 	}
4210 
4211 	if (data[IFLA_MACSEC_ENCODING_SA]) {
4212 		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4213 			return -EINVAL;
4214 	}
4215 
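	/* every attribute between ENCODING_SA and VALIDATION is an
	 * on/off boolean (ENCRYPT, PROTECT, INC_SCI, ES, SCB,
	 * REPLAY_PROTECT)
	 */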
4216 	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4217 	     flag < IFLA_MACSEC_VALIDATION;
4218 	     flag++) {
4219 		if (data[flag]) {
4220 			if (nla_get_u8(data[flag]) > 1)
4221 				return -EINVAL;
4222 		}
4223 	}
4224 
4225 	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
4226 	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
4227 	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
4228 
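	/* an explicit SCI, the end-station bit and the SCB bit are
	 * mutually exclusive ways of identifying the transmit SC (see
	 * the TCI encoding in IEEE 802.1AE)
	 */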
4229 	if ((sci && (scb || es)) || (scb && es))
4230 		return -EINVAL;
4231 
4232 	if (data[IFLA_MACSEC_VALIDATION] &&
4233 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4234 		return -EINVAL;
4235 
4236 	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4237 	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
4238 	    !data[IFLA_MACSEC_WINDOW])
4239 		return -EINVAL;
4240 
4241 	return 0;
4242 }
4243 
4244 static struct net *macsec_get_link_net(const struct net_device *dev)
4245 {
4246 	return dev_net(macsec_priv(dev)->real_dev);
4247 }
4248 
4249 static size_t macsec_get_size(const struct net_device *dev)
4250 {
4251 	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4252 		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4253 		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4254 		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4255 		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4256 		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4257 		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4258 		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4259 		nla_total_size(1) + /* IFLA_MACSEC_ES */
4260 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
4261 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4262 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
4263 		0;
4264 }
4265 
4266 static int macsec_fill_info(struct sk_buff *skb,
4267 			    const struct net_device *dev)
4268 {
4269 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
4270 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
4271 	u64 csid;
4272 
4273 	switch (secy->key_len) {
4274 	case MACSEC_GCM_AES_128_SAK_LEN:
4275 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
4276 		break;
4277 	case MACSEC_GCM_AES_256_SAK_LEN:
4278 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
4279 		break;
4280 	default:
4281 		goto nla_put_failure;
4282 	}
4283 
4284 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4285 			IFLA_MACSEC_PAD) ||
4286 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
4287 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
4288 			      csid, IFLA_MACSEC_PAD) ||
4289 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4290 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4291 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4292 	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4293 	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4294 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4295 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4296 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4297 	    0)
4298 		goto nla_put_failure;
4299 
4300 	if (secy->replay_protect) {
4301 		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4302 			goto nla_put_failure;
4303 	}
4304 
4305 	return 0;
4306 
4307 nla_put_failure:
4308 	return -EMSGSIZE;
4309 }
4310 
4311 static struct rtnl_link_ops macsec_link_ops __read_mostly = {
4312 	.kind		= "macsec",
4313 	.priv_size	= sizeof(struct macsec_dev),
4314 	.maxtype	= IFLA_MACSEC_MAX,
4315 	.policy		= macsec_rtnl_policy,
4316 	.setup		= macsec_setup,
4317 	.validate	= macsec_validate_attr,
4318 	.newlink	= macsec_newlink,
4319 	.changelink	= macsec_changelink,
4320 	.dellink	= macsec_dellink,
4321 	.get_size	= macsec_get_size,
4322 	.fill_info	= macsec_fill_info,
4323 	.get_link_net	= macsec_get_link_net,
4324 };
4325 
4326 static bool is_macsec_master(struct net_device *dev)
4327 {
4328 	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
4329 }
4330 
4331 static int macsec_notify(struct notifier_block *this, unsigned long event,
4332 			 void *ptr)
4333 {
4334 	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4335 	LIST_HEAD(head);
4336 
4337 	if (!is_macsec_master(real_dev))
4338 		return NOTIFY_DONE;
4339 
4340 	switch (event) {
4341 	case NETDEV_DOWN:
4342 	case NETDEV_UP:
4343 	case NETDEV_CHANGE: {
4344 		struct macsec_dev *m, *n;
4345 		struct macsec_rxh_data *rxd;
4346 
4347 		rxd = macsec_data_rtnl(real_dev);
4348 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4349 			struct net_device *dev = m->secy.netdev;
4350 
4351 			netif_stacked_transfer_operstate(real_dev, dev);
4352 		}
4353 		break;
4354 	}
4355 	case NETDEV_UNREGISTER: {
4356 		struct macsec_dev *m, *n;
4357 		struct macsec_rxh_data *rxd;
4358 
4359 		rxd = macsec_data_rtnl(real_dev);
4360 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4361 			macsec_common_dellink(m->secy.netdev, &head);
4362 		}
4363 
4364 		netdev_rx_handler_unregister(real_dev);
4365 		kfree(rxd);
4366 
4367 		unregister_netdevice_many(&head);
4368 		break;
4369 	}
4370 	case NETDEV_CHANGEMTU: {
4371 		struct macsec_dev *m;
4372 		struct macsec_rxh_data *rxd;
4373 
4374 		rxd = macsec_data_rtnl(real_dev);
4375 		list_for_each_entry(m, &rxd->secys, secys) {
4376 			struct net_device *dev = m->secy.netdev;
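			/* mirror the MTU computed at newlink time: keep
			 * room for the SecTAG (with SCI) and the ICV
			 */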
4377 			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4378 							    macsec_extra_len(true));
4379 
4380 			if (dev->mtu > mtu)
4381 				dev_set_mtu(dev, mtu);
4382 		}
4383 	}
4384 	}
4385 
4386 	return NOTIFY_OK;
4387 }
4388 
4389 static struct notifier_block macsec_notifier = {
4390 	.notifier_call = macsec_notify,
4391 };
4392 
4393 static int __init macsec_init(void)
4394 {
4395 	int err;
4396 
4397 	pr_info("MACsec IEEE 802.1AE\n");
4398 	err = register_netdevice_notifier(&macsec_notifier);
4399 	if (err)
4400 		return err;
4401 
4402 	err = rtnl_link_register(&macsec_link_ops);
4403 	if (err)
4404 		goto notifier;
4405 
4406 	err = genl_register_family(&macsec_fam);
4407 	if (err)
4408 		goto rtnl;
4409 
4410 	return 0;
4411 
4412 rtnl:
4413 	rtnl_link_unregister(&macsec_link_ops);
4414 notifier:
4415 	unregister_netdevice_notifier(&macsec_notifier);
4416 	return err;
4417 }
4418 
4419 static void __exit macsec_exit(void)
4420 {
4421 	genl_unregister_family(&macsec_fam);
4422 	rtnl_link_unregister(&macsec_link_ops);
4423 	unregister_netdevice_notifier(&macsec_notifier);
4424 	rcu_barrier();
4425 }
4426 
4427 module_init(macsec_init);
4428 module_exit(macsec_exit);
4429 
4430 MODULE_ALIAS_RTNL_LINK("macsec");
4431 MODULE_ALIAS_GENL_FAMILY("macsec");
4432 
4433 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
4434 MODULE_LICENSE("GPL v2");
4435