// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
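/* Returns true only for frames sent to the HSR/PRP supervision multicast
 * address with an HSR or PRP ethertype and a known supervision TLV type and
 * length.
 */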
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}

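/* Make an untagged copy of an HSR tagged skb: the copy has the HSR tag
 * (HSR_HLEN bytes) removed and the original Ethernet (and VLAN) addressing
 * restored in front of the payload.
 */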
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

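/* Return a clone of the frame without its HSR tag; the stripped copy is
 * created on first use and cached in frame->skb_std.
 */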
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

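/* PRP counterpart of the above: the Redundancy Control Trailer (RCT) is
 * trimmed off the end of the frame instead of an HSR tag being stripped.
 */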
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

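/* Write the LAN identifier into the RCT: 0 for frames sent on slave A,
 * 1 for slave B, combined with the configured net_id.
 */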
static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}

/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}

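/* HSR path identifier: 0 for frames sent on slave A, 1 for slave B */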
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

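/* Fill in the HSR tag (path id, LSDU size, sequence number) directly after
 * the Ethernet header; the caller must already have made room for it.
 */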
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
	skb->protocol = hsr_ethhdr->ethhdr.h_proto;

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
	 * NULL in that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	/* prp_fill_rct() returns NULL (and frees the skb) if padding fails */
	return prp_fill_rct(skb, frame, port);
}

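/* Deliver the frame to the upper layers through the master (hsr) device,
 * substituting the source address and updating the rx statistics.
 */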
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		/* netif_rx() may already have consumed the skb, so use the
		 * length saved before handing it over.
		 */
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

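/* Transmit the frame on the given slave port */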
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace the mac
		 * address of the outgoing frame with that of the outgoing
		 * slave.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

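/* PRP never forwards between its slave ports: a frame received on LAN A must
 * not be sent out on LAN B and vice versa.
 */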
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type ==  HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type ==  HSR_PT_SLAVE_A));
}

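/* When the hardware forwards between the ring ports itself
 * (NETIF_F_HW_HSR_FWD), don't also forward between the slaves in software.
 */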
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	if (port->dev->features & NETIF_F_HW_HSR_FWD)
		return prp_drop_frame(frame, port);

	return false;
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   is a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also, for frames from a SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped, e.g. for PRP there is no
		 * forwarding between the slave ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}

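/* Decide whether the frame is addressed to this host: is_local_dest means it
 * should also be delivered to the master port, is_local_exclusive means only
 * there.
 */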
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

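/* Frame without an HSR tag or PRP trailer: it came either from a SAN via a
 * slave port or from the protocol stack via the master port, in which case a
 * new sequence number is assigned.
 */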
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);
}

void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	/* Check for a valid PRP Redundancy Control Trailer */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return;
	}
	handle_std_frame(skb, frame);
}

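/* Parse the incoming skb and fill in the hsr_frame_info used by
 * hsr_forward_do(): source node, VLAN handling, protocol specific skb
 * pointers and the local destination flags.
 */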
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	hsr->proto_ops->fill_frame_info(proto, skb, frame);
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as for egress from the
	 * master port, so only increment the master port's stats here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}