/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"


struct hsr_node;

struct hsr_frame_info {
	struct sk_buff *skb_std;	/* Untagged version of the frame */
	struct sk_buff *skb_hsr;	/* HSR-tagged version of the frame */
	struct hsr_port *port_rcv;	/* Port the frame was received on */
	struct hsr_node *node_src;	/* Node table entry for the sender */
	u16 sequence_nr;
	bool is_supervision;
	bool is_vlan;
	bool is_local_dest;		/* Also deliver to the master port */
	bool is_local_exclusive;	/* Addressed to this host only */
};


/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks, i.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *ethHdr;
	struct hsr_sup_tag *hsrSupTag;
	struct hsrv1_ethhdr_sp *hsrV1Hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	ethHdr = (struct ethhdr *) skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(ethHdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(ethHdr->h_proto == htons(ETH_P_PRP)
			|| ethHdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
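		/* HSRv1 (ETH_P_HSR): the supervision payload sits behind the
		 * HSR tag, and the tag must encapsulate the supervision
		 * EtherType ETH_P_PRP; HSRv0 frames (handled below) carry the
		 * supervision tag directly after the Ethernet header.
		 */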
		hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb);
		if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsrSupTag = &hsrV1Hdr->hsr_sup;
	} else {
		hsrSupTag = &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup;
	}

	if ((hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
	    (hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
		return false;
	if ((hsrSupTag->HSR_TLV_Length != 12) &&
	    (hsrSupTag->HSR_TLV_Length != sizeof(struct hsr_sup_payload)))
		return false;

	return true;
}


/* Create an untagged copy of skb_in: drop the HSR tag but keep the Ethernet
 * (and possibly VLAN) header.
 */
static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
					   struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (skb == NULL)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
					      struct hsr_port *port)
{
	if (!frame->skb_std)
		frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
	return skb_clone(frame->skb_std, GFP_ATOMIC);
}


static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
			 struct hsr_port *port, u8 protoVersion)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lane_id;
	int lsdu_size;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	lsdu_size = skb->len - ETH_HLEN;
	if (frame->is_vlan)
		lsdu_size -= VLAN_HLEN;

	hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ?
			ETH_P_HSR : ETH_P_PRP);
}

static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
					 struct hsr_frame_info *frame,
					 struct hsr_port *port)
{
	int movelen;
	unsigned char *dst, *src;
	struct sk_buff *skb;

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Move the Ethernet (and possibly VLAN) header up to make room for
	 * the HSR tag between the addresses and the EtherType.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	hsr_fill_tag(skb, frame, port, port->hsr->protVersion);

	return skb;
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
					    struct hsr_port *port)
{
	if (frame->skb_hsr)
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);

	if ((port->type != HSR_PT_SLAVE_A) && (port->type != HSR_PT_SLAVE_B)) {
		WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
		return NULL;
	}

	return create_tagged_skb(frame->skb_std, frame, port);
}


static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	recv_len = skb->len;	/* Read before netif_rx() takes over the skb */
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace the
		 * source MAC address of the outgoing frame with that of the
		 * outgoing slave port.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}


/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   is a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if ((port->type == HSR_PT_MASTER) && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if ((port->type != HSR_PT_MASTER) && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before */
		if (hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && (port->type == HSR_PT_MASTER)) {
			hsr_handle_sup_frame(frame->skb_hsr,
					     frame->node_src,
					     frame->port_rcv);
			continue;
		}

		if (port->type != HSR_PT_MASTER)
			skb = frame_get_tagged_skb(frame, port);
		else
			skb = frame_get_stripped_skb(frame, port);
		if (skb == NULL) {
			/* FIXME: Record the dropped frame? */
			continue;
		}
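		/* Stripped frames go up through the master (hsr) device;
		 * tagged frames go out on the ring ports.
		 */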
		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}


static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if ((skb->pkt_type == PACKET_HOST) ||
	    (skb->pkt_type == PACKET_MULTICAST) ||
	    (skb->pkt_type == PACKET_BROADCAST)) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}


static int hsr_fill_frame_info(struct hsr_frame_info *frame,
			       struct sk_buff *skb, struct hsr_port *port)
{
	struct ethhdr *ethhdr;
	unsigned long irqflags;

	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
	if (frame->node_src == NULL)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	frame->is_vlan = false;
	if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
		frame->is_vlan = true;
		/* FIXME: */
		WARN_ONCE(1, "HSR: VLAN not yet supported");
	}
	if (ethhdr->h_proto == htons(ETH_P_PRP)
			|| ethhdr->h_proto == htons(ETH_P_HSR)) {
		frame->skb_std = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
	} else {
		frame->skb_std = skb;
		frame->skb_hsr = NULL;
		/* Sequence nr for the master node */
		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
		frame->sequence_nr = port->hsr->sequence_nr;
		port->hsr->sequence_nr++;
		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
	}

	frame->port_rcv = port;
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (hsr_fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);

	if (frame.skb_hsr != NULL)
		kfree_skb(frame.skb_hsr);
	if (frame.skb_std != NULL)
		kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}