// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>
#include "dsa_priv.h"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH		BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA		BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY		BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER		BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)		((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)		((x) & GENMASK(3, 0))

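/* Worked example (illustrative only, values derived from the macros above):
 * a trap-to-host RX header of 0x2023 decodes as SJA1110_RX_HEADER_HOST_ONLY |
 * source port 2 | switch id 3. When SJA1110_RX_HEADER_HAS_TRAILER is set
 * instead, the low 12 bits hold the trailer offset and the source port and
 * switch id move to the trailer.
 */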
/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)		(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC		BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS		BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC		BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER		BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)		(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)		((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)		(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)		(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)		(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)		(((x) << 1) & GENMASK(11, 1))

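/* Worked example (illustrative only, values derived from the macros above):
 * a control frame sent towards port 2 of switch 0 with priority 7 carries a
 * TX trailer of SJA1110_TX_TRAILER_PRIO(7) | SJA1110_TX_TRAILER_SWITCHID(0) |
 * SJA1110_TX_TRAILER_DESTPORTS(BIT(2)) == 0x00e00008, see sja1110_xmit().
 */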
#define SJA1110_META_TSTAMP_SIZE		10

#define SJA1110_HEADER_LEN			4
#define SJA1110_RX_TRAILER_LEN			13
#define SJA1110_TX_TRAILER_LEN			4
#define SJA1110_MAX_PADDING_LEN			15

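/* The two DMAC filters used below (defined in include/linux/dsa/sja1105.h)
 * match on the upper 3 bytes of the destination MAC: filter A covers the
 * IEEE 802.1D link-local range 01:80:C2:xx:xx:xx, and filter B the IEEE 1588
 * Annex F PTP-over-Ethernet range 01:1B:19:xx:xx:xx.
 */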
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

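/* Layout of the 8 bytes that follow the Ethernet header of a meta frame,
 * as consumed by the packing() calls below:
 *
 *	buf[0..3]: partial RX timestamp (E/T populates only bits 23:0,
 *		   P/Q/R/S all 32 bits)
 *	buf[4]:    byte 4 of the timestamped frame's original DMAC
 *	buf[5]:    byte 3 of the timestamped frame's original DMAC
 *	buf[6]:    source port
 *	buf[7]:    switch id
 */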
static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}

static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_port *sp = dp->priv;

	if (!dsa_port_is_sja1105(dp))
		return skb;

	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	skb_queue_tail(&sp->xmit_queue, skb_get(skb));
	kthread_queue_work(sp->xmit_worker, &sp->xmit_work);

	return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, then if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	list_for_each_entry(other_dp, &ds->dst->ports, list) {
		if (other_dp->ds != ds)
			continue;

		if (!other_dp->bridge_dev)
			continue;

		/* An error is returned only if CONFIG_BRIDGE_VLAN_FILTERING
		 * is disabled, which seems pointless to handle, as our port
		 * cannot become VLAN-aware in that case.
		 */
		br_vlan_get_proto(other_dp->bridge_dev, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	struct net_device *br = dp->bridge_dev;
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

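/* On TX, the SJA1110 in-band control extension for link-local packets looks
 * like this (layout reconstructed from the code below):
 *
 *	DMAC | SMAC | ETH_P_SJA1110 | 2-byte TX header |
 *	pvid-tagged original frame | 4-byte TX trailer
 *
 * The trailer position encoded in the TX header is counted from the
 * destination MAC address.
 */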
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 are true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_port *sp = dp->priv;

		if (unlikely(!dsa_port_is_sja1105(dp)))
			return skb;

		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&sp->data->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (sp->data->stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(sp->data->stampable_skb);
		}

		/* Hold a reference to prevent dsa_switch_rcv
		 * from freeing the skb.
		 */
		sp->data->stampable_skb = skb_get(skb);
		spin_unlock(&sp->data->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_port *sp = dp->priv;
		struct sk_buff *stampable_skb;

		if (unlikely(!dsa_port_is_sja1105(dp)))
			return skb;

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			return NULL;

		spin_lock(&sp->data->meta_lock);

		stampable_skb = sp->data->stampable_skb;
		sp->data->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(dp->ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&sp->data->meta_lock);
	}

	return skb;
}

static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

/* If the VLAN in the packet is a tag_8021q one, set @source_port and
 * @switch_id and strip the header. Otherwise set @vid and keep it in the
 * packet.
 */
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, u16 *vid)
{
	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q_rxvlan(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id);

	/* Try our best with imprecise RX */
	*vid = vlan_tci & VLAN_VID_MASK;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

	if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

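/* Each SJA1110_META_TSTAMP_SIZE (10-byte) timestamp record inside a meta
 * frame is laid out as follows (sketch based on the parsing below):
 *
 *	buf[0]:    timestamp id
 *	buf[1]:    bits 7:4 source port, bit 3 direction flag (RX/TX)
 *	buf[2..9]: 64-bit timestamp, big endian
 */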
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
					    tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}

static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							    int *source_port,
							    int *switch_id,
							    bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to work properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);

	if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = "sja1105",
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = "sja1110",
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");