xref: /openbmc/linux/net/dsa/tag_sja1105.c (revision faf69551)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/skbuff.h>
#include <linux/packing.h>
#include "dsa_priv.h"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH		BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA		BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY		BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER		BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)		((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)		((x) & GENMASK(3, 0))

/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)		(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC		BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS		BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC		BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER		BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)		(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)		((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)		(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)		(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)		(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)		(((x) << 1) & GENMASK(11, 1))

#define SJA1110_META_TSTAMP_SIZE		10

#define SJA1110_HEADER_LEN			4
#define SJA1110_RX_TRAILER_LEN			13
#define SJA1110_TX_TRAILER_LEN			4
#define SJA1110_MAX_PADDING_LEN			15
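
/* Worked example (illustrative, derived purely from the macros above): a
 * host-to-switch control frame with PCP 5, sent by switch 0 towards port 2,
 * with its trailer starting at byte offset 82:
 *
 *   header  = SJA1110_HEADER_HOST_TO_SWITCH |        0x8000
 *             SJA1110_TX_HEADER_HAS_TRAILER |        0x0800
 *             SJA1110_TX_HEADER_TRAILER_POS(82)      0x0052
 *           = 0x8852
 *   trailer = SJA1110_TX_TRAILER_PRIO(5) |           0x00a00000
 *             SJA1110_TX_TRAILER_SWITCHID(0) |       0x00000000
 *             SJA1110_TX_TRAILER_DESTPORTS(BIT(2))   0x00000008
 *           = 0x00a00008
 */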

enum sja1110_meta_tstamp {
	SJA1110_META_TSTAMP_TX = 0,
	SJA1110_META_TSTAMP_RX = 1,
};

/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
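/* The two filters (defined in include/linux/dsa/sja1105.h) are assumed to
 * match the IEEE 802.1D link-local range 01:80:C2:xx:xx:xx and the PTP over
 * Ethernet multicast 01:1B:19:xx:xx:xx, respectively.
 */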
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
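	/* Payload layout implied by the unpacking calls below (offsets
	 * relative to the end of the Ethernet header):
	 *
	 *   bytes 0..3: partial RX timestamp (bits 31:0)
	 *   byte  4:    byte 4 of the original destination MAC
	 *   byte  5:    byte 3 of the original destination MAC
	 *   byte  6:    source port
	 *   byte  7:    switch ID
	 */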
	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}

static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_port *sp = dp->priv;

	if (!dsa_port_is_sja1105(dp))
		return skb;

	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	skb_queue_tail(&sp->xmit_queue, skb_get(skb));
	kthread_queue_work(sp->xmit_worker, &sp->xmit_work);

	return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		if (!other_dp->bridge_dev)
			continue;

		/* An error is returned only if CONFIG_BRIDGE_VLAN_FILTERING
		 * is disabled, which seems pointless to handle, as our port
		 * cannot become VLAN-aware in that case.
		 */
		br_vlan_get_proto(other_dp->bridge_dev, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	struct net_device *br = dp->bridge_dev;
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_tx_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
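/* Illustrative sequence on the CPU port, as described above:
 *
 *   [link-local frame, port 2] -> buffered in stampable_skb, NULL
 *                                 returned to DSA
 *   [meta follow-up,   port 2] -> partial tstamp copied into the buffered
 *                                 skb, which is then returned in its place
 */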
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_port *sp = dp->priv;

		if (unlikely(!dsa_port_is_sja1105(dp)))
			return skb;

		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&sp->data->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (sp->data->stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(sp->data->stampable_skb);
		}

		/* Hold a reference to prevent dsa_switch_rcv
		 * from freeing the skb.
		 */
		sp->data->stampable_skb = skb_get(skb);
		spin_unlock(&sp->data->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_port *sp = dp->priv;
		struct sk_buff *stampable_skb;

		if (unlikely(!dsa_port_is_sja1105(dp)))
			return skb;

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			return NULL;

		spin_lock(&sp->data->meta_lock);

		stampable_skb = sp->data->stampable_skb;
		sp->data->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(dp->ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&sp->data->meta_lock);
	}

	return skb;
}

static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

/* If the VLAN in the packet is a tag_8021q one, set @source_port and
 * @switch_id and strip the header. Otherwise set @vid and keep it in the
 * packet.
 */
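/* Note: tag_8021q RX VIDs are assumed to encode the source switch ID and
 * source port in dedicated bit fields (see include/linux/dsa/8021q.h);
 * vid_is_dsa_8021q_rxvlan() tests for that layout and dsa_8021q_rcv()
 * decodes it.
 */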
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, u16 *vid)
{
	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q_rxvlan(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id);

	/* Try our best with imprecise RX */
	*vid = vlan_tci & VLAN_VID_MASK;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
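		/* Illustrative example: an original DMAC of
		 * 01:80:c2:00:00:0e received from port 2 of switch 0
		 * arrives at the CPU as 01:80:c2:02:00:0e.
		 */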
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

	if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
					u8 ts_id, enum sja1110_meta_tstamp dir,
					u64 tstamp)
{
	struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct skb_shared_hwtstamps shwt = {0};
	struct sja1105_port *sp = dp->priv;

	if (!dsa_port_is_sja1105(dp))
		return;

	/* We don't care about RX timestamps on the CPU port */
	if (dir == SJA1110_META_TSTAMP_RX)
		return;

	spin_lock(&sp->data->skb_txtstamp_queue.lock);

	skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
		if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
			continue;

		__skb_unlink(skb, &sp->data->skb_txtstamp_queue);
		skb_match = skb;

		break;
	}

	spin_unlock(&sp->data->skb_txtstamp_queue.lock);

	if (WARN_ON(!skb_match))
		return;

	shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
	skb_complete_tx_timestamp(skb_match, &shwt);
}

static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

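	/* Each record is SJA1110_META_TSTAMP_SIZE (10) bytes; the layout
	 * implied by the parsing below is: timestamp ID (1 byte), source
	 * port and direction (1 byte), timestamp value (8 bytes, big
	 * endian).
	 */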
	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
					    tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}

static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							    int *source_port,
							    int *switch_id,
							    bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

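	/* Illustrative decode: an rx_header of 0x2023 is a trap-to-host
	 * frame (SJA1110_RX_HEADER_HOST_ONLY set, no metadata, no trailer)
	 * received from source port 2 of switch ID 3.
	 */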
	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to work properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);

	if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = "sja1105",
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = "sja1110",
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");