// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>

#include "tag.h"
#include "tag_8021q.h"

#define SJA1105_NAME				"sja1105"
#define SJA1110_NAME				"sja1110"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH		BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA		BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY		BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER		BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)		((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)		((x) & GENMASK(3, 0))

/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)		(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC		BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS		BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC		BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER		BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)		(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)		((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)		(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)		(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)		(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)		(((x) << 1) & GENMASK(11, 1))
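
/* TX trailer layout (one 32-bit word, written big endian), as composed by
 * the four macros above: bits 31:24 carry the timestamp ID, 23:21 the
 * priority, 15:12 the switch ID and 11:1 the destination port mask; the
 * remaining bits are left clear by this tagger.
 */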

#define SJA1110_META_TSTAMP_SIZE		10

#define SJA1110_HEADER_LEN			4
#define SJA1110_RX_TRAILER_LEN			13
#define SJA1110_TX_TRAILER_LEN			4
#define SJA1110_MAX_PADDING_LEN			15

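/* Bit in sja1105_tagger_private::state which tracks whether RX
 * timestamping (and with it the meta frame state machine) is enabled
 */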
#define SJA1105_HWTS_RX_EN			0

struct sja1105_tagger_private {
	struct sja1105_tagger_data data; /* Must be first */
	unsigned long state;
	/* Protects concurrent access to the meta state machine
	 * from taggers running on multiple ports on SMP systems
	 */
	spinlock_t meta_lock;
	struct sk_buff *stampable_skb;
	struct kthread_worker *xmit_worker;
};

static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
	return ds->tagger_data;
}

/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
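	/* Meta frame payload layout, per the unpacking below:
	 * bytes 0..3 partial RX timestamp, byte 4 original DMAC byte 4,
	 * byte 5 original DMAC byte 3, byte 6 source port,
	 * byte 7 switch ID.
	 */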
	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}

static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
	struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
	void (*xmit_work_fn)(struct kthread_work *work);
	struct sja1105_deferred_xmit_work *xmit_work;
	struct kthread_worker *xmit_worker;

	xmit_work_fn = tagger_data->xmit_work_fn;
	xmit_worker = priv->xmit_worker;

	if (!xmit_work_fn || !xmit_worker)
		return NULL;

	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL;

	kthread_init_work(&xmit_work->work, xmit_work_fn);
	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	xmit_work->dp = dp;
	xmit_work->skb = skb_get(skb);

	kthread_queue_work(xmit_worker, &xmit_work->work);

	return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, then if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *br = dsa_port_bridge_dev_get(other_dp);

		if (!br)
			continue;

		/* An error is returned only when CONFIG_BRIDGE_VLAN_FILTERING
		 * is disabled, which seems pointless to handle, as our port
		 * cannot become VLAN-aware in that case.
		 */
		br_vlan_get_proto(br, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

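/* TX entry point for SJA1105: bridged frames (offload_fwd_mark) take the
 * imprecise path, other data frames get a tag_8021q VLAN whose TCI encodes
 * the PCP and the port's standalone VID, and link-local (management)
 * frames are pvid-tagged and handed to the deferred xmit worker, which
 * sends them over SPI-installed management routes.
 */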
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

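/* On SJA1110, control packets are steered to the egress port through a
 * 4-byte in-band extension (the ETH_P_SJA1110 EtherType followed by a
 * 16-bit control word) plus a 4-byte trailer appended to the frame; the
 * control word records the offset at which the trailer starts.
 */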
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
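/* An example sequence on the CPU port, as guaranteed by the hardware:
 *   1. link-local frame from port N  -> buffered as stampable_skb
 *   2. meta follow-up for that frame -> partial timestamp transferred
 *      onto the buffered frame, which is then delivered up the stack
 */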
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;

		priv = sja1105_tagger_private(ds);

		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&priv->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (priv->stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(priv->stampable_skb);
		}

		/* Hold a reference to avoid dsa_switch_rcv
		 * from freeing the skb.
		 */
		priv->stampable_skb = skb_get(skb);
		spin_unlock(&priv->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;
		struct sk_buff *stampable_skb;

		priv = sja1105_tagger_private(ds);

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			return NULL;

		spin_lock(&priv->meta_lock);

		stampable_skb = priv->stampable_skb;
		priv->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&priv->meta_lock);
	}

	return skb;
}

static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
}

static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	if (on)
		set_bit(SJA1105_HWTS_RX_EN, &priv->state);
	else
		clear_bit(SJA1105_HWTS_RX_EN, &priv->state);

	/* Initialize the meta state machine to a known state */
	if (!priv->stampable_skb)
		return;

	kfree_skb(priv->stampable_skb);
	priv->stampable_skb = NULL;
}

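/* The tag_8021q VLAN can show up in three forms: with the ETH_P_SJA1105
 * TPID (VLAN-unaware ports), with a regular ETH_P_8021Q TPID (typically
 * the case for ports under a VLAN-aware bridge), or already extracted
 * into the hwaccel area by the DSA master.
 */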
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

/* If the VLAN in the packet is a tag_8021q one, set @source_port and
 * @switch_id and strip the header. Otherwise set @vid and keep it in the
 * packet.
 */
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, int *vbid, u16 *vid)
{
	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id, vbid);

	/* Try our best with imprecise RX */
	*vid = vlan_tci & VLAN_VID_MASK;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

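	/* Three lookup strategies for the ingress netdev: imprecise RX by
	 * bridge VBID, imprecise RX by bridge VLAN ID, or precise RX by
	 * tag_8021q source port and switch ID.
	 */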
	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct sja1105_tagger_data *tagger_data;
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	tagger_data = sja1105_tagger_data(ds);
	if (!tagger_data->meta_tstamp_handler)
		return NULL;

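	/* Each timestamp record is SJA1110_META_TSTAMP_SIZE (10) bytes:
	 * byte 0 holds the timestamp ID, byte 1 packs the source port
	 * (bits 7:4) and the direction bit (bit 3), and bytes 2..9 hold
	 * the 64-bit timestamp in big endian.
	 */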
	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
						 tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}

static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							    int *source_port,
							    int *switch_id,
							    bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		if (pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN))
			return NULL;
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to work properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
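	/* The encapsulated EtherType sits in the last two bytes of the
	 * VLAN header, hence the (VLAN_HLEN / 2) - 1 index below.
	 */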
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static void sja1105_disconnect(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = ds->tagger_data;

	kthread_destroy_worker(priv->xmit_worker);
	kfree(priv);
	ds->tagger_data = NULL;
}

static int sja1105_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data;
	struct sja1105_tagger_private *priv;
	struct kthread_worker *xmit_worker;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->meta_lock);

	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
					    ds->dst->index, ds->index);
	if (IS_ERR(xmit_worker)) {
		err = PTR_ERR(xmit_worker);
		kfree(priv);
		return err;
	}

	priv->xmit_worker = xmit_worker;
	/* Export functions for switch driver use */
	tagger_data = &priv->data;
	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
	ds->tagger_data = priv;

	return 0;
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = SJA1105_NAME,
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105, SJA1105_NAME);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = SJA1110_NAME,
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110, SJA1110_NAME);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");