xref: /openbmc/linux/net/dsa/tag_dsa.c (revision dc3401c8)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Regular and Ethertype DSA tagging
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  *
6  * Regular DSA
7  * -----------
8 
9  * For untagged (in 802.1Q terms) packets, the switch will splice in
10  * the tag between the SA and the ethertype of the original
11  * packet. Tagged frames will instead have their outermost .1Q tag
12  * converted to a DSA tag. It expects the same layout when receiving
13  * packets from the CPU.
14  *
15  * Example:
16  *
17  *     .----.----.----.---------
18  * Pu: | DA | SA | ET | Payload ...
19  *     '----'----'----'---------
20  *       6    6    2       N
21  *     .----.----.--------.-----.----.---------
22  * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
23  *     '----'----'--------'-----'----'---------
24  *       6    6       2      2    2       N
25  *     .----.----.-----.----.---------
26  * Pd: | DA | SA | DSA | ET | Payload ...
27  *     '----'----'-----'----'---------
28  *       6    6     4    2       N
29  *
30  * No matter if a packet is received untagged (Pu) or tagged (Pt),
31  * they will both have the same layout (Pd) when they are sent to the
32  * CPU. This is done by ignoring 802.3, replacing the ethertype field
33  * with more metadata, among which is a bit to signal if the original
34  * packet was tagged or not.
35  *
36  * Ethertype DSA
37  * -------------
38  * Uses the exact same tag format as regular DSA, but also includes a
39  * proper ethertype field (which the mv88e6xxx driver sets to
40  * ETH_P_EDSA/0xdada) followed by two zero bytes:
41  *
42  * .----.----.--------.--------.-----.----.---------
43  * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
44  * '----'----'--------'--------'-----'----'---------
45  *   6    6       2        2      4    2       N
46  */
47 
48 #include <linux/etherdevice.h>
49 #include <linux/list.h>
50 #include <linux/slab.h>
51 
52 #include "dsa_priv.h"
53 
54 #define DSA_HLEN	4
55 
56 /**
57  * enum dsa_cmd - DSA Command
58  * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
59  *     the CPU port. This is needed to implement control protocols,
60  *     e.g. STP and LLDP, that must not allow those control packets to
61  *     be switched according to the normal rules.
62  * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
63  *     port, ignoring all the barriers that the switch normally
64  *     enforces (VLANs, STP port states etc.). No source address
65  *     learning takes place. "sudo send packet"
66  * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
67  *     user configured ingress or egress monitor criteria. These are
68  *     forwarded by the switch tree to the user configured ingress or
69  *     egress monitor port, which can be set to the CPU port or a
70  *     regular port. If the destination is a regular port, the tag
71  *     will be removed before egressing the port. If the destination
72  *     is the CPU port, the tag will not be removed.
73  * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
74  *     through the switch tree, including the flows that are directed
75  *     towards the CPU. Its device/port tuple encodes the original
76  *     source port on which the packet ingressed. It can also be used
77  *     on transmit by the CPU to defer the forwarding decision to the
78  *     hardware, based on the current config of PVT/VTU/ATU
79  *     etc. Source address learning takes places if enabled on the
80  *     receiving DSA/CPU port.
81  */
enum dsa_cmd {
	DSA_CMD_TO_CPU     = 0,	/* trapped/mirrored to the CPU port */
	DSA_CMD_FROM_CPU   = 1,	/* directed injection from the CPU */
	DSA_CMD_TO_SNIFFER = 2,	/* copy for an ingress/egress monitor */
	DSA_CMD_FORWARD    = 3	/* normal switched bulk traffic */
};
88 
89 /**
90  * enum dsa_code - TO_CPU Code
91  *
92  * @DSA_CODE_MGMT_TRAP: DA was classified as a management
93  *     address. Typical examples include STP BPDUs and LLDP.
94  * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
95  * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
96  * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
97  *     the device. Typical examples are matching on DA/SA/VID and DHCP
98  *     snooping.
99  * @DSA_CODE_ARP_MIRROR: The name says it all really.
100  * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
101  *     particular policy was set to trigger a mirror instead of a
102  *     trap.
103  * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
104  * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
105  *
106  * A 3-bit code is used to relay why a particular frame was sent to
107  * the CPU. We only use this to determine if the packet was mirrored
108  * or trapped, i.e. whether the packet has been forwarded by hardware
109  * or not.
110  *
111  * This is the superset of all possible codes. Any particular device
112  * may only implement a subset.
113  */
enum dsa_code {
	DSA_CODE_MGMT_TRAP     = 0,
	DSA_CODE_FRAME2REG     = 1,
	DSA_CODE_IGMP_MLD_TRAP = 2,
	DSA_CODE_POLICY_TRAP   = 3,
	DSA_CODE_ARP_MIRROR    = 4,
	DSA_CODE_POLICY_MIRROR = 5,
	DSA_CODE_RESERVED_6    = 6,	/* dropped on receive */
	DSA_CODE_RESERVED_7    = 7	/* dropped on receive */
};
124 
/* dsa_xmit_ll() - splice a DSA tag into an outgoing skb.
 * @skb: packet to transmit
 * @dev: DSA slave netdev the packet was queued on
 * @extra: number of bytes the caller will insert in front of the DSA
 *         tag itself (0 for regular DSA, EDSA_HLEN - DSA_HLEN for
 *         Ethertype DSA)
 *
 * Returns the tagged skb, or NULL on error.
 */
static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
				   u8 extra)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u8 tag_dev, tag_port;
	enum dsa_cmd cmd;
	u8 *dsa_header;
	u16 pvid = 0;
	int err;

	if (skb->offload_fwd_mark) {
		struct dsa_switch_tree *dst = dp->ds->dst;
		struct net_device *br = dp->bridge_dev;

		cmd = DSA_CMD_FORWARD;

		/* When offloading forwarding for a bridge, inject FORWARD
		 * packets on behalf of a virtual switch device with an index
		 * past the physical switches.
		 */
		tag_dev = dst->last_switch + 1 + dp->bridge_num;
		tag_port = 0;

		/* If we are offloading forwarding for a VLAN-unaware bridge,
		 * inject packets to hardware using the bridge's pvid, since
		 * that's where the packets ingressed from.
		 */
		if (!br_vlan_enabled(br)) {
			/* Safe because __dev_queue_xmit() runs under
			 * rcu_read_lock_bh()
			 */
			err = br_vlan_get_pvid_rcu(br, &pvid);
			if (err)
				return NULL;
		}
	} else {
		/* Directed transmit to a single known port. */
		cmd = DSA_CMD_FROM_CPU;
		tag_dev = dp->ds->index;
		tag_port = dp->index;
	}

	if (skb->protocol == htons(ETH_P_8021Q)) {
		/* The 4-byte DSA tag overwrites the .1Q tag in place, so
		 * only the extra (ethertype) bytes need new room.
		 */
		if (extra) {
			skb_push(skb, extra);
			dsa_alloc_etype_header(skb, extra);
		}

		/* Construct tagged DSA tag from 802.1Q tag. */
		dsa_header = dsa_etype_header_pos_tx(skb) + extra;
		/* 0x20 is the "frame was tagged" bit, cf. the receive path. */
		dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
		dsa_header[1] = tag_port << 3;

		/* Move CFI field from byte 2 to byte 1. */
		if (dsa_header[2] & 0x10) {
			dsa_header[1] |= 0x01;
			dsa_header[2] &= ~0x10;
		}
	} else {
		/* Untagged packet: make room for the full DSA tag. */
		skb_push(skb, DSA_HLEN + extra);
		dsa_alloc_etype_header(skb, DSA_HLEN + extra);

		/* Construct untagged DSA tag. */
		dsa_header = dsa_etype_header_pos_tx(skb) + extra;

		dsa_header[0] = (cmd << 6) | tag_dev;
		dsa_header[1] = tag_port << 3;
		/* pvid is 0 unless set for a VLAN-unaware bridge above. */
		dsa_header[2] = pvid >> 8;
		dsa_header[3] = pvid & 0xff;
	}

	return skb;
}
197 
198 static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
199 				  u8 extra)
200 {
201 	bool trap = false, trunk = false;
202 	int source_device, source_port;
203 	enum dsa_code code;
204 	enum dsa_cmd cmd;
205 	u8 *dsa_header;
206 
207 	/* The ethertype field is part of the DSA header. */
208 	dsa_header = dsa_etype_header_pos_rx(skb);
209 
210 	cmd = dsa_header[0] >> 6;
211 	switch (cmd) {
212 	case DSA_CMD_FORWARD:
213 		trunk = !!(dsa_header[1] & 7);
214 		break;
215 
216 	case DSA_CMD_TO_CPU:
217 		code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
218 
219 		switch (code) {
220 		case DSA_CODE_FRAME2REG:
221 			/* Remote management is not implemented yet,
222 			 * drop.
223 			 */
224 			return NULL;
225 		case DSA_CODE_ARP_MIRROR:
226 		case DSA_CODE_POLICY_MIRROR:
227 			/* Mark mirrored packets to notify any upper
228 			 * device (like a bridge) that forwarding has
229 			 * already been done by hardware.
230 			 */
231 			break;
232 		case DSA_CODE_MGMT_TRAP:
233 		case DSA_CODE_IGMP_MLD_TRAP:
234 		case DSA_CODE_POLICY_TRAP:
235 			/* Traps have, by definition, not been
236 			 * forwarded by hardware, so don't mark them.
237 			 */
238 			trap = true;
239 			break;
240 		default:
241 			/* Reserved code, this could be anything. Drop
242 			 * seems like the safest option.
243 			 */
244 			return NULL;
245 		}
246 
247 		break;
248 
249 	default:
250 		return NULL;
251 	}
252 
253 	source_device = dsa_header[0] & 0x1f;
254 	source_port = (dsa_header[1] >> 3) & 0x1f;
255 
256 	if (trunk) {
257 		struct dsa_port *cpu_dp = dev->dsa_ptr;
258 
259 		/* The exact source port is not available in the tag,
260 		 * so we inject the frame directly on the upper
261 		 * team/bond.
262 		 */
263 		skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
264 	} else {
265 		skb->dev = dsa_master_find_slave(dev, source_device,
266 						 source_port);
267 	}
268 
269 	if (!skb->dev)
270 		return NULL;
271 
272 	/* When using LAG offload, skb->dev is not a DSA slave interface,
273 	 * so we cannot call dsa_default_offload_fwd_mark and we need to
274 	 * special-case it.
275 	 */
276 	if (trunk)
277 		skb->offload_fwd_mark = true;
278 	else if (!trap)
279 		dsa_default_offload_fwd_mark(skb);
280 
281 	/* If the 'tagged' bit is set; convert the DSA tag to a 802.1Q
282 	 * tag, and delete the ethertype (extra) if applicable. If the
283 	 * 'tagged' bit is cleared; delete the DSA tag, and ethertype
284 	 * if applicable.
285 	 */
286 	if (dsa_header[0] & 0x20) {
287 		u8 new_header[4];
288 
289 		/* Insert 802.1Q ethertype and copy the VLAN-related
290 		 * fields, but clear the bit that will hold CFI (since
291 		 * DSA uses that bit location for another purpose).
292 		 */
293 		new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
294 		new_header[1] = ETH_P_8021Q & 0xff;
295 		new_header[2] = dsa_header[2] & ~0x10;
296 		new_header[3] = dsa_header[3];
297 
298 		/* Move CFI bit from its place in the DSA header to
299 		 * its 802.1Q-designated place.
300 		 */
301 		if (dsa_header[1] & 0x01)
302 			new_header[2] |= 0x10;
303 
304 		/* Update packet checksum if skb is CHECKSUM_COMPLETE. */
305 		if (skb->ip_summed == CHECKSUM_COMPLETE) {
306 			__wsum c = skb->csum;
307 			c = csum_add(c, csum_partial(new_header + 2, 2, 0));
308 			c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
309 			skb->csum = c;
310 		}
311 
312 		memcpy(dsa_header, new_header, DSA_HLEN);
313 
314 		if (extra)
315 			dsa_strip_etype_header(skb, extra);
316 	} else {
317 		skb_pull_rcsum(skb, DSA_HLEN);
318 		dsa_strip_etype_header(skb, DSA_HLEN + extra);
319 	}
320 
321 	return skb;
322 }
323 
324 #if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
325 
/* Regular DSA transmit: no ethertype prefix, hence extra == 0. */
static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
	return dsa_xmit_ll(skb, dev, 0);
}
330 
/* Regular DSA receive: ensure the 4-byte tag is in the linear area
 * before dsa_rcv_ll() dereferences it.
 */
static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
		return NULL;

	return dsa_rcv_ll(skb, dev, 0);
}
338 
/* Tagging operations for regular DSA. The transmit path may push up
 * to DSA_HLEN bytes, hence the needed_headroom.
 */
static const struct dsa_device_ops dsa_netdev_ops = {
	.name	  = "dsa",
	.proto	  = DSA_TAG_PROTO_DSA,
	.xmit	  = dsa_xmit,
	.rcv	  = dsa_rcv,
	.needed_headroom = DSA_HLEN,
};

DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
349 #endif	/* CONFIG_NET_DSA_TAG_DSA */
350 
351 #if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
352 
353 #define EDSA_HLEN 8
354 
355 static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
356 {
357 	u8 *edsa_header;
358 
359 	skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
360 	if (!skb)
361 		return NULL;
362 
363 	edsa_header = dsa_etype_header_pos_tx(skb);
364 	edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
365 	edsa_header[1] = ETH_P_EDSA & 0xff;
366 	edsa_header[2] = 0x00;
367 	edsa_header[3] = 0x00;
368 	return skb;
369 }
370 
/* Ethertype DSA receive: ensure the full 8-byte header is linear,
 * consume the ethertype + zero-pad bytes, then hand the remaining
 * 4-byte DSA tag to dsa_rcv_ll().
 */
static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
		return NULL;

	skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);

	return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
}
380 
/* Tagging operations for Ethertype DSA (8-byte header: ethertype +
 * two zero bytes + DSA tag).
 */
static const struct dsa_device_ops edsa_netdev_ops = {
	.name	  = "edsa",
	.proto	  = DSA_TAG_PROTO_EDSA,
	.xmit	  = edsa_xmit,
	.rcv	  = edsa_rcv,
	.needed_headroom = EDSA_HLEN,
};

DSA_TAG_DRIVER(edsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
391 #endif	/* CONFIG_NET_DSA_TAG_EDSA */
392 
/* All tag drivers provided by this module, registered and
 * unregistered as a group.
 */
static struct dsa_tag_driver *dsa_tag_drivers[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
	&DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
	&DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
#endif
};

module_dsa_tag_drivers(dsa_tag_drivers);

MODULE_LICENSE("GPL");
405