1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021 Intel Corporation
4  */
5 
6 #include <uapi/linux/if_ether.h>
7 #include <uapi/linux/if_arp.h>
8 #include <uapi/linux/icmp.h>
9 
10 #include <linux/etherdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/skbuff.h>
13 #include <linux/ieee80211.h>
14 
15 #include <net/cfg80211.h>
16 #include <net/ip.h>
17 
18 #include <linux/if_arp.h>
19 #include <linux/icmp.h>
20 #include <linux/udp.h>
21 #include <linux/ip.h>
22 #include <linux/mm.h>
23 
24 #include "internal.h"
25 #include "sap.h"
26 #include "iwl-mei.h"
27 
28 /*
29  * Returns true if further filtering should be stopped. Only in that case
30  * pass_to_csme and rx_handler_res are set. Otherwise, next level of filters
31  * should be checked.
32  */
33 static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr,
34 				  const struct iwl_sap_oob_filters *filters,
35 				  bool *pass_to_csme,
36 				  rx_handler_result_t *rx_handler_res)
37 {
38 	const struct iwl_sap_eth_filter *filt;
39 
40 	/* This filter is not relevant for UCAST packet */
41 	if (!is_multicast_ether_addr(ethhdr->h_dest) ||
42 	    is_broadcast_ether_addr(ethhdr->h_dest))
43 		return false;
44 
45 	for (filt = &filters->eth_filters[0];
46 	     filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters);
47 	     filt++) {
48 		/* Assume there are no enabled filter after a disabled one */
49 		if (!(filt->flags & SAP_ETH_FILTER_ENABLED))
50 			break;
51 
52 		if (compare_ether_header(filt->mac_address, ethhdr->h_dest))
53 			continue;
54 
55 		/* Packet needs to reach the host's stack */
56 		if (filt->flags & SAP_ETH_FILTER_COPY)
57 			*rx_handler_res = RX_HANDLER_PASS;
58 		else
59 			*rx_handler_res = RX_HANDLER_CONSUMED;
60 
61 		/* We have an authoritative answer, stop filtering */
62 		if (filt->flags & SAP_ETH_FILTER_STOP) {
63 			*pass_to_csme = true;
64 			return true;
65 		}
66 
67 		return false;
68 	}
69 
70 	 /* MCAST frames that don't match layer 2 filters are not sent to ME */
71 	*pass_to_csme  = false;
72 
73 	return true;
74 }
75 
76 /*
77  * Returns true iff the frame should be passed to CSME in which case
78  * rx_handler_res is set.
79  */
80 static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
81 				  const struct iwl_sap_oob_filters *filters,
82 				  rx_handler_result_t *rx_handler_res)
83 {
84 	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
85 	const struct arphdr *arp;
86 	const __be32 *target_ip;
87 	u32 flags = le32_to_cpu(filt->flags);
88 
89 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
90 		return false;
91 
92 	arp = arp_hdr(skb);
93 
94 	/* Handle only IPv4 over ethernet ARP frames */
95 	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
96 	    arp->ar_pro != htons(ETH_P_IP))
97 		return false;
98 
99 	/*
100 	 * After the ARP header, we have:
101 	 * src MAC address   - 6 bytes
102 	 * src IP address    - 4 bytes
103 	 * target MAC addess - 6 bytes
104 	 */
105 	target_ip = (void *)((u8 *)(arp + 1) +
106 			     ETH_ALEN + sizeof(__be32) + ETH_ALEN);
107 
108 	/*
109 	 * ARP request is forwarded to ME only if IP address match in the
110 	 * ARP request's target ip field.
111 	 */
112 	if (arp->ar_op == htons(ARPOP_REQUEST) &&
113 	    (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS)) &&
114 	    (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) {
115 		if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY)
116 			*rx_handler_res = RX_HANDLER_PASS;
117 		else
118 			*rx_handler_res = RX_HANDLER_CONSUMED;
119 
120 		return true;
121 	}
122 
123 	/* ARP reply is always forwarded to ME regardless of the IP */
124 	if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS &&
125 	    arp->ar_op == htons(ARPOP_REPLY)) {
126 		if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY)
127 			*rx_handler_res = RX_HANDLER_PASS;
128 		else
129 			*rx_handler_res = RX_HANDLER_CONSUMED;
130 
131 		return true;
132 	}
133 
134 	return false;
135 }
136 
137 static bool
138 iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool  ip_match,
139 			  const struct iwl_sap_oob_filters *filters,
140 			  rx_handler_result_t *rx_handler_res)
141 {
142 	const struct iwl_sap_flex_filter *filt;
143 
144 	for (filt = &filters->flex_filters[0];
145 	     filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters);
146 	     filt++) {
147 		if (!(filt->flags & SAP_FLEX_FILTER_ENABLED))
148 			break;
149 
150 		/*
151 		 * We are required to have a match on the IP level and we didn't
152 		 * have such match.
153 		 */
154 		if ((filt->flags &
155 		     (SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) &&
156 		    !ip_match)
157 			continue;
158 
159 		if ((filt->flags & SAP_FLEX_FILTER_UDP) &&
160 		    ip_hdr(skb)->protocol != IPPROTO_UDP)
161 			continue;
162 
163 		if ((filt->flags & SAP_FLEX_FILTER_TCP) &&
164 		    ip_hdr(skb)->protocol != IPPROTO_TCP)
165 			continue;
166 
167 		/*
168 		 * We must have either a TCP header or a UDP header, both
169 		 * starts with a source port and then a destination port.
170 		 * Both are big endian words.
171 		 * Use a UDP header and that will work for TCP as well.
172 		 */
173 		if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) ||
174 		    (filt->dst_port && filt->dst_port != udp_hdr(skb)->dest))
175 			continue;
176 
177 		if (filt->flags & SAP_FLEX_FILTER_COPY)
178 			*rx_handler_res = RX_HANDLER_PASS;
179 		else
180 			*rx_handler_res = RX_HANDLER_CONSUMED;
181 
182 		return true;
183 	}
184 
185 	return false;
186 }
187 
188 static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
189 				   const struct iwl_sap_oob_filters *filters,
190 				   rx_handler_result_t *rx_handler_res)
191 {
192 	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
193 	const struct iphdr *iphdr;
194 	unsigned int iphdrlen;
195 	bool match;
196 
197 	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
198 	    !pskb_may_pull(skb, skb_network_offset(skb) +
199 			   sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
200 		return false;
201 
202 	iphdrlen = ip_hdrlen(skb);
203 	iphdr = ip_hdr(skb);
204 	match = !filters->ipv4_filter.ipv4_addr ||
205 		filters->ipv4_filter.ipv4_addr == iphdr->daddr;
206 
207 	skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen);
208 
209 	switch (ip_hdr(skb)->protocol) {
210 	case IPPROTO_UDP:
211 	case IPPROTO_TCP:
212 		/*
213 		 * UDP header is shorter than TCP header and we look at the first bytes
214 		 * of the header anyway (see below).
215 		 * If we have a truncated TCP packet, let CSME handle this.
216 		 */
217 		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
218 				   sizeof(struct udphdr)))
219 			return false;
220 
221 		return iwl_mei_rx_filter_tcp_udp(skb, match,
222 						 filters, rx_handler_res);
223 
224 	case IPPROTO_ICMP: {
225 		struct icmphdr *icmp;
226 
227 		if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp)))
228 			return false;
229 
230 		icmp = icmp_hdr(skb);
231 
232 		/*
233 		 * Don't pass echo requests to ME even if it wants it as we
234 		 * want the host to answer.
235 		 */
236 		if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) &&
237 		    match && (icmp->type != ICMP_ECHO || icmp->code != 0)) {
238 			if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY))
239 				*rx_handler_res = RX_HANDLER_PASS;
240 			else
241 				*rx_handler_res = RX_HANDLER_CONSUMED;
242 
243 			return true;
244 		}
245 		break;
246 		}
247 	case IPPROTO_ICMPV6:
248 		/* TODO: Should we have the same ICMP request logic here too? */
249 		if ((filters->icmpv6_flags & cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED) &&
250 		     match)) {
251 			if (filters->icmpv6_flags &
252 			    cpu_to_le32(SAP_ICMPV6_FILTER_COPY))
253 				*rx_handler_res = RX_HANDLER_PASS;
254 			else
255 				*rx_handler_res = RX_HANDLER_CONSUMED;
256 
257 			return true;
258 		}
259 		break;
260 	default:
261 		return false;
262 	}
263 
264 	return false;
265 }
266 
267 static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb,
268 				   const struct iwl_sap_oob_filters *filters,
269 				   rx_handler_result_t *rx_handler_res)
270 {
271 	*rx_handler_res = RX_HANDLER_PASS;
272 
273 	/* TODO */
274 
275 	return false;
276 }
277 
278 static rx_handler_result_t
279 iwl_mei_rx_pass_to_csme(struct sk_buff *skb,
280 			const struct iwl_sap_oob_filters *filters,
281 			bool *pass_to_csme)
282 {
283 	const struct ethhdr *ethhdr = (void *)skb_mac_header(skb);
284 	rx_handler_result_t rx_handler_res = RX_HANDLER_PASS;
285 	bool (*filt_handler)(struct sk_buff *skb,
286 			     const struct iwl_sap_oob_filters *filters,
287 			     rx_handler_result_t *rx_handler_res);
288 
289 	/*
290 	 * skb->data points the IP header / ARP header and the ETH header
291 	 * is in the headroom.
292 	 */
293 	skb_reset_network_header(skb);
294 
295 	/*
296 	 * MCAST IP packets sent by us are received again here without
297 	 * an ETH header. Drop them here.
298 	 */
299 	if (!skb_mac_offset(skb))
300 		return RX_HANDLER_PASS;
301 
302 	if (skb_headroom(skb) < sizeof(*ethhdr))
303 		return RX_HANDLER_PASS;
304 
305 	if (iwl_mei_rx_filter_eth(ethhdr, filters,
306 				  pass_to_csme, &rx_handler_res))
307 		return rx_handler_res;
308 
309 	switch (skb->protocol) {
310 	case htons(ETH_P_IP):
311 		filt_handler = iwl_mei_rx_filter_ipv4;
312 		break;
313 	case htons(ETH_P_ARP):
314 		filt_handler = iwl_mei_rx_filter_arp;
315 		break;
316 	case htons(ETH_P_IPV6):
317 		filt_handler = iwl_mei_rx_filter_ipv6;
318 		break;
319 	default:
320 		*pass_to_csme = false;
321 		return rx_handler_res;
322 	}
323 
324 	*pass_to_csme = filt_handler(skb, filters, &rx_handler_res);
325 
326 	return rx_handler_res;
327 }
328 
329 rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
330 				      const struct iwl_sap_oob_filters *filters,
331 				      bool *pass_to_csme)
332 {
333 	rx_handler_result_t ret;
334 	struct sk_buff *skb;
335 
336 	ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme);
337 
338 	if (!*pass_to_csme)
339 		return RX_HANDLER_PASS;
340 
341 	if (ret == RX_HANDLER_PASS)
342 		skb = skb_copy(orig_skb, GFP_ATOMIC);
343 	else
344 		skb = orig_skb;
345 
346 	/* CSME wants the MAC header as well, push it back */
347 	skb_push(skb, skb->data - skb_mac_header(skb));
348 
349 	/*
350 	 * Add the packet that CSME wants to get to the ring. Don't send the
351 	 * Check Shared Area HECI message since this is not possible from the
352 	 * Rx context. The caller will schedule a worker to do just that.
353 	 */
354 	iwl_mei_add_data_to_ring(skb, false);
355 
356 	/*
357 	 * In case we drop the packet, don't free it, the caller will do that
358 	 * for us
359 	 */
360 	if (ret == RX_HANDLER_PASS)
361 		dev_kfree_skb(skb);
362 
363 	return ret;
364 }
365 
#define DHCP_SERVER_PORT 67
#define DHCP_CLIENT_PORT 68

/*
 * iwl_mei_tx_copy_to_csme() - mirror outgoing DHCP requests to CSME.
 * @origskb: the Tx frame; skb->data points to an 802.11 header (used via
 *	ieee80211_hdrlen() / ieee80211_get_DA() below)
 * @ivlen: length of the crypto IV that sits between the 802.11 header and
 *	the SNAP header
 *
 * Only IPv4 UDP packets from the DHCP client port (68) to the DHCP server
 * port (67) are mirrored; anything else returns immediately. The mirrored
 * copy is rebuilt as an Ethernet frame (DA + SA + the ethertype that is
 * left in place) before being handed to the CSME ring.
 */
void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen)
{
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	struct ethhdr ethhdr;
	struct ethhdr *eth;

	/* Catch DHCP packets */
	if (origskb->protocol != htons(ETH_P_IP) ||
	    ip_hdr(origskb)->protocol != IPPROTO_UDP ||
	    udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) ||
	    udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT))
		return;

	/*
	 * We could be a bit less aggressive here and not copy everything, but
	 * this is very rare anyway, so don't bother much.
	 */
	skb = skb_copy(origskb, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = origskb->protocol;

	/* The copy still starts with the 802.11 header */
	hdr = (void *)skb->data;

	/* Save DA/SA before the 802.11 header is pulled off */
	memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN);

	/*
	 * Remove the ieee80211 header + IV + SNAP but leave the ethertype
	 * We still have enough headroom for the sap header.
	 */
	pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6);
	/* Push back only DA + SA; the ethertype is already in place */
	eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
	memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));

	iwl_mei_add_data_to_ring(skb, true);

	dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);
409 EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);
410