// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */

#include <linux/module.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>

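/* Sanity-check the IPv4 header of the packet we are about to reject:
 * make sure the header can be pulled, version and header length are
 * sane, and the total length is consistent with the skb length.
 */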
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
	struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return 0;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len)
		return 0;
	else if (len < (iph->ihl*4))
		return 0;

	if (!pskb_may_pull(skb, iph->ihl*4))
		return 0;

	return 1;
}

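/* Build a TCP RST reply for oldskb without transmitting it; the caller
 * is responsible for sending the returned skb. Returns NULL if the
 * original packet is malformed or no reset should be generated
 * (e.g. the packet was itself a RST).
 */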
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
					   struct sk_buff *oldskb,
					   const struct net_device *dev,
					   int hook)
{
	const struct tcphdr *oth;
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct tcphdr _oth;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);

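/* Build an ICMP destination-unreachable reply (with the given code) for
 * oldskb without transmitting it. Up to 536 bytes of the offending
 * packet, starting at its IP header, are quoted in the ICMP payload.
 * Returns NULL for fragments, malformed packets, or packets that fail
 * checksum validation.
 */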
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
					 struct sk_buff *oldskb,
					 const struct net_device *dev,
					 int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	__wsum csum;
	u8 proto;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return NULL;

	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return NULL;

	proto = ip_hdr(oldskb)->protocol;

	if (!skb_csum_unnecessary(oldskb) &&
	    nf_reject_verify_csum(proto) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type     = ICMP_DEST_UNREACH;
	icmph->code	= code;

	skb_put_data(nskb, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len	= htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);

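/* Locate and validate the TCP header of the packet being rejected.
 * Returns NULL (meaning no RST should be sent) for fragments, non-TCP
 * packets, packets that are themselves RSTs, or bad checksums.
 */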
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
					     struct tcphdr *_oth, int hook)
{
	const struct tcphdr *oth;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
		return NULL;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(struct tcphdr), _oth);
	if (oth == NULL)
		return NULL;

	/* No RST for RST. */
	if (oth->rst)
		return NULL;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return NULL;

	return oth;
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);

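/* Write a fresh IPv4 header into nskb with the source and destination
 * addresses of oldskb swapped. tot_len and the header checksum are left
 * for the caller to fill in once the payload is complete.
 */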
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
				  const struct sk_buff *oldskb,
				  __u8 protocol, int ttl)
{
	struct iphdr *niph, *oiph = ip_hdr(oldskb);

	skb_reset_network_header(nskb);
	niph = skb_put(nskb, sizeof(struct iphdr));
	niph->version	= 4;
	niph->ihl	= sizeof(struct iphdr) / 4;
	niph->tos	= 0;
	niph->id	= 0;
	niph->frag_off	= htons(IP_DF);
	niph->protocol	= protocol;
	niph->check	= 0;
	niph->saddr	= oiph->daddr;
	niph->daddr	= oiph->saddr;
	niph->ttl	= ttl;

	nskb->protocol = htons(ETH_P_IP);

	return niph;
}
EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);

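/* Append the TCP header of the RST reply to nskb, mirroring the ports of
 * the original segment, deriving seq/ack from it, and arranging for the
 * checksum to be completed by the stack (CHECKSUM_PARTIAL).
 */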
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
			  const struct tcphdr *oth)
{
	struct iphdr *niph = ip_hdr(nskb);
	struct tcphdr *tcph;

	skb_reset_transport_header(nskb);
	tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
	tcph->source	= oth->dest;
	tcph->dest	= oth->source;
	tcph->doff	= sizeof(struct tcphdr) / 4;

	if (oth->ack) {
		tcph->seq = oth->ack_seq;
	} else {
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
				      oldskb->len - ip_hdrlen(oldskb) -
				      (oth->doff << 2));
		tcph->ack = 1;
	}

	tcph->rst	= 1;
	tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
				    niph->daddr, 0);
	nskb->ip_summed = CHECKSUM_PARTIAL;
	nskb->csum_start = (unsigned char *)tcph - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);

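/* Attach a dst to the packet being rejected by routing back towards its
 * source address. Needed in PRE_ROUTING/INGRESS, where no route lookup
 * has been done yet.
 */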
static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
{
	struct dst_entry *dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(struct flowi));
	fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
	nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
	if (!dst)
		return -1;

	skb_dst_set(skb_in, dst);
	return 0;
}

/* Send RST reply */
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
		   int hook)
{
	struct net_device *br_indev __maybe_unused;
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
	    nf_reject_fill_skb_dst(oldskb) < 0)
		return;

	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   ip4_dst_hoplimit(skb_dst(nskb)));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
		goto free_nskb;

	niph = ip_hdr(nskb);

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's.  This confuses
	 * some routers/firewalls, and they drop the packet.  So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	br_indev = nf_bridge_get_physindev(oldskb);
	if (br_indev) {
		struct ethhdr *oeth = eth_hdr(oldskb);

		nskb->dev = br_indev;
		niph->tot_len = htons(nskb->len);
		ip_send_check(niph);
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
			goto free_nskb;
		dev_queue_xmit(nskb);
	} else
#endif
		ip_local_out(net, nskb->sk, nskb);

	return;

 free_nskb:
	kfree_skb(nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset);

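/* Send an ICMP destination-unreachable error with the given code in
 * response to skb_in, unless the packet is a fragment or fails checksum
 * validation.
 */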
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
	struct iphdr *iph = ip_hdr(skb_in);
	u8 proto = iph->protocol;

	if (iph->frag_off & htons(IP_OFFSET))
		return;

	if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
	    nf_reject_fill_skb_dst(skb_in) < 0)
		return;

	if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
		return;
	}

	if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);

MODULE_LICENSE("GPL");