1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
4  */
5 
6 #include <linux/module.h>
7 #include <net/ipv6.h>
8 #include <net/ip6_route.h>
9 #include <net/ip6_fib.h>
10 #include <net/ip6_checksum.h>
11 #include <net/netfilter/ipv6/nf_reject.h>
12 #include <linux/netfilter_ipv6.h>
13 #include <linux/netfilter_bridge.h>
14 
15 const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
16 					      struct tcphdr *otcph,
17 					      unsigned int *otcplen, int hook)
18 {
19 	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
20 	u8 proto;
21 	__be16 frag_off;
22 	int tcphoff;
23 
24 	proto = oip6h->nexthdr;
25 	tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data),
26 				   &proto, &frag_off);
27 
28 	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
29 		pr_debug("Cannot get TCP header.\n");
30 		return NULL;
31 	}
32 
33 	*otcplen = oldskb->len - tcphoff;
34 
35 	/* IP header checks: fragment, too short. */
36 	if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
37 		pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
38 			 proto, *otcplen);
39 		return NULL;
40 	}
41 
42 	otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
43 				   otcph);
44 	if (otcph == NULL)
45 		return NULL;
46 
47 	/* No RST for RST. */
48 	if (otcph->rst) {
49 		pr_debug("RST is set\n");
50 		return NULL;
51 	}
52 
53 	/* Check checksum. */
54 	if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
55 		pr_debug("TCP checksum is invalid\n");
56 		return NULL;
57 	}
58 
59 	return otcph;
60 }
61 EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
62 
63 struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
64 				     const struct sk_buff *oldskb,
65 				     __u8 protocol, int hoplimit)
66 {
67 	struct ipv6hdr *ip6h;
68 	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
69 #define DEFAULT_TOS_VALUE	0x0U
70 	const __u8 tclass = DEFAULT_TOS_VALUE;
71 
72 	skb_put(nskb, sizeof(struct ipv6hdr));
73 	skb_reset_network_header(nskb);
74 	ip6h = ipv6_hdr(nskb);
75 	ip6_flow_hdr(ip6h, tclass, 0);
76 	ip6h->hop_limit = hoplimit;
77 	ip6h->nexthdr = protocol;
78 	ip6h->saddr = oip6h->daddr;
79 	ip6h->daddr = oip6h->saddr;
80 
81 	nskb->protocol = htons(ETH_P_IPV6);
82 
83 	return ip6h;
84 }
85 EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
86 
87 void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
88 			      const struct sk_buff *oldskb,
89 			      const struct tcphdr *oth, unsigned int otcplen)
90 {
91 	struct tcphdr *tcph;
92 	int needs_ack;
93 
94 	skb_reset_transport_header(nskb);
95 	tcph = skb_put(nskb, sizeof(struct tcphdr));
96 	/* Truncate to length (no data) */
97 	tcph->doff = sizeof(struct tcphdr)/4;
98 	tcph->source = oth->dest;
99 	tcph->dest = oth->source;
100 
101 	if (oth->ack) {
102 		needs_ack = 0;
103 		tcph->seq = oth->ack_seq;
104 		tcph->ack_seq = 0;
105 	} else {
106 		needs_ack = 1;
107 		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
108 				      otcplen - (oth->doff<<2));
109 		tcph->seq = 0;
110 	}
111 
112 	/* Reset flags */
113 	((u_int8_t *)tcph)[13] = 0;
114 	tcph->rst = 1;
115 	tcph->ack = needs_ack;
116 	tcph->window = 0;
117 	tcph->urg_ptr = 0;
118 	tcph->check = 0;
119 
120 	/* Adjust TCP checksum */
121 	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
122 				      &ipv6_hdr(nskb)->daddr,
123 				      sizeof(struct tcphdr), IPPROTO_TCP,
124 				      csum_partial(tcph,
125 						   sizeof(struct tcphdr), 0));
126 }
127 EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
128 
129 static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
130 {
131 	struct dst_entry *dst = NULL;
132 	struct flowi fl;
133 
134 	memset(&fl, 0, sizeof(struct flowi));
135 	fl.u.ip6.daddr = ipv6_hdr(skb_in)->saddr;
136 	nf_ip6_route(dev_net(skb_in->dev), &dst, &fl, false);
137 	if (!dst)
138 		return -1;
139 
140 	skb_dst_set(skb_in, dst);
141 	return 0;
142 }
143 
/* Send a TCP RST in reply to @oldskb, which was seen at netfilter hook
 * @hook in namespace @net.
 *
 * The offending packet is validated (unicast addresses, TCP, not itself
 * a RST, checksum OK), a reverse route is looked up, and a minimal
 * IPv6+TCP reset is built and transmitted — either directly on the
 * bridge port the packet arrived on, or via ip6_local_out().
 */
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
	struct net_device *br_indev __maybe_unused;
	struct sk_buff *nskb;
	struct tcphdr _otcph;
	const struct tcphdr *otcph;
	unsigned int otcplen, hh_len;
	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
	struct ipv6hdr *ip6h;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;

	/* Never reset on behalf of non-unicast source or destination. */
	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("addr is not unicast.\n");
		return;
	}

	/* Validates the packet and copies its TCP header into _otcph;
	 * NULL means no reset should be sent.
	 */
	otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
	if (!otcph)
		return;

	/* Build the reverse flow: addresses and ports swapped. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.saddr = oip6h->daddr;
	fl6.daddr = oip6h->saddr;
	fl6.fl6_sport = otcph->dest;
	fl6.fl6_dport = otcph->source;

	/* In PRE_ROUTING the skb carries no dst yet; attach one so the
	 * skb_dst(oldskb)->dev dereference below is valid.
	 */
	if (hook == NF_INET_PRE_ROUTING) {
		nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
		if (!dst)
			return;
		skb_dst_set(oldskb, dst);
	}

	fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		return;
	}
	/* xfrm_lookup() consumes the dst reference on error, so a plain
	 * return here does not leak.
	 */
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst))
		return;

	/* Headroom for the link-layer header, rounded up to 16 bytes. */
	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		net_dbg_ratelimited("cannot alloc skb\n");
		dst_release(dst);
		return;
	}

	/* nskb now owns the dst reference. */
	skb_dst_set(nskb, dst);

	nskb->mark = fl6.flowi6_mark;

	skb_reserve(nskb, hh_len + dst->header_len);
	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				    ip6_dst_hoplimit(dst));
	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);

	/* Associate the RST with the original conntrack entry. */
	nf_ct_attach(nskb, oldskb);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip6_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's.  This confuses
	 * some routers/firewalls, and they drop the packet.  So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	/* NOTE(review): later kernels replaced physindev with an ifindex
	 * lookup because this device pointer can be stale — confirm the
	 * physindev lifetime guarantees for this tree.
	 */
	br_indev = nf_bridge_get_physindev(oldskb);
	if (br_indev) {
		struct ethhdr *oeth = eth_hdr(oldskb);

		nskb->dev = br_indev;
		nskb->protocol = htons(ETH_P_IPV6);
		ip6h->payload_len = htons(sizeof(struct tcphdr));
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest, nskb->len) < 0) {
			kfree_skb(nskb);
			return;
		}
		/* Bypass routing: transmit on the bridge port directly. */
		dev_queue_xmit(nskb);
	} else
#endif
		ip6_local_out(net, nskb->sk, nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset6);
239 
240 static bool reject6_csum_ok(struct sk_buff *skb, int hook)
241 {
242 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
243 	int thoff;
244 	__be16 fo;
245 	u8 proto;
246 
247 	if (skb_csum_unnecessary(skb))
248 		return true;
249 
250 	proto = ip6h->nexthdr;
251 	thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo);
252 
253 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
254 		return false;
255 
256 	if (!nf_reject_verify_csum(proto))
257 		return true;
258 
259 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
260 }
261 
262 void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
263 		      unsigned char code, unsigned int hooknum)
264 {
265 	if (!reject6_csum_ok(skb_in, hooknum))
266 		return;
267 
268 	if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
269 		skb_in->dev = net->loopback_dev;
270 
271 	if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
272 		return;
273 
274 	icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
275 }
276 EXPORT_SYMBOL_GPL(nf_send_unreach6);
277 
278 MODULE_LICENSE("GPL");
279