/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}
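/* Note on the lookup above: when an ICMPv6 report arrives against an IPv4
 * transport socket, the offending address in the error skb is a v4-mapped
 * IPv6 address (::ffff:a.b.c.d), so the IPv4 address proper sits in the
 * last four bytes of the 16-byte struct in6_addr - hence the "+ 12" when
 * copying into sin_addr.  For illustration, ::ffff:192.0.2.1 carries
 * 192.0.2.1 at offset 12.
 */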
/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	/* Clear the outstanding error value on the socket so that it doesn't
	 * cause kernel_sendmsg() to return it later.
	 */
	sock_error(sk);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);

	_leave("");
}
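/* Illustration of the fallback in rxrpc_adjust_mtu() when the ICMP report
 * carries no next-hop MTU (ee_info == 0): an interface MTU of 9000 is
 * halved to 4500, whereas anything at or below 1500 is merely trimmed by
 * 100 (e.g. 1500 -> 1400), clamped so that at least four bytes of data
 * still fit above the rxrpc header.
 */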
/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}
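/* Sketch of the running average maintained above: the RTT cache is a
 * power-of-two ring (the cursor wraps with "& (RXRPC_RTT_CACHE_SIZE - 1)")
 * and the sum is updated incrementally by subtracting the evicted sample
 * and adding the new one.  With three samples of 1ms, 2ms and 3ms and
 * usage == 3, the reported average is (1 + 2 + 3) / 3 = 2ms.  do_div() is
 * used for the partially-filled case because it performs the u64 division
 * portably on 32-bit architectures.
 */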
/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);

		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since last we examined
		 * it so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Move to a temporary list all the peers that are currently lodged
	 * in expired buckets, plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}
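/* Note on the keepalive machinery above: the buckets form a time wheel in
 * which each slot covers one second and the cursor advances one slot per
 * second.  A peer that transmitted recently is refiled "keepalive_at - base"
 * slots ahead; one that is due (or overdue) is pinged immediately and
 * refiled a full RXRPC_KEEPALIVE_TIME ahead.  The "(s8)(cursor - stop) < 0"
 * comparisons are wrap-safe: cursor and stop are u8, so their difference is
 * taken modulo 256 and the cast recovers the signed distance between them.
 */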