/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services. Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}
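
/*
 * Illustrative sketch (field values are hypothetical): the key above is what
 * rxrpc_lookup_local() walks rxnet->local_endpoints with, so a caller that
 * only fills in the transport address, e.g.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_service			= 0,
 *		.transport_type			= SOCK_DGRAM,
 *		.transport_len			= sizeof(srx.transport.sin),
 *		.transport.sin.sin_family	= AF_INET,
 *		.transport.sin.sin_port		= htons(7001),
 *	};
 *	local = rxrpc_lookup_local(net, &srx);
 *
 * matches an existing endpoint on the same address and port irrespective of
 * the service ID, which is exactly what the comparison above arranges.
 */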

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	case AF_INET6:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
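
/*
 * The socket options set above have direct userspace equivalents; a rough,
 * illustrative sketch of the same configuration on an ordinary UDP socket
 * would be:
 *
 *	int on = 1, mtu = IP_PMTUDISC_DO;
 *
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	setsockopt(fd, SOL_IP, IP_MTU_DISCOVER, &mtu, sizeof(mtu));
 *
 * i.e. have ICMP errors queued on the socket's error queue (surfaced here
 * through ->sk_error_report) and set the Don't Fragment bit so that path MTU
 * discovery is performed rather than having the stack fragment packets.
 */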

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here. It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match. We replace a dying object. Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(local, rxrpc_local_queued,
				  atomic_read(&local->usage), here);
}

/*
 * A local endpoint reached its end of life.
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}
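
/*
 * Reference counting, in outline (an illustrative sketch rather than a
 * complete list of callers):
 *
 *	local = rxrpc_lookup_local(net, &srx);	// returns with a ref held
 *	...
 *	rxrpc_put_local(local);			// drop that ref
 *
 * When the last ref goes, __rxrpc_put_local() above merely requeues the work
 * item; rxrpc_local_processor() then sees the zero usage count and calls
 * rxrpc_local_destroyer(), since the socket teardown may sleep and so cannot
 * be done directly in the contexts that drop references.
 */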

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local, rxrpc_local_put, n, here);

		if (n == 0)
			__rxrpc_put_local(local);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}