1 /* Local endpoint object management 2 * 3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. 4 * Written by David Howells (dhowells@redhat.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public Licence 8 * as published by the Free Software Foundation; either version 9 * 2 of the Licence, or (at your option) any later version. 10 */ 11 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/net.h> 16 #include <linux/skbuff.h> 17 #include <linux/slab.h> 18 #include <linux/udp.h> 19 #include <linux/ip.h> 20 #include <linux/hashtable.h> 21 #include <net/sock.h> 22 #include <net/af_rxrpc.h> 23 #include "ar-internal.h" 24 25 static void rxrpc_local_processor(struct work_struct *); 26 static void rxrpc_local_rcu(struct rcu_head *); 27 28 /* 29 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, 30 * same or greater than. 31 * 32 * We explicitly don't compare the RxRPC service ID as we want to reject 33 * conflicting uses by differing services. Further, we don't want to share 34 * addresses with different options (IPv6), so we don't compare those bits 35 * either. 36 */ 37 static long rxrpc_local_cmp_key(const struct rxrpc_local *local, 38 const struct sockaddr_rxrpc *srx) 39 { 40 long diff; 41 42 diff = ((local->srx.transport_type - srx->transport_type) ?: 43 (local->srx.transport_len - srx->transport_len) ?: 44 (local->srx.transport.family - srx->transport.family)); 45 if (diff != 0) 46 return diff; 47 48 switch (srx->transport.family) { 49 case AF_INET: 50 /* If the choice of UDP port is left up to the transport, then 51 * the endpoint record doesn't match. 
52 */ 53 return ((u16 __force)local->srx.transport.sin.sin_port - 54 (u16 __force)srx->transport.sin.sin_port) ?: 55 memcmp(&local->srx.transport.sin.sin_addr, 56 &srx->transport.sin.sin_addr, 57 sizeof(struct in_addr)); 58 #ifdef CONFIG_AF_RXRPC_IPV6 59 case AF_INET6: 60 /* If the choice of UDP6 port is left up to the transport, then 61 * the endpoint record doesn't match. 62 */ 63 return ((u16 __force)local->srx.transport.sin6.sin6_port - 64 (u16 __force)srx->transport.sin6.sin6_port) ?: 65 memcmp(&local->srx.transport.sin6.sin6_addr, 66 &srx->transport.sin6.sin6_addr, 67 sizeof(struct in6_addr)); 68 #endif 69 default: 70 BUG(); 71 } 72 } 73 74 /* 75 * Allocate a new local endpoint. 76 */ 77 static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, 78 const struct sockaddr_rxrpc *srx) 79 { 80 struct rxrpc_local *local; 81 82 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 83 if (local) { 84 atomic_set(&local->usage, 1); 85 local->rxnet = rxnet; 86 INIT_LIST_HEAD(&local->link); 87 INIT_WORK(&local->processor, rxrpc_local_processor); 88 init_rwsem(&local->defrag_sem); 89 skb_queue_head_init(&local->reject_queue); 90 skb_queue_head_init(&local->event_queue); 91 local->client_conns = RB_ROOT; 92 spin_lock_init(&local->client_conns_lock); 93 spin_lock_init(&local->lock); 94 rwlock_init(&local->services_lock); 95 local->debug_id = atomic_inc_return(&rxrpc_debug_id); 96 memcpy(&local->srx, srx, sizeof(*srx)); 97 local->srx.srx_service = 0; 98 trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); 99 } 100 101 _leave(" = %p", local); 102 return local; 103 } 104 105 /* 106 * create the local socket 107 * - must be called with rxrpc_local_mutex locked 108 */ 109 static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) 110 { 111 struct sock *sock; 112 int ret, opt; 113 114 _enter("%p{%d,%d}", 115 local, local->srx.transport_type, local->srx.transport.family); 116 117 /* create a socket to represent the local endpoint */ 118 ret = 
sock_create_kern(net, local->srx.transport.family, 119 local->srx.transport_type, 0, &local->socket); 120 if (ret < 0) { 121 _leave(" = %d [socket]", ret); 122 return ret; 123 } 124 125 /* if a local address was supplied then bind it */ 126 if (local->srx.transport_len > sizeof(sa_family_t)) { 127 _debug("bind"); 128 ret = kernel_bind(local->socket, 129 (struct sockaddr *)&local->srx.transport, 130 local->srx.transport_len); 131 if (ret < 0) { 132 _debug("bind failed %d", ret); 133 goto error; 134 } 135 } 136 137 /* we want to receive ICMP errors */ 138 opt = 1; 139 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, 140 (char *) &opt, sizeof(opt)); 141 if (ret < 0) { 142 _debug("setsockopt failed"); 143 goto error; 144 } 145 146 /* we want to set the don't fragment bit */ 147 opt = IP_PMTUDISC_DO; 148 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, 149 (char *) &opt, sizeof(opt)); 150 if (ret < 0) { 151 _debug("setsockopt failed"); 152 goto error; 153 } 154 155 /* set the socket up */ 156 sock = local->socket->sk; 157 sock->sk_user_data = local; 158 sock->sk_data_ready = rxrpc_data_ready; 159 sock->sk_error_report = rxrpc_error_report; 160 _leave(" = 0"); 161 return 0; 162 163 error: 164 kernel_sock_shutdown(local->socket, SHUT_RDWR); 165 local->socket->sk->sk_user_data = NULL; 166 sock_release(local->socket); 167 local->socket = NULL; 168 169 _leave(" = %d", ret); 170 return ret; 171 } 172 173 /* 174 * Look up or create a new local endpoint using the specified local address. 
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	/* The endpoint list is kept sorted by rxrpc_local_cmp_key(), so we
	 * can stop scanning as soon as we pass the insertion point.
	 */
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 *
		 * If the get fails, the object's usage count already hit zero:
		 * advance the cursor past it (so the replacement is inserted
		 * in its place below) and unlink the dying object.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	/* No live match - create a fresh endpoint and insert it at the point
	 * the scan stopped, keeping the list sorted.
	 */
	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	/* The socket was never opened (or was torn down again by
	 * rxrpc_open_socket's own error path), and the object was never
	 * published on the endpoint list, so a plain kfree() is sufficient.
	 * kfree(NULL) is a no-op on the nomem path.
	 */
	mutex_unlock(&rxnet->local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.  The caller must already hold a ref (or
 * otherwise know the object cannot go away), as the count is incremented
 * unconditionally.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 * Returns the endpoint on success or NULL if it is already dying (or if
 * @local itself is NULL).
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
		/* __atomic_add_unless() returns the old value, so a non-zero
		 * result means we successfully took a ref.
		 */
		int n = __atomic_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint for processing by rxrpc_local_processor().  Only
 * traces when the work item was not already queued.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(local, rxrpc_local_queued,
				  atomic_read(&local->usage), here);
}

/*
 * A local endpoint reached its end of life.  Queue the processor, which will
 * observe the zero usage count and start destruction.
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}

/*
 * Drop a ref on a local endpoint.  When the count hits zero, destruction is
 * deferred to the work processor rather than done here (we may be in an
 * atomic context; closing the socket can sleep).
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local, rxrpc_local_put, n, here);

		if (n == 0)
			__rxrpc_put_local(local);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 *
	 * NOTE(review): local->dead appears to be the only guard here;
	 * presumably re-entry can only come from the (single-threaded per
	 * work item) processor itself - confirm against workqueue semantics.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	/* Unpublish so rxrpc_lookup_local() can no longer find us. */
	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	/* By this point all client connections and services using this
	 * endpoint must already be gone.
	 */
	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	/* Defer the final kfree() to after an RCU grace period so that RCU
	 * readers still holding a pointer don't see it vanish underneath them.
	 */
	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint.  Loops until both packet queues are drained;
 * if the usage count has dropped to zero, the endpoint is destroyed instead.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
		/* Re-check usage each pass: a put may race with queue work. */
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.  This is the
 * final teardown step; the socket and queues were already dealt with by
 * rxrpc_local_destroyer().
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.  Called at namespace
 * teardown; any endpoint still on the list is a leaked reference, which is
 * reported and then treated as fatal.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	/* Make sure any in-flight processor work (including destroyers) has
	 * finished before inspecting the list.
	 */
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}