/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/mutex.h>

#include <net/ipv6.h>

#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_NRHASH		32
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		(300 * HZ)
#define NLM_HOST_COLLECT	(120 * HZ)

static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);

struct nlm_lookup_host_info {
	const int		server;		/* search for server|client */
	const struct sockaddr	*sap;		/* address to search for */
	const size_t		salen;		/* its length */
	const unsigned short	protocol;	/* transport to search for */
	const u32		version;	/* NLM version to search for */
	const char		*hostname;	/* remote's hostname */
	const size_t		hostname_len;	/* its length */
	const struct sockaddr	*src_sap;	/* our address (optional) */
	const size_t		src_len;	/* its length */
	const int		noresvport;	/* use non-priv port */
};

/*
 * Hash function must work well on big- and little-endian platforms
 */
static unsigned int __nlm_hash32(const __be32 n)
{
	unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16);
	return hash ^ (hash >> 8);
}

static unsigned int __nlm_hash_addr4(const struct sockaddr *sap)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	return __nlm_hash32(sin->sin_addr.s_addr);
}

static unsigned int __nlm_hash_addr6(const struct sockaddr *sap)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr addr = sin6->sin6_addr;
	return __nlm_hash32(addr.s6_addr32[0]) ^
	       __nlm_hash32(addr.s6_addr32[1]) ^
	       __nlm_hash32(addr.s6_addr32[2]) ^
	       __nlm_hash32(addr.s6_addr32[3]);
}

static unsigned int nlm_hash_address(const struct sockaddr *sap)
{
	unsigned int hash;

	switch (sap->sa_family) {
	case AF_INET:
		hash = __nlm_hash_addr4(sap);
		break;
	case AF_INET6:
		hash = __nlm_hash_addr6(sap);
		break;
	default:
		hash = 0;
	}
	return hash & (NLM_HOST_NRHASH - 1);
}

/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;

	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 *	(address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != ni->protocol)
			continue;
		if (host->h_version != ni->version)
			continue;
		if (host->h_server != ni->server)
			continue;
		if (ni->server &&
		    !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
			continue;

		/* Move to head of hash chain. */
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
				host->h_name, host->h_addrbuf);
		goto out;
	}

	/*
	 * The host wasn't in our hash table. If we don't
	 * have an NSM handle for it yet, create one.
	 */
	if (nsm)
		atomic_inc(&nsm->sm_count);
	else {
		host = NULL;
		nsm = nsm_get_handle(ni->sap, ni->salen,
					ni->hostname, ni->hostname_len);
		if (!nsm) {
			dprintk("lockd: nlm_lookup_host failed; "
				"no nsm handle\n");
			goto out;
		}
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);
		dprintk("lockd: nlm_lookup_host failed; no memory\n");
		goto out;
	}
	host->h_name = nsm->sm_name;
	host->h_addrbuf = nsm->sm_addrbuf;
	memcpy(nlm_addr(host), ni->sap, ni->salen);
	host->h_addrlen = ni->salen;
	rpc_set_port(nlm_addr(host), 0);
	memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
	host->h_version = ni->version;
	host->h_proto = ni->protocol;
	host->h_rpcclnt = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state = 0;		/* pseudo NSM state */
	host->h_nsmstate = 0;		/* real NSM state */
	host->h_nsmhandle = nsm;
	host->h_server = ni->server;
	host->h_noresvport = ni->noresvport;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	nrhosts++;

	dprintk("lockd: nlm_lookup_host created host %s\n",
			host->h_name);

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	BUG_ON(!list_empty(&host->h_lockowners));
	BUG_ON(atomic_read(&host->h_count));

	nsm_unmonitor(host);
	nsm_release(host->h_nsmhandle);

	clnt = host->h_rpcclnt;
	if (clnt != NULL)
		rpc_shutdown_client(clnt);
	kfree(host);
}

/**
 * nlmclnt_lookup_host - Find an NLM host handle matching a remote server
 * @sap: network address of server
 * @salen: length of server address
 * @protocol: transport protocol to use
 * @version: NLM protocol version
 * @hostname: '\0'-terminated hostname of server
 * @noresvport: 1 if non-privileged port should be used
 *
 * Returns an nlm_host structure that matches the passed-in
 * [server address, transport protocol, NLM version, server hostname].
 * If one doesn't already exist in the host cache, a new handle is
 * created and returned.
 */
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
				     const size_t salen,
				     const unsigned short protocol,
				     const u32 version,
				     const char *hostname,
				     int noresvport)
{
	const struct sockaddr source = {
		.sa_family	= AF_UNSPEC,
	};
	struct nlm_lookup_host_info ni = {
		.server		= 0,
		.sap		= sap,
		.salen		= salen,
		.protocol	= protocol,
		.version	= version,
		.hostname	= hostname,
		.hostname_len	= strlen(hostname),
		.src_sap	= &source,
		.src_len	= sizeof(source),
		.noresvport	= noresvport,
	};

	dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
			(hostname ? hostname : "<none>"), version,
			(protocol == IPPROTO_UDP ? "udp" : "tcp"));

	return nlm_lookup_host(&ni);
}

/**
 * nlmsvc_lookup_host - Find an NLM host handle matching a remote client
 * @rqstp: incoming NLM request
 * @hostname: name of client host
 * @hostname_len: length of client hostname
 *
 * Returns an nlm_host structure that matches the [client address,
 * transport protocol, NLM version, client hostname] of the passed-in
 * NLM request.  If one doesn't already exist in the host cache, a
 * new handle is created and returned.
 *
 * Before possibly creating a new nlm_host, construct a sockaddr
 * for a specific source address in case the local system has
 * multiple network addresses.  The family of the address in
 * rq_daddr is guaranteed to be the same as the family of the
 * address in rq_addr, so it's safe to use the same family for
 * the source address.
 */
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
				    const char *hostname,
				    const size_t hostname_len)
{
	struct sockaddr_in sin = {
		.sin_family	= AF_INET,
	};
	struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
	};
	struct nlm_lookup_host_info ni = {
		.server		= 1,
		.sap		= svc_addr(rqstp),
		.salen		= rqstp->rq_addrlen,
		.protocol	= rqstp->rq_prot,
		.version	= rqstp->rq_vers,
		.hostname	= hostname,
		.hostname_len	= hostname_len,
		.src_len	= rqstp->rq_addrlen,
	};

	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
			(int)hostname_len, hostname, rqstp->rq_vers,
			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));

	switch (ni.sap->sa_family) {
	case AF_INET:
		sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
		ni.src_sap = (struct sockaddr *)&sin;
		break;
	case AF_INET6:
		ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
		ni.src_sap = (struct sockaddr *)&sin6;
		break;
	default:
		return NULL;
	}

	return nlm_lookup_host(&ni);
}

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host %s (%s)\n",
			host->h_name, host->h_addrbuf);

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %lu jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		unsigned long increment = nlmsvc_timeout;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= nlm_addr(host),
			.addrsize	= host->h_addrlen,
			.saddress	= nlm_srcaddr(host),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_NOPING |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		/*
		 * lockd retries server side blocks automatically so we want
		 * those to be soft RPC calls. Client side calls need to be
		 * hard RPC tasks.
		 */
		if (!host->h_server)
			args.flags |= RPC_CLNT_CREATE_HARDRTRY;
		if (host->h_noresvport)
			args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}

/**
 * nlm_host_rebooted - Release all resources held by rebooted host
 * @info: pointer to decoded results of NLM_SM_NOTIFY call
 *
 * We were notified that the specified host has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct nlm_reboot *info)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	nsm = nsm_reboot_lookup(info);
	if (unlikely(nsm == NULL))
		return;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock for this.
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			if (host->h_nsmhandle == nsm
			 && host->h_nsmstate != info->state) {
				host->h_nsmstate = info->state;
				host->h_state++;

				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);

				if (host->h_server) {
					/* We're server for this guy, just ditch
					 * all the locks he held. */
					nlmsvc_free_host_resources(host);
				} else {
					/* He's the server, initiate lock recovery. */
					nlmclnt_recovery(host);
				}

				nlm_release_host(host);
				goto again;
			}
		}
	}

	mutex_unlock(&nlm_host_mutex);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			host->h_expires = jiffies - 1;
			if (host->h_rpcclnt) {
				rpc_shutdown_client(host->h_rpcclnt);
				host->h_rpcclnt = NULL;
			}
		}
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
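
/*
 * Illustrative sketch, not part of the original file: one way a
 * client-side caller might obtain, bind and release an nlm_host using
 * the helpers above.  The function name, the hard-coded NLM version
 * and the error values are assumptions for illustration only; the
 * real callers live elsewhere in lockd.  Guarded by #if 0 so it is
 * never compiled.
 */
#if 0
static int example_nlm_ping_server(const struct sockaddr_in *sin,
				   const char *hostname)
{
	struct nlm_host *host;
	struct rpc_clnt *clnt;

	/* Look up (or create) the cached nlm_host for this server. */
	host = nlmclnt_lookup_host((const struct sockaddr *)sin,
				   sizeof(*sin), IPPROTO_TCP, 4,
				   hostname, 0);
	if (host == NULL)
		return -ENOLCK;

	/* Create or reuse the RPC client bound to the peer's lockd. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL) {
		nlm_release_host(host);
		return -ENOLCK;
	}

	/* ... issue NLM requests through clnt here ... */

	/* Drop the reference taken by nlmclnt_lookup_host(). */
	nlm_release_host(host);
	return 0;
}
#endif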