/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>


#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)

static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void			nlm_gc_hosts(void);
static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
					const char *, unsigned int, int);
static struct nsm_handle *	nsm_find(const struct sockaddr_in *sin,
					 const char *hostname,
					 unsigned int hostname_len);

/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
		int proto, int version, const char *hostname,
		unsigned int hostname_len,
		const struct sockaddr_in *ssin)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;
	int		hash;

	dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
			", p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(ssin->sin_addr.s_addr),
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 *	(address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[hash];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;
		if (!nlm_cmp_addr(&host->h_saddr, ssin))
			continue;

		/* Move to head of hash chain. */
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		goto out;
	}
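	/*
	 * No entry matched the full (address, protocol, version, role)
	 * tuple. If the peer address matched an existing entry above, we
	 * reuse its nsm_handle; the reference taken here will be owned by
	 * the new nlm_host created below (or dropped again on failure).
	 */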
	if (nsm)
		atomic_inc(&nsm->sm_count);

	host = NULL;

	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);
		goto out;
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr	   = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_saddr	   = *ssin;
	host->h_version    = version;
	host->h_proto	   = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state	   = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_nsmhandle  = nsm;
	host->h_server	   = server;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	BUG_ON(!list_empty(&host->h_lockowners));
	BUG_ON(atomic_read(&host->h_count));

	/*
	 * Release NSM handle and unmonitor host.
	 */
	nsm_unmonitor(host);

	clnt = host->h_rpcclnt;
	if (clnt != NULL)
		rpc_shutdown_client(clnt);
	kfree(host);
}

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
		    const char *hostname, unsigned int hostname_len)
{
	struct sockaddr_in ssin = {0};

	return nlm_lookup_host(0, sin, proto, version,
			       hostname, hostname_len, &ssin);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
		   const char *hostname, unsigned int hostname_len)
{
	struct sockaddr_in ssin = {0};

	ssin.sin_addr = rqstp->rq_daddr.addr;
	return nlm_lookup_host(1, svc_addr_in(rqstp),
			       rqstp->rq_prot, rqstp->rq_vers,
			       hostname, hostname_len, &ssin);
}
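/*
 * Illustrative sketch, not part of the original file: how a client-side
 * caller might pair the lookup above with nlm_bind_host() and
 * nlm_release_host(). The peer name, NLM version and protocol used here
 * are hypothetical.
 */
static inline int nlm_example_bind_peer(const struct sockaddr_in *peer)
{
	struct nlm_host	*host;
	struct rpc_clnt	*clnt;

	/* "peer.example.com" is 16 characters long */
	host = nlmclnt_lookup_host(peer, IPPROTO_UDP, 4,
				   "peer.example.com", 16);
	if (host == NULL)
		return -ENOLCK;

	clnt = nlm_bind_host(host);	/* NULL if the RPC client
					 * could not be created */
	nlm_release_host(host);
	return clnt != NULL ? 0 : -EIO;
}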
/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n",
			NIPQUAD(host->h_saddr.sin_addr),
			NIPQUAD(host->h_addr.sin_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		unsigned long increment = nlmsvc_timeout;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.saddress	= (struct sockaddr *)&host->h_saddr,
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
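/*
 * Note that nlm_release_host() never frees the host directly: dropping
 * the last reference only makes the host eligible for removal. Actual
 * destruction is deferred to nlm_gc_hosts() below, once h_expires has
 * passed and no resources are held.
 */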
/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
				const char *hostname,
				unsigned int hostname_len,
				u32 new_state)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
			hostname, NIPQUAD(sin->sin_addr));

	/* Find the NSM handle for this peer */
	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
		return;

	/* When reclaiming locks on this peer, make sure that
	 * we set up a new notification */
	nsm->sm_monitored = 0;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock for this.
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			if (host->h_nsmhandle == nsm
			 && host->h_nsmstate != new_state) {
				host->h_nsmstate = new_state;
				host->h_state++;

				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);

				if (host->h_server) {
					/* We're server for this guy, just ditch
					 * all the locks he held. */
					nlmsvc_free_host_resources(host);
				} else {
					/* He's the server, initiate lock recovery. */
					nlmclnt_recovery(host);
				}

				nlm_release_host(host);
				goto again;
			}
		}
	}

	mutex_unlock(&nlm_host_mutex);
}
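/*
 * A note on the rescan pattern above: nlm_host_mutex is dropped before
 * calling into the reclaim paths, which may sleep, so the hash table can
 * change underneath us and the scan must restart from the top. Matching
 * h_nsmstate against new_state guarantees each host is handled only once.
 */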
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			host->h_expires = jiffies - 1;
			if (host->h_rpcclnt) {
				rpc_shutdown_client(host->h_rpcclnt);
				host->h_rpcclnt = NULL;
			}
		}
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}


/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);

static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
		const char *hostname, unsigned int hostname_len,
		int create)
{
	struct nsm_handle *nsm = NULL;
	struct list_head *pos;

	if (!sin)
		return NULL;

	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				hostname_len, hostname);
		}
		return NULL;
	}

	mutex_lock(&nsm_mutex);
	list_for_each(pos, &nsm_handles) {
		nsm = list_entry(pos, struct nsm_handle, sm_link);

		if (hostname && nsm_use_hostnames) {
			if (strlen(nsm->sm_name) != hostname_len
			 || memcmp(nsm->sm_name, hostname, hostname_len))
				continue;
		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
			continue;
		atomic_inc(&nsm->sm_count);
		goto out;
	}

	if (!create) {
		nsm = NULL;
		goto out;
	}

	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
	if (nsm != NULL) {
		nsm->sm_addr = *sin;
		nsm->sm_name = (char *) (nsm + 1);
		memcpy(nsm->sm_name, hostname, hostname_len);
		nsm->sm_name[hostname_len] = '\0';
		atomic_set(&nsm->sm_count, 1);

		list_add(&nsm->sm_link, &nsm_handles);
	}

out:
	mutex_unlock(&nsm_mutex);
	return nsm;
}

static struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname,
	 unsigned int hostname_len)
{
	return __nsm_find(sin, hostname, hostname_len, 1);
}

/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
	if (!nsm)
		return;
	if (atomic_dec_and_test(&nsm->sm_count)) {
		mutex_lock(&nsm_mutex);
		if (atomic_read(&nsm->sm_count) == 0) {
			list_del(&nsm->sm_link);
			kfree(nsm);
		}
		mutex_unlock(&nsm_mutex);
	}
}
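/*
 * Illustrative sketch, not part of the original file: the NSM handle
 * life cycle as used within this file. __nsm_find() with create=0 only
 * looks up an existing handle (as nlm_host_rebooted() does), while
 * nsm_find() creates one if needed; every successful find must be paired
 * with nsm_release(). The peer name below is hypothetical.
 */
static inline int nsm_example_pin_peer(const struct sockaddr_in *sin)
{
	struct nsm_handle *nsm;

	/* "peer.example.com" is 16 characters long */
	nsm = nsm_find(sin, "peer.example.com", 16);
	if (nsm == NULL)
		return -ENOMEM;

	/* ... use the handle, e.g. attach it to a new nlm_host ... */

	nsm_release(nsm);
	return 0;
}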