xref: /openbmc/linux/fs/lockd/host.c (revision f42b3800)
1 /*
2  * linux/fs/lockd/host.c
3  *
4  * Management for NLM peer hosts. The nlm_host struct is shared
5  * between client and server implementation. The only reason to
6  * do so is to reduce code bloat.
7  *
8  * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
9  */
10 
11 #include <linux/types.h>
12 #include <linux/slab.h>
13 #include <linux/in.h>
14 #include <linux/sunrpc/clnt.h>
15 #include <linux/sunrpc/svc.h>
16 #include <linux/lockd/lockd.h>
17 #include <linux/lockd/sm_inter.h>
18 #include <linux/mutex.h>
19 
20 
/* Debug facility used by dprintk() calls in this file */
#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
/* Soft limit on cached hosts; crossing it changes GC/expiry timing */
#define NLM_HOST_MAX		64
/* Number of hash chains; must be a power of two for NLM_ADDRHASH */
#define NLM_HOST_NRHASH		32
/* Hash an IPv4 address (network byte order) onto a chain index */
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
/* Delay before forcing a portmap rebind for a peer */
#define NLM_HOST_REBIND		(60 * HZ)
/* Idle lifetime of a cached host: 300s when over the soft limit, else 120s */
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
/* Interval between GC passes: 120s when over the soft limit, else 60s */
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)

/* Hash table of all cached NLM peers; protected by nlm_host_mutex */
static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;	/* jiffies of the next GC pass */
static int			nrhosts;	/* current number of cached hosts */
static DEFINE_MUTEX(nlm_host_mutex);


static void			nlm_gc_hosts(void);
static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
					const char *, unsigned int, int);
static struct nsm_handle *	nsm_find(const struct sockaddr_in *sin,
					 const char *hostname,
					 unsigned int hostname_len);
41 
/*
 * Common host lookup routine for server & client
 *
 * Look up the nlm_host for a peer, creating it if necessary.  A host
 * is keyed by (address, protocol, version, server/client role, source
 * address).  Returns the host with its reference count raised, or
 * NULL on allocation failure.  Takes and releases nlm_host_mutex.
 */
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
		int proto, int version, const char *hostname,
		unsigned int hostname_len,
		const struct sockaddr_in *ssin)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;
	int		hash;

	dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
			", p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(ssin->sin_addr.s_addr),
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");


	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	/* Opportunistic garbage collection while we hold the mutex */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 * (address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[hash];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;
		if (!nlm_cmp_addr(&host->h_saddr, ssin))
			continue;

		/* Move to head of hash chain. */
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		goto out;
	}
	/* An address-only match above lent us its NSM handle; take our
	 * own reference before reusing it for the new host below. */
	if (nsm)
		atomic_inc(&nsm->sm_count);

	host = NULL;

	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		/* Drop the NSM reference taken above on failure */
		nsm_release(nsm);
		goto out;
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_saddr	   = *ssin;
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_nsmhandle  = nsm;
	host->h_server	   = server;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	/* Past the soft limit, force a GC pass on the next lookup */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
152 
153 /*
154  * Destroy a host
155  */
156 static void
157 nlm_destroy_host(struct nlm_host *host)
158 {
159 	struct rpc_clnt	*clnt;
160 
161 	BUG_ON(!list_empty(&host->h_lockowners));
162 	BUG_ON(atomic_read(&host->h_count));
163 
164 	/*
165 	 * Release NSM handle and unmonitor host.
166 	 */
167 	nsm_unmonitor(host);
168 
169 	clnt = host->h_rpcclnt;
170 	if (clnt != NULL)
171 		rpc_shutdown_client(clnt);
172 	kfree(host);
173 }
174 
175 /*
176  * Find an NLM server handle in the cache. If there is none, create it.
177  */
178 struct nlm_host *
179 nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
180 			const char *hostname, unsigned int hostname_len)
181 {
182 	struct sockaddr_in ssin = {0};
183 
184 	return nlm_lookup_host(0, sin, proto, version,
185 			       hostname, hostname_len, &ssin);
186 }
187 
188 /*
189  * Find an NLM client handle in the cache. If there is none, create it.
190  */
191 struct nlm_host *
192 nlmsvc_lookup_host(struct svc_rqst *rqstp,
193 			const char *hostname, unsigned int hostname_len)
194 {
195 	struct sockaddr_in ssin = {0};
196 
197 	ssin.sin_addr = rqstp->rq_daddr.addr;
198 	return nlm_lookup_host(1, svc_addr_in(rqstp),
199 			       rqstp->rq_prot, rqstp->rq_vers,
200 			       hostname, hostname_len, &ssin);
201 }
202 
203 /*
204  * Create the NLM RPC client for an NLM peer
205  */
206 struct rpc_clnt *
207 nlm_bind_host(struct nlm_host *host)
208 {
209 	struct rpc_clnt	*clnt;
210 
211 	dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n",
212 			NIPQUAD(host->h_saddr.sin_addr),
213 			NIPQUAD(host->h_addr.sin_addr));
214 
215 	/* Lock host handle */
216 	mutex_lock(&host->h_mutex);
217 
218 	/* If we've already created an RPC client, check whether
219 	 * RPC rebind is required
220 	 */
221 	if ((clnt = host->h_rpcclnt) != NULL) {
222 		if (time_after_eq(jiffies, host->h_nextrebind)) {
223 			rpc_force_rebind(clnt);
224 			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
225 			dprintk("lockd: next rebind in %ld jiffies\n",
226 					host->h_nextrebind - jiffies);
227 		}
228 	} else {
229 		unsigned long increment = nlmsvc_timeout;
230 		struct rpc_timeout timeparms = {
231 			.to_initval	= increment,
232 			.to_increment	= increment,
233 			.to_maxval	= increment * 6UL,
234 			.to_retries	= 5U,
235 		};
236 		struct rpc_create_args args = {
237 			.protocol	= host->h_proto,
238 			.address	= (struct sockaddr *)&host->h_addr,
239 			.addrsize	= sizeof(host->h_addr),
240 			.saddress	= (struct sockaddr *)&host->h_saddr,
241 			.timeout	= &timeparms,
242 			.servername	= host->h_name,
243 			.program	= &nlm_program,
244 			.version	= host->h_version,
245 			.authflavor	= RPC_AUTH_UNIX,
246 			.flags		= (RPC_CLNT_CREATE_NOPING |
247 					   RPC_CLNT_CREATE_AUTOBIND),
248 		};
249 
250 		/*
251 		 * lockd retries server side blocks automatically so we want
252 		 * those to be soft RPC calls. Client side calls need to be
253 		 * hard RPC tasks.
254 		 */
255 		if (!host->h_server)
256 			args.flags |= RPC_CLNT_CREATE_HARDRTRY;
257 
258 		clnt = rpc_create(&args);
259 		if (!IS_ERR(clnt))
260 			host->h_rpcclnt = clnt;
261 		else {
262 			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
263 			clnt = NULL;
264 		}
265 	}
266 
267 	mutex_unlock(&host->h_mutex);
268 	return clnt;
269 }
270 
271 /*
272  * Force a portmap lookup of the remote lockd port
273  */
274 void
275 nlm_rebind_host(struct nlm_host *host)
276 {
277 	dprintk("lockd: rebind host %s\n", host->h_name);
278 	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
279 		rpc_force_rebind(host->h_rpcclnt);
280 		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
281 	}
282 }
283 
284 /*
285  * Increment NLM host count
286  */
287 struct nlm_host * nlm_get_host(struct nlm_host *host)
288 {
289 	if (host) {
290 		dprintk("lockd: get host %s\n", host->h_name);
291 		atomic_inc(&host->h_count);
292 		host->h_expires = jiffies + NLM_HOST_EXPIRE;
293 	}
294 	return host;
295 }
296 
297 /*
298  * Release NLM host after use
299  */
300 void nlm_release_host(struct nlm_host *host)
301 {
302 	if (host != NULL) {
303 		dprintk("lockd: release host %s\n", host->h_name);
304 		BUG_ON(atomic_read(&host->h_count) < 0);
305 		if (atomic_dec_and_test(&host->h_count)) {
306 			BUG_ON(!list_empty(&host->h_lockowners));
307 			BUG_ON(!list_empty(&host->h_granted));
308 			BUG_ON(!list_empty(&host->h_reclaim));
309 		}
310 	}
311 }
312 
313 /*
314  * We were notified that the host indicated by address &sin
315  * has rebooted.
316  * Release all resources held by that peer.
317  */
318 void nlm_host_rebooted(const struct sockaddr_in *sin,
319 				const char *hostname,
320 				unsigned int hostname_len,
321 				u32 new_state)
322 {
323 	struct hlist_head *chain;
324 	struct hlist_node *pos;
325 	struct nsm_handle *nsm;
326 	struct nlm_host	*host;
327 
328 	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
329 			hostname, NIPQUAD(sin->sin_addr));
330 
331 	/* Find the NSM handle for this peer */
332 	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
333 		return;
334 
335 	/* When reclaiming locks on this peer, make sure that
336 	 * we set up a new notification */
337 	nsm->sm_monitored = 0;
338 
339 	/* Mark all hosts tied to this NSM state as having rebooted.
340 	 * We run the loop repeatedly, because we drop the host table
341 	 * lock for this.
342 	 * To avoid processing a host several times, we match the nsmstate.
343 	 */
344 again:	mutex_lock(&nlm_host_mutex);
345 	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
346 		hlist_for_each_entry(host, pos, chain, h_hash) {
347 			if (host->h_nsmhandle == nsm
348 			 && host->h_nsmstate != new_state) {
349 				host->h_nsmstate = new_state;
350 				host->h_state++;
351 
352 				nlm_get_host(host);
353 				mutex_unlock(&nlm_host_mutex);
354 
355 				if (host->h_server) {
356 					/* We're server for this guy, just ditch
357 					 * all the locks he held. */
358 					nlmsvc_free_host_resources(host);
359 				} else {
360 					/* He's the server, initiate lock recovery. */
361 					nlmclnt_recovery(host);
362 				}
363 
364 				nlm_release_host(host);
365 				goto again;
366 			}
367 		}
368 	}
369 
370 	mutex_unlock(&nlm_host_mutex);
371 }
372 
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 *
 * Expires every cached host, shuts down its RPC client, then runs a
 * final GC pass.  Hosts still referenced at that point cannot be
 * freed; they are only reported.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			host->h_expires = jiffies - 1;	/* already in the past */
			if (host->h_rpcclnt) {
				rpc_shutdown_client(host->h_rpcclnt);
				host->h_rpcclnt = NULL;
			}
		}
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
416 
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_mutex (see nlm_lookup_host() and
 * nlm_shutdown_hosts()).
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	/* Clear the in-use flag on every host ... */
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: free hosts that are unreferenced, unmarked and past
	 * their expiry time.  _safe iteration allows unlinking. */
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	/* Schedule the next periodic pass */
	next_gc = jiffies + NLM_HOST_COLLECT;
}
457 
458 
/*
 * Manage NSM handles
 */
/* All known NSM handles; list and contents protected by nsm_mutex */
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);
464 
/*
 * Look up the NSM handle for a peer, optionally creating one.
 *
 * Matching is by hostname when one is given and nsm_use_hostnames is
 * set, otherwise by address.  On a hit (or successful creation) the
 * handle is returned with its reference count raised.  Returns NULL
 * if @sin is missing, the hostname is invalid, nothing matched and
 * @create is zero, or allocation failed.
 */
static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
		const char *hostname, unsigned int hostname_len,
		int create)
{
	struct nsm_handle *nsm = NULL;
	struct list_head *pos;

	if (!sin)
		return NULL;

	/* Reject hostnames containing '/' — NOTE(review): presumably
	 * because the name is later used in a pathname (statd state);
	 * confirm against the statd implementation. */
	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				hostname_len, hostname);
		}
		return NULL;
	}

	mutex_lock(&nsm_mutex);
	list_for_each(pos, &nsm_handles) {
		nsm = list_entry(pos, struct nsm_handle, sm_link);

		if (hostname && nsm_use_hostnames) {
			if (strlen(nsm->sm_name) != hostname_len
			 || memcmp(nsm->sm_name, hostname, hostname_len))
				continue;
		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
			continue;
		/* Found it: take a reference for the caller */
		atomic_inc(&nsm->sm_count);
		goto out;
	}

	if (!create) {
		nsm = NULL;
		goto out;
	}

	/* Allocate handle plus inline storage for the NUL-terminated
	 * hostname directly after the struct. */
	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
	if (nsm != NULL) {
		nsm->sm_addr = *sin;
		nsm->sm_name = (char *) (nsm + 1);
		memcpy(nsm->sm_name, hostname, hostname_len);
		nsm->sm_name[hostname_len] = '\0';
		atomic_set(&nsm->sm_count, 1);

		list_add(&nsm->sm_link, &nsm_handles);
	}

out:
	mutex_unlock(&nsm_mutex);
	return nsm;
}
519 
/*
 * Convenience wrapper around __nsm_find() that always creates the
 * handle when the lookup misses.
 */
static struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname,
	 unsigned int hostname_len)
{
	return __nsm_find(sin, hostname, hostname_len, /* create */ 1);
}
526 
/*
 * Release an NSM handle
 *
 * Drops one reference; on reaching zero the handle is unlinked and
 * freed.  The count is re-checked under nsm_mutex because a
 * concurrent __nsm_find() may have taken a new reference between our
 * decrement and acquiring the mutex.
 */
void
nsm_release(struct nsm_handle *nsm)
{
	if (!nsm)
		return;
	if (atomic_dec_and_test(&nsm->sm_count)) {
		mutex_lock(&nsm_mutex);
		/* Only free if no one resurrected the handle meanwhile */
		if (atomic_read(&nsm->sm_count) == 0) {
			list_del(&nsm->sm_link);
			kfree(nsm);
		}
		mutex_unlock(&nsm_mutex);
	}
}
544