/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int			reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait */
	struct nlm_host *	b_host;
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* need to reclaim lock */
	__be32			b_status;	/* grant callback status */
};

static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
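
/*
 * nlm_blocked is shared between tasks submitting lock requests and
 * lockd's service threads delivering GRANTED callbacks (see
 * nlmclnt_grant() below), so every traversal or update of the list
 * must hold nlm_blocked_lock.
 */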

/**
 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 *
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.
 */
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
	struct nlm_host *host;
	u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
	int status;

	status = lockd_up(nlm_init->net);
	if (status < 0)
		return ERR_PTR(status);

	host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen,
				   nlm_init->protocol, nlm_version,
				   nlm_init->hostname, nlm_init->noresvport,
				   nlm_init->net);
	if (host == NULL) {
		lockd_down(nlm_init->net);
		return ERR_PTR(-ENOLCK);
	}

	return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);

/**
 * nlmclnt_done - Release resources allocated by nlmclnt_init()
 * @host: nlm_host structure reserved by nlmclnt_init()
 *
 */
void nlmclnt_done(struct nlm_host *host)
{
	struct net *net = host->net;

	nlmclnt_release_host(host);
	lockd_down(net);
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
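
/*
 * Illustrative pairing of the two calls above (a sketch, not the real
 * call site; the NFS client does this when a mount needs NLM locking,
 * e.g. from nfs_start_lockd() in fs/nfs/client.c):
 *
 *	struct nlmclnt_initdata nlm_init = {
 *		.hostname	= server_name,
 *		.address	= (struct sockaddr *)&server_address,
 *		.addrlen	= server_addrlen,
 *		.protocol	= IPPROTO_TCP,
 *		.nfs_version	= 3,
 *		.net		= mount_net_ns,
 *	};
 *	struct nlm_host *host = nlmclnt_init(&nlm_init);
 *
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	...
 *	nlmclnt_done(host);
 *
 * server_name, server_address and mount_net_ns are stand-ins for
 * whatever the caller has at hand.
 */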

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_wait *block;

	block = kmalloc(sizeof(*block), GFP_KERNEL);
	if (block != NULL) {
		block->b_host = host;
		block->b_lock = fl;
		init_waitqueue_head(&block->b_wait);
		block->b_status = nlm_lck_blocked;

		spin_lock(&nlm_blocked_lock);
		list_add(&block->b_list, &nlm_blocked);
		spin_unlock(&nlm_blocked_lock);
	}
	return block;
}

void nlmclnt_finish_block(struct nlm_wait *block)
{
	if (block == NULL)
		return;
	spin_lock(&nlm_blocked_lock);
	list_del(&block->b_list);
	spin_unlock(&nlm_blocked_lock);
	kfree(block);
}
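
/*
 * How the block helpers fit together: a condensed sketch of the loop
 * in nlmclnt_lock() (fs/lockd/clntproc.c), which owns the real error
 * handling:
 *
 *	block = nlmclnt_prepare_block(host, fl);
 *	for (;;) {
 *		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
 *		if (status < 0 || req->a_res.status != nlm_lck_blocked)
 *			break;
 *		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
 *		if (status < 0)
 *			break;
 *	}
 *	nlmclnt_finish_block(block);
 */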

/*
 * Block on a lock. The caller has already sent the LOCK request and
 * been told by the server to wait; sleep until the GRANT callback
 * arrives or the timeout expires. On timeout, block->b_status is still
 * nlm_lck_blocked, which the caller takes as a cue to poll the server
 * again.
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (block == NULL)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != nlm_lck_blocked,
			timeout);
	if (ret < 0)
		return -ERESTARTSYS;
	req->a_res.status = block->b_status;
	return 0;
}

/*
 * The server lockd has called us back to tell us the lock was granted
 */
__be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait	*block;
	__be32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (fl_blocked->fl_start != fl->fl_start)
			continue;
		if (fl_blocked->fl_end != fl->fl_end)
			continue;
		/*
		 * Careful! The NLM server will return the 32-bit "pid" that
		 * we put on the wire: in this case the lockowner "pid".
		 */
		if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
			continue;
		if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode), fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller
		 */
		block->b_status = nlm_granted;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	spin_unlock(&nlm_blocked_lock);
	return res;
}
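
/*
 * nlmclnt_grant() is invoked from lockd's service threads when the
 * server calls back with NLMPROC_GRANTED (or GRANTED_MSG); roughly,
 * from fs/lockd/svcproc.c and svc4proc.c:
 *
 *	resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock);
 */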

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
	struct task_struct *task;

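	/*
	 * h_reclaiming guards against spawning more than one reclaimer:
	 * only the first caller starts the thread, and reclaimer()
	 * resets the counter to zero once recovery has finished.
	 */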
	if (!host->h_reclaiming++) {
		nlm_get_host(host);
		task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
		if (IS_ERR(task))
			printk(KERN_ERR "lockd: unable to spawn reclaimer "
				"thread. Locks for %s won't be reclaimed! "
				"(%ld)\n", host->h_name, PTR_ERR(task));
	}
}

static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct file_lock *fl, *next;
	u32 nsmstate;
	struct net *net = host->net;

	allow_signal(SIGKILL);

	down_write(&host->h_rwsem);
	lockd_up(net);	/* note: this cannot fail as lockd is already running */

	dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
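	/*
	 * Snapshot the server's NSM state; if another reboot notification
	 * bumps h_nsmstate while we are reclaiming, the loop below starts
	 * over from here.
	 */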
	nsmstate = host->h_nsmstate;

	/* Force a portmap getport - the peer's lockd will
	 * most likely end up on a different port.
	 */
	host->h_nextrebind = jiffies;
	nlm_rebind_host(host);

	/* First, reclaim all locks that have been granted. */
	list_splice_init(&host->h_granted, &host->h_reclaim);
	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
		list_del_init(&fl->fl_u.nfs_fl.list);

		/*
		 * sending this thread a SIGKILL will result in any unreclaimed
		 * locks being removed from the h_granted list. This means that
		 * the kernel will not attempt to reclaim them again if a new
		 * reclaimer thread is spawned for this host.
		 */
		if (signalled())
			continue;
		if (nlmclnt_reclaim(host, fl) != 0)
			continue;
		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
		if (host->h_nsmstate != nsmstate) {
			/* Argh! The server rebooted again! */
			goto restart;
		}
	}

	host->h_reclaiming = 0;
	up_write(&host->h_rwsem);
	dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

	/* Now, wake up all processes that sleep on a blocked lock */
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = nlm_lck_denied_grace_period;
			wake_up(&block->b_wait);
		}
	}
	spin_unlock(&nlm_blocked_lock);

	/* Release host handle after use */
	nlmclnt_release_host(host);
	lockd_down(net);
	return 0;
}
284