xref: /openbmc/linux/fs/nfs/delegation.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

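/*
 * Drop the credential reference held by a delegation and free the
 * delegation structure itself.
 */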
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

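/*
 * Reclaim the POSIX and flock locks that were covered by the delegation
 * for one open context: walk the inode's lock list and re-establish each
 * matching lock on the server via nfs4_lock_delegation_recall().
 */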
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

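/*
 * Reclaim open (and lock) state for every open context on the inode that
 * is still marked as delegated.  inode->i_lock is dropped around each
 * recall, so the scan restarts from the head of the open_files list after
 * every successful reclaim.
 */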
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation from the result of a reclaim-type open
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	put_rpccred(oldcred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = kzalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

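/*
 * Send DELEGRETURN for the delegation's stateid and release the local
 * delegation structure.
 */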
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

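/*
 * Kernel thread body: return every delegation held from this server once
 * the lease has expired.  Bails out early if state recovery is already in
 * progress or if the lease turns out not to be expired after all.
 */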
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	spin_lock(&clp->cl_lock);
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
out:
	spin_unlock(&clp->cl_lock);
	nfs_put_client(clp);
	module_put_and_exit(0);
}

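/*
 * Spawn the delegreturn thread for an expired lease.  The module and
 * nfs_client references taken here are dropped by the thread itself, or
 * below if the thread could not be started.
 */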
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

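/*
 * Arguments handed to the delegation recall thread.  The caller waits on
 * @started until the thread has detached the delegation (or failed to
 * find it) and filled in @result.
 */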
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

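/*
 * Thread body for an asynchronous delegation recall: detach the delegation
 * matching the recalled stateid, signal the waiter, then reclaim the open
 * and lock state and send the DELEGRETURN.
 */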
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall: hand the return off to a kernel thread
 * and wait only until the delegation has been detached from the inode.
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}

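/*
 * Copy the current delegation stateid for an inode into @dst.
 * Returns 1 if a delegation was found and copied, 0 otherwise.
 */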
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	if (nfsi->delegation_state == 0)
		return 0;
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		res = 1;
	}
	spin_unlock(&clp->cl_lock);
	return res;
}
438