xref: /openbmc/linux/fs/nfs/delegation.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

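/*
 * Allocate a new delegation structure; the caller fills in the stateid,
 * type and credential fields.
 */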
static struct nfs_delegation *nfs_alloc_delegation(void)
{
	return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

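/*
 * Drop the credential reference held by the delegation and free it
 */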
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

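/*
 * Reclaim any POSIX or BSD locks held under the delegation for the
 * given open context.
 */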
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
				/* Fall through */
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				/* Fall through */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

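/*
 * Convert all delegated opens on this inode back to ordinary stateids
 */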
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Reclaim a delegation on an inode as part of state recovery
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

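	/* Install the new delegation unless the inode already holds one */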
	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

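/*
 * Send a DELEGRETURN to the server and release the local delegation state
 */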
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
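	/* Reclaim the open and lock state that the delegation was covering */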
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

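/*
 * Arguments passed to the kernel thread that handles a delegation recall
 */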
struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

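/*
 * Thread body for an asynchronous delegation recall: detach the delegation
 * from the inode, reclaim any opens and locks it covered, then send the
 * DELEGRETURN to the server.
 */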
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	/* Wake up the caller now that args->result is known */
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall: the delegation is detached and returned
 * to the server by a separate kernel thread; the caller only waits until
 * the delegation has been detached from the inode.
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}