xref: /openbmc/linux/fs/nfs/nfs42proc.c (revision d9f6e12f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
4  */
5 #include <linux/fs.h>
6 #include <linux/sunrpc/addr.h>
7 #include <linux/sunrpc/sched.h>
8 #include <linux/nfs.h>
9 #include <linux/nfs3.h>
10 #include <linux/nfs4.h>
11 #include <linux/nfs_xdr.h>
12 #include <linux/nfs_fs.h>
13 #include "nfs4_fs.h"
14 #include "nfs42.h"
15 #include "iostat.h"
16 #include "pnfs.h"
17 #include "nfs4session.h"
18 #include "internal.h"
19 #include "delegation.h"
20 #include "nfs4trace.h"
21 
22 #define NFSDBG_FACILITY NFSDBG_PROC
23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
24 
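/*
 * Fill in @naddr with the netid ("tcp", "tcp6", ...) and the universal
 * address of the server backing @filep, as seen from this client, with
 * the standard NFS port (2049) encoded in the trailing "p1.p2" octets.
 * Used to describe the destination server in a COPY_NOTIFY request.
 */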
25 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
26 {
27 	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
28 	unsigned short port = 2049;
29 
30 	rcu_read_lock();
31 	naddr->netid_len = scnprintf(naddr->netid,
32 					sizeof(naddr->netid), "%s",
33 					rpc_peeraddr2str(clp->cl_rpcclient,
34 					RPC_DISPLAY_NETID));
35 	naddr->addr_len = scnprintf(naddr->addr,
36 					sizeof(naddr->addr),
37 					"%s.%u.%u",
38 					rpc_peeraddr2str(clp->cl_rpcclient,
39 					RPC_DISPLAY_ADDR),
40 					port >> 8, port & 255);
41 	rcu_read_unlock();
42 }
43 
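/*
 * Issue a single ALLOCATE or DEALLOCATE compound (the caller selects
 * the procedure via @msg) over @len bytes at @offset, using a stateid
 * derived from @lock, and fold the returned attributes back into the
 * inode on success.
 */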
44 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
45 		struct nfs_lock_context *lock, loff_t offset, loff_t len)
46 {
47 	struct inode *inode = file_inode(filep);
48 	struct nfs_server *server = NFS_SERVER(inode);
49 	struct nfs42_falloc_args args = {
50 		.falloc_fh	= NFS_FH(inode),
51 		.falloc_offset	= offset,
52 		.falloc_length	= len,
53 		.falloc_bitmask	= nfs4_fattr_bitmap,
54 	};
55 	struct nfs42_falloc_res res = {
56 		.falloc_server	= server,
57 	};
58 	int status;
59 
60 	msg->rpc_argp = &args;
61 	msg->rpc_resp = &res;
62 
63 	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
64 			lock, FMODE_WRITE);
65 	if (status) {
66 		if (status == -EAGAIN)
67 			status = -NFS4ERR_BAD_STATEID;
68 		return status;
69 	}
70 
71 	res.falloc_fattr = nfs_alloc_fattr();
72 	if (!res.falloc_fattr)
73 		return -ENOMEM;
74 
75 	status = nfs4_call_sync(server->client, server, msg,
76 				&args.seq_args, &res.seq_res, 0);
77 	if (status == 0)
78 		status = nfs_post_op_update_inode(inode, res.falloc_fattr);
79 
80 	kfree(res.falloc_fattr);
81 	return status;
82 }
83 
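/*
 * Common ALLOCATE/DEALLOCATE wrapper: grab a lock context for @filep
 * and re-drive _nfs42_proc_fallocate() through the standard NFSv4
 * exception handling until it succeeds or fails permanently, mapping a
 * server-side -ENOTSUPP to -EOPNOTSUPP.
 */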
84 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
85 				loff_t offset, loff_t len)
86 {
87 	struct nfs_server *server = NFS_SERVER(file_inode(filep));
88 	struct nfs4_exception exception = { };
89 	struct nfs_lock_context *lock;
90 	int err;
91 
92 	lock = nfs_get_lock_context(nfs_file_open_context(filep));
93 	if (IS_ERR(lock))
94 		return PTR_ERR(lock);
95 
96 	exception.inode = file_inode(filep);
97 	exception.state = lock->open_context->state;
98 
99 	do {
100 		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
101 		if (err == -ENOTSUPP) {
102 			err = -EOPNOTSUPP;
103 			break;
104 		}
105 		err = nfs4_handle_exception(server, err, &exception);
106 	} while (exception.retry);
107 
108 	nfs_put_lock_context(lock);
109 	return err;
110 }
111 
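/*
 * Preallocate @len bytes at @offset using the NFSv4.2 ALLOCATE
 * operation.  Clears NFS_CAP_ALLOCATE so later calls fail fast if the
 * server turns out not to support it.
 */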
112 int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
113 {
114 	struct rpc_message msg = {
115 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
116 	};
117 	struct inode *inode = file_inode(filep);
118 	int err;
119 
120 	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
121 		return -EOPNOTSUPP;
122 
123 	inode_lock(inode);
124 
125 	err = nfs42_proc_fallocate(&msg, filep, offset, len);
126 	if (err == -EOPNOTSUPP)
127 		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
128 
129 	inode_unlock(inode);
130 	return err;
131 }
132 
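/*
 * Punch a hole of @len bytes at @offset using DEALLOCATE.  Dirty pages
 * are flushed to the server first, and the page cache covering the
 * hole is dropped once the server has deallocated the range.
 */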
133 int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
134 {
135 	struct rpc_message msg = {
136 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
137 	};
138 	struct inode *inode = file_inode(filep);
139 	int err;
140 
141 	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
142 		return -EOPNOTSUPP;
143 
144 	inode_lock(inode);
145 	err = nfs_sync_inode(inode);
146 	if (err)
147 		goto out_unlock;
148 
149 	err = nfs42_proc_fallocate(&msg, filep, offset, len);
150 	if (err == 0)
151 		truncate_pagecache_range(inode, offset, (offset + len) - 1);
152 	if (err == -EOPNOTSUPP)
153 		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
154 out_unlock:
155 	inode_unlock(inode);
156 	return err;
157 }
158 
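/*
 * The server answered COPY asynchronously: remember the copy stateid on
 * the destination (and, for an inter-server copy, the source) server's
 * ss_copies list and sleep until the CB_OFFLOAD callback completes it.
 * If the matching callback already arrived it is waiting on
 * pending_cb_stateids and is consumed immediately.  A signal cancels
 * the offload on both sides.
 */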
159 static int handle_async_copy(struct nfs42_copy_res *res,
160 			     struct nfs_server *dst_server,
161 			     struct nfs_server *src_server,
162 			     struct file *src,
163 			     struct file *dst,
164 			     nfs4_stateid *src_stateid,
165 			     bool *restart)
166 {
167 	struct nfs4_copy_state *copy, *tmp_copy;
168 	int status = NFS4_OK;
169 	bool found_pending = false;
170 	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
171 	struct nfs_open_context *src_ctx = nfs_file_open_context(src);
172 
173 	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
174 	if (!copy)
175 		return -ENOMEM;
176 
177 	spin_lock(&dst_server->nfs_client->cl_lock);
178 	list_for_each_entry(tmp_copy,
179 				&dst_server->nfs_client->pending_cb_stateids,
180 				copies) {
181 		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
182 				NFS4_STATEID_SIZE))
183 			continue;
184 		found_pending = true;
185 		list_del(&tmp_copy->copies);
186 		break;
187 	}
188 	if (found_pending) {
189 		spin_unlock(&dst_server->nfs_client->cl_lock);
190 		kfree(copy);
191 		copy = tmp_copy;
192 		goto out;
193 	}
194 
195 	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
196 	init_completion(&copy->completion);
197 	copy->parent_dst_state = dst_ctx->state;
198 	copy->parent_src_state = src_ctx->state;
199 
200 	list_add_tail(&copy->copies, &dst_server->ss_copies);
201 	spin_unlock(&dst_server->nfs_client->cl_lock);
202 
203 	if (dst_server != src_server) {
204 		spin_lock(&src_server->nfs_client->cl_lock);
205 		list_add_tail(&copy->src_copies, &src_server->ss_copies);
206 		spin_unlock(&src_server->nfs_client->cl_lock);
207 	}
208 
209 	status = wait_for_completion_interruptible(&copy->completion);
210 	spin_lock(&dst_server->nfs_client->cl_lock);
211 	list_del_init(&copy->copies);
212 	spin_unlock(&dst_server->nfs_client->cl_lock);
213 	if (dst_server != src_server) {
214 		spin_lock(&src_server->nfs_client->cl_lock);
215 		list_del_init(&copy->src_copies);
216 		spin_unlock(&src_server->nfs_client->cl_lock);
217 	}
218 	if (status == -ERESTARTSYS) {
219 		goto out_cancel;
220 	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
221 		status = -EAGAIN;
222 		*restart = true;
223 		goto out_cancel;
224 	}
225 out:
226 	res->write_res.count = copy->count;
227 	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
228 	status = -copy->error;
229 
230 out_free:
231 	kfree(copy);
232 	return status;
233 out_cancel:
234 	nfs42_do_offload_cancel_async(dst, &copy->stateid);
235 	if (!nfs42_files_from_same_server(src, dst))
236 		nfs42_do_offload_cancel_async(src, src_stateid);
237 	goto out_free;
238 }
239 
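/*
 * The copied data is not yet on stable storage: COMMIT it and compare
 * the returned write verifier with the one from the COPY reply.  A
 * mismatch means the data may have been lost, so -EAGAIN asks the
 * caller to redo the copy.
 */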
240 static int process_copy_commit(struct file *dst, loff_t pos_dst,
241 			       struct nfs42_copy_res *res)
242 {
243 	struct nfs_commitres cres;
244 	int status = -ENOMEM;
245 
246 	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
247 	if (!cres.verf)
248 		goto out;
249 
250 	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
251 	if (status)
252 		goto out_free;
253 	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
254 				    &cres.verf->verifier)) {
255 		dprintk("commit verf differs from copy verf\n");
256 		status = -EAGAIN;
257 	}
258 out_free:
259 	kfree(cres.verf);
260 out:
261 	return status;
262 }
263 
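/*
 * One COPY attempt: pick source and destination stateids (or use the
 * copy-notify stateid for an inter-server copy), flush the source range
 * and the whole destination file, then send the COPY.  Asynchronous
 * replies are handled by handle_async_copy(), and data not yet on
 * stable storage is committed via process_copy_commit().  On success
 * the destination page cache over the copied range is invalidated and
 * the number of bytes copied is returned.
 */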
264 static ssize_t _nfs42_proc_copy(struct file *src,
265 				struct nfs_lock_context *src_lock,
266 				struct file *dst,
267 				struct nfs_lock_context *dst_lock,
268 				struct nfs42_copy_args *args,
269 				struct nfs42_copy_res *res,
270 				struct nl4_server *nss,
271 				nfs4_stateid *cnr_stateid,
272 				bool *restart)
273 {
274 	struct rpc_message msg = {
275 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
276 		.rpc_argp = args,
277 		.rpc_resp = res,
278 	};
279 	struct inode *dst_inode = file_inode(dst);
280 	struct inode *src_inode = file_inode(src);
281 	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
282 	struct nfs_server *src_server = NFS_SERVER(src_inode);
283 	loff_t pos_src = args->src_pos;
284 	loff_t pos_dst = args->dst_pos;
285 	size_t count = args->count;
286 	ssize_t status;
287 
288 	if (nss) {
289 		args->cp_src = nss;
290 		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
291 	} else {
292 		status = nfs4_set_rw_stateid(&args->src_stateid,
293 				src_lock->open_context, src_lock, FMODE_READ);
294 		if (status) {
295 			if (status == -EAGAIN)
296 				status = -NFS4ERR_BAD_STATEID;
297 			return status;
298 		}
299 	}
300 	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
301 			pos_src, pos_src + (loff_t)count - 1);
302 	if (status)
303 		return status;
304 
305 	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
306 				     dst_lock, FMODE_WRITE);
307 	if (status) {
308 		if (status == -EAGAIN)
309 			status = -NFS4ERR_BAD_STATEID;
310 		return status;
311 	}
312 
313 	status = nfs_sync_inode(dst_inode);
314 	if (status)
315 		return status;
316 
317 	res->commit_res.verf = NULL;
318 	if (args->sync) {
319 		res->commit_res.verf =
320 			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
321 		if (!res->commit_res.verf)
322 			return -ENOMEM;
323 	}
324 	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
325 		&src_lock->open_context->state->flags);
326 	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
327 		&dst_lock->open_context->state->flags);
328 
329 	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
330 				&args->seq_args, &res->seq_res, 0);
331 	if (status == -ENOTSUPP)
332 		dst_server->caps &= ~NFS_CAP_COPY;
333 	if (status)
334 		goto out;
335 
336 	if (args->sync &&
337 		nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
338 				    &res->commit_res.verf->verifier)) {
339 		status = -EAGAIN;
340 		goto out;
341 	}
342 
343 	if (!res->synchronous) {
344 		status = handle_async_copy(res, dst_server, src_server, src,
345 				dst, &args->src_stateid, restart);
346 		if (status)
347 			goto out;
348 	}
349 
350 	if ((!res->synchronous || !args->sync) &&
351 			res->write_res.verifier.committed != NFS_FILE_SYNC) {
352 		status = process_copy_commit(dst, pos_dst, res);
353 		if (status)
354 			goto out;
355 	}
356 
357 	truncate_pagecache_range(dst_inode, pos_dst,
358 				 pos_dst + res->write_res.count);
359 	spin_lock(&dst_inode->i_lock);
360 	nfs_set_cache_invalid(
361 		dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
362 				   NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
363 				   NFS_INO_INVALID_DATA);
364 	spin_unlock(&dst_inode->i_lock);
365 	spin_lock(&src_inode->i_lock);
366 	nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
367 						 NFS_INO_REVAL_FORCED |
368 						 NFS_INO_INVALID_ATIME);
369 	spin_unlock(&src_inode->i_lock);
370 	status = res->write_res.count;
371 out:
372 	if (args->sync)
373 		kfree(res->commit_res.verf);
374 	return status;
375 }
376 
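/*
 * Copy @count bytes from @src at @pos_src to @dst at @pos_dst.
 * Re-drives _nfs42_proc_copy() through the usual NFSv4 exception
 * handling, falling back to a synchronous copy when the server refuses
 * an async one (NFS4ERR_OFFLOAD_NO_REQS) and cancelling the offload on
 * the source side for inter-server errors that cannot be retried.
 */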
377 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
378 			struct file *dst, loff_t pos_dst, size_t count,
379 			struct nl4_server *nss,
380 			nfs4_stateid *cnr_stateid, bool sync)
381 {
382 	struct nfs_server *server = NFS_SERVER(file_inode(dst));
383 	struct nfs_lock_context *src_lock;
384 	struct nfs_lock_context *dst_lock;
385 	struct nfs42_copy_args args = {
386 		.src_fh		= NFS_FH(file_inode(src)),
387 		.src_pos	= pos_src,
388 		.dst_fh		= NFS_FH(file_inode(dst)),
389 		.dst_pos	= pos_dst,
390 		.count		= count,
391 		.sync		= sync,
392 	};
393 	struct nfs42_copy_res res;
394 	struct nfs4_exception src_exception = {
395 		.inode		= file_inode(src),
396 		.stateid	= &args.src_stateid,
397 	};
398 	struct nfs4_exception dst_exception = {
399 		.inode		= file_inode(dst),
400 		.stateid	= &args.dst_stateid,
401 	};
402 	ssize_t err, err2;
403 	bool restart = false;
404 
405 	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
406 	if (IS_ERR(src_lock))
407 		return PTR_ERR(src_lock);
408 
409 	src_exception.state = src_lock->open_context->state;
410 
411 	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
412 	if (IS_ERR(dst_lock)) {
413 		err = PTR_ERR(dst_lock);
414 		goto out_put_src_lock;
415 	}
416 
417 	dst_exception.state = dst_lock->open_context->state;
418 
419 	do {
420 		inode_lock(file_inode(dst));
421 		err = _nfs42_proc_copy(src, src_lock,
422 				dst, dst_lock,
423 				&args, &res,
424 				nss, cnr_stateid, &restart);
425 		inode_unlock(file_inode(dst));
426 
427 		if (err >= 0)
428 			break;
429 		if (err == -ENOTSUPP &&
430 				nfs42_files_from_same_server(src, dst)) {
431 			err = -EOPNOTSUPP;
432 			break;
433 		} else if (err == -EAGAIN) {
434 			if (!restart) {
435 				dst_exception.retry = 1;
436 				continue;
437 			}
438 			break;
439 		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
440 			args.sync = true;
441 			dst_exception.retry = 1;
442 			continue;
443 		} else if ((err == -ESTALE ||
444 				err == -NFS4ERR_OFFLOAD_DENIED ||
445 				err == -ENOTSUPP) &&
446 				!nfs42_files_from_same_server(src, dst)) {
447 			nfs42_do_offload_cancel_async(src, &args.src_stateid);
448 			err = -EOPNOTSUPP;
449 			break;
450 		}
451 
452 		err2 = nfs4_handle_exception(server, err, &src_exception);
453 		err  = nfs4_handle_exception(server, err, &dst_exception);
454 		if (!err)
455 			err = err2;
456 	} while (src_exception.retry || dst_exception.retry);
457 
458 	nfs_put_lock_context(dst_lock);
459 out_put_src_lock:
460 	nfs_put_lock_context(src_lock);
461 	return err;
462 }
463 
464 struct nfs42_offloadcancel_data {
465 	struct nfs_server *seq_server;
466 	struct nfs42_offload_status_args args;
467 	struct nfs42_offload_status_res res;
468 };
469 
470 static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
471 {
472 	struct nfs42_offloadcancel_data *data = calldata;
473 
474 	nfs4_setup_sequence(data->seq_server->nfs_client,
475 				&data->args.osa_seq_args,
476 				&data->res.osr_seq_res, task);
477 }
478 
479 static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
480 {
481 	struct nfs42_offloadcancel_data *data = calldata;
482 
483 	nfs41_sequence_done(task, &data->res.osr_seq_res);
484 	if (task->tk_status &&
485 		nfs4_async_handle_error(task, data->seq_server, NULL,
486 			NULL) == -EAGAIN)
487 		rpc_restart_call_prepare(task);
488 }
489 
490 static void nfs42_free_offloadcancel_data(void *data)
491 {
492 	kfree(data);
493 }
494 
495 static const struct rpc_call_ops nfs42_offload_cancel_ops = {
496 	.rpc_call_prepare = nfs42_offload_cancel_prepare,
497 	.rpc_call_done = nfs42_offload_cancel_done,
498 	.rpc_release = nfs42_free_offloadcancel_data,
499 };
500 
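/*
 * Send an asynchronous OFFLOAD_CANCEL for @stateid to the server that
 * holds @dst and wait for the RPC task to finish.  Clears
 * NFS_CAP_OFFLOAD_CANCEL if the server rejects the operation with
 * -ENOTSUPP.
 */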
501 static int nfs42_do_offload_cancel_async(struct file *dst,
502 					 nfs4_stateid *stateid)
503 {
504 	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
505 	struct nfs42_offloadcancel_data *data = NULL;
506 	struct nfs_open_context *ctx = nfs_file_open_context(dst);
507 	struct rpc_task *task;
508 	struct rpc_message msg = {
509 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
510 		.rpc_cred = ctx->cred,
511 	};
512 	struct rpc_task_setup task_setup_data = {
513 		.rpc_client = dst_server->client,
514 		.rpc_message = &msg,
515 		.callback_ops = &nfs42_offload_cancel_ops,
516 		.workqueue = nfsiod_workqueue,
517 		.flags = RPC_TASK_ASYNC,
518 	};
519 	int status;
520 
521 	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
522 		return -EOPNOTSUPP;
523 
524 	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
525 	if (data == NULL)
526 		return -ENOMEM;
527 
528 	data->seq_server = dst_server;
529 	data->args.osa_src_fh = NFS_FH(file_inode(dst));
530 	memcpy(&data->args.osa_stateid, stateid,
531 		sizeof(data->args.osa_stateid));
532 	msg.rpc_argp = &data->args;
533 	msg.rpc_resp = &data->res;
534 	task_setup_data.callback_data = data;
535 	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
536 			   1, 0);
537 	task = rpc_run_task(&task_setup_data);
538 	if (IS_ERR(task))
539 		return PTR_ERR(task);
540 	status = rpc_wait_for_completion_task(task);
541 	if (status == -ENOTSUPP)
542 		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
543 	rpc_put_task(task);
544 	return status;
545 }
546 
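/*
 * Single COPY_NOTIFY attempt: tell the source server that the
 * destination described in @args->cna_dst will be pulling data from
 * @src, and collect the copy-notify stateid in @res for use by the
 * subsequent inter-server COPY.
 */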
547 static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
548 				   struct nfs42_copy_notify_args *args,
549 				   struct nfs42_copy_notify_res *res)
550 {
551 	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
552 	struct rpc_message msg = {
553 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
554 		.rpc_argp = args,
555 		.rpc_resp = res,
556 	};
557 	int status;
558 	struct nfs_open_context *ctx;
559 	struct nfs_lock_context *l_ctx;
560 
561 	ctx = get_nfs_open_context(nfs_file_open_context(src));
562 	l_ctx = nfs_get_lock_context(ctx);
563 	if (IS_ERR(l_ctx))
564 		return PTR_ERR(l_ctx);
565 
566 	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
567 				     FMODE_READ);
568 	nfs_put_lock_context(l_ctx);
569 	if (status) {
570 		if (status == -EAGAIN)
571 			status = -NFS4ERR_BAD_STATEID;
572 		return status;
573 	}
574 
575 	status = nfs4_call_sync(src_server->client, src_server, &msg,
576 				&args->cna_seq_args, &res->cnr_seq_res, 0);
577 	if (status == -ENOTSUPP)
578 		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
579 
580 	put_nfs_open_context(nfs_file_open_context(src));
581 	return status;
582 }
583 
584 int nfs42_proc_copy_notify(struct file *src, struct file *dst,
585 				struct nfs42_copy_notify_res *res)
586 {
587 	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
588 	struct nfs42_copy_notify_args *args;
589 	struct nfs4_exception exception = {
590 		.inode = file_inode(src),
591 	};
592 	int status;
593 
594 	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
595 		return -EOPNOTSUPP;
596 
597 	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
598 	if (args == NULL)
599 		return -ENOMEM;
600 
601 	args->cna_src_fh = NFS_FH(file_inode(src));
602 	args->cna_dst.nl4_type = NL4_NETADDR;
603 	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
604 	exception.stateid = &args->cna_src_stateid;
605 
606 	do {
607 		status = _nfs42_proc_copy_notify(src, dst, args, res);
608 		if (status == -ENOTSUPP) {
609 			status = -EOPNOTSUPP;
610 			goto out;
611 		}
612 		status = nfs4_handle_exception(src_server, status, &exception);
613 	} while (exception.retry);
614 
615 out:
616 	kfree(args);
617 	return status;
618 }
619 
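/*
 * One SEEK attempt: map SEEK_HOLE/SEEK_DATA onto the NFSv4.2 SEEK
 * operation, flushing dirty pages from @offset onwards first so the
 * server sees current data, and update the file position from the
 * offset returned by the server.
 */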
620 static loff_t _nfs42_proc_llseek(struct file *filep,
621 		struct nfs_lock_context *lock, loff_t offset, int whence)
622 {
623 	struct inode *inode = file_inode(filep);
624 	struct nfs42_seek_args args = {
625 		.sa_fh		= NFS_FH(inode),
626 		.sa_offset	= offset,
627 		.sa_what	= (whence == SEEK_HOLE) ?
628 					NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
629 	};
630 	struct nfs42_seek_res res;
631 	struct rpc_message msg = {
632 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
633 		.rpc_argp = &args,
634 		.rpc_resp = &res,
635 	};
636 	struct nfs_server *server = NFS_SERVER(inode);
637 	int status;
638 
639 	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
640 		return -ENOTSUPP;
641 
642 	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
643 			lock, FMODE_READ);
644 	if (status) {
645 		if (status == -EAGAIN)
646 			status = -NFS4ERR_BAD_STATEID;
647 		return status;
648 	}
649 
650 	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
651 			offset, LLONG_MAX);
652 	if (status)
653 		return status;
654 
655 	status = nfs4_call_sync(server->client, server, &msg,
656 				&args.seq_args, &res.seq_res, 0);
657 	if (status == -ENOTSUPP)
658 		server->caps &= ~NFS_CAP_SEEK;
659 	if (status)
660 		return status;
661 
662 	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
663 }
664 
665 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
666 {
667 	struct nfs_server *server = NFS_SERVER(file_inode(filep));
668 	struct nfs4_exception exception = { };
669 	struct nfs_lock_context *lock;
670 	loff_t err;
671 
672 	lock = nfs_get_lock_context(nfs_file_open_context(filep));
673 	if (IS_ERR(lock))
674 		return PTR_ERR(lock);
675 
676 	exception.inode = file_inode(filep);
677 	exception.state = lock->open_context->state;
678 
679 	do {
680 		err = _nfs42_proc_llseek(filep, lock, offset, whence);
681 		if (err >= 0)
682 			break;
683 		if (err == -ENOTSUPP) {
684 			err = -EOPNOTSUPP;
685 			break;
686 		}
687 		err = nfs4_handle_exception(server, err, &exception);
688 	} while (exception.retry);
689 
690 	nfs_put_lock_context(lock);
691 	return err;
692 }
693 
694 
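/*
 * Abort the LAYOUTSTATS call if the layout has already been torn down;
 * otherwise snapshot the current layout stateid just before the RPC is
 * transmitted.
 */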
695 static void
696 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
697 {
698 	struct nfs42_layoutstat_data *data = calldata;
699 	struct inode *inode = data->inode;
700 	struct nfs_server *server = NFS_SERVER(inode);
701 	struct pnfs_layout_hdr *lo;
702 
703 	spin_lock(&inode->i_lock);
704 	lo = NFS_I(inode)->layout;
705 	if (!pnfs_layout_is_valid(lo)) {
706 		spin_unlock(&inode->i_lock);
707 		rpc_exit(task, 0);
708 		return;
709 	}
710 	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
711 	spin_unlock(&inode->i_lock);
712 	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
713 			    &data->res.seq_res, task);
714 }
715 
716 static void
717 nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
718 {
719 	struct nfs42_layoutstat_data *data = calldata;
720 	struct inode *inode = data->inode;
721 	struct pnfs_layout_hdr *lo;
722 
723 	if (!nfs4_sequence_done(task, &data->res.seq_res))
724 		return;
725 
726 	switch (task->tk_status) {
727 	case 0:
728 		return;
729 	case -NFS4ERR_BADHANDLE:
730 	case -ESTALE:
731 		pnfs_destroy_layout(NFS_I(inode));
732 		break;
733 	case -NFS4ERR_EXPIRED:
734 	case -NFS4ERR_ADMIN_REVOKED:
735 	case -NFS4ERR_DELEG_REVOKED:
736 	case -NFS4ERR_STALE_STATEID:
737 	case -NFS4ERR_BAD_STATEID:
738 		spin_lock(&inode->i_lock);
739 		lo = NFS_I(inode)->layout;
740 		if (pnfs_layout_is_valid(lo) &&
741 		    nfs4_stateid_match(&data->args.stateid,
742 					     &lo->plh_stateid)) {
743 			LIST_HEAD(head);
744 
745 			/*
746 			 * Mark the bad layout state as invalid, then retry
747 			 * with the current stateid.
748 			 */
749 			pnfs_mark_layout_stateid_invalid(lo, &head);
750 			spin_unlock(&inode->i_lock);
751 			pnfs_free_lseg_list(&head);
752 			nfs_commit_inode(inode, 0);
753 		} else
754 			spin_unlock(&inode->i_lock);
755 		break;
756 	case -NFS4ERR_OLD_STATEID:
757 		spin_lock(&inode->i_lock);
758 		lo = NFS_I(inode)->layout;
759 		if (pnfs_layout_is_valid(lo) &&
760 		    nfs4_stateid_match_other(&data->args.stateid,
761 					&lo->plh_stateid)) {
762 			/* Do we need to delay before resending? */
763 			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
764 						&data->args.stateid))
765 				rpc_delay(task, HZ);
766 			rpc_restart_call_prepare(task);
767 		}
768 		spin_unlock(&inode->i_lock);
769 		break;
770 	case -ENOTSUPP:
771 	case -EOPNOTSUPP:
772 		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
773 	}
774 
775 	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
776 }
777 
778 static void
779 nfs42_layoutstat_release(void *calldata)
780 {
781 	struct nfs42_layoutstat_data *data = calldata;
782 	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
783 	int i;
784 
785 	for (i = 0; i < data->args.num_dev; i++) {
786 		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
787 			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
788 	}
789 
790 	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
791 	smp_mb__before_atomic();
792 	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
793 	smp_mb__after_atomic();
794 	nfs_iput_and_deactive(data->inode);
795 	kfree(data->args.devinfo);
796 	kfree(data);
797 }
798 
799 static const struct rpc_call_ops nfs42_layoutstat_ops = {
800 	.rpc_call_prepare = nfs42_layoutstat_prepare,
801 	.rpc_call_done = nfs42_layoutstat_done,
802 	.rpc_release = nfs42_layoutstat_release,
803 };
804 
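/*
 * Fire off an asynchronous LAYOUTSTATS RPC described by @data.  The
 * inode is pinned for the duration of the call; nfs42_layoutstat_release()
 * drops that reference and clears NFS_INO_LAYOUTSTATS when the task is
 * released.
 */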
805 int nfs42_proc_layoutstats_generic(struct nfs_server *server,
806 				   struct nfs42_layoutstat_data *data)
807 {
808 	struct rpc_message msg = {
809 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
810 		.rpc_argp = &data->args,
811 		.rpc_resp = &data->res,
812 	};
813 	struct rpc_task_setup task_setup = {
814 		.rpc_client = server->client,
815 		.rpc_message = &msg,
816 		.callback_ops = &nfs42_layoutstat_ops,
817 		.callback_data = data,
818 		.flags = RPC_TASK_ASYNC,
819 	};
820 	struct rpc_task *task;
821 
822 	data->inode = nfs_igrab_and_active(data->args.inode);
823 	if (!data->inode) {
824 		nfs42_layoutstat_release(data);
825 		return -EAGAIN;
826 	}
827 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
828 	task = rpc_run_task(&task_setup);
829 	if (IS_ERR(task))
830 		return PTR_ERR(task);
831 	rpc_put_task(task);
832 	return 0;
833 }
834 
835 static struct nfs42_layouterror_data *
836 nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
837 {
838 	struct nfs42_layouterror_data *data;
839 	struct inode *inode = lseg->pls_layout->plh_inode;
840 
841 	data = kzalloc(sizeof(*data), gfp_flags);
842 	if (data) {
843 		data->args.inode = data->inode = nfs_igrab_and_active(inode);
844 		if (data->inode) {
845 			data->lseg = pnfs_get_lseg(lseg);
846 			if (data->lseg)
847 				return data;
848 			nfs_iput_and_deactive(data->inode);
849 		}
850 		kfree(data);
851 	}
852 	return NULL;
853 }
854 
855 static void
856 nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
857 {
858 	pnfs_put_lseg(data->lseg);
859 	nfs_iput_and_deactive(data->inode);
860 	kfree(data);
861 }
862 
863 static void
864 nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
865 {
866 	struct nfs42_layouterror_data *data = calldata;
867 	struct inode *inode = data->inode;
868 	struct nfs_server *server = NFS_SERVER(inode);
869 	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
870 	unsigned i;
871 
872 	spin_lock(&inode->i_lock);
873 	if (!pnfs_layout_is_valid(lo)) {
874 		spin_unlock(&inode->i_lock);
875 		rpc_exit(task, 0);
876 		return;
877 	}
878 	for (i = 0; i < data->args.num_errors; i++)
879 		nfs4_stateid_copy(&data->args.errors[i].stateid,
880 				&lo->plh_stateid);
881 	spin_unlock(&inode->i_lock);
882 	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
883 			    &data->res.seq_res, task);
884 }
885 
886 static void
887 nfs42_layouterror_done(struct rpc_task *task, void *calldata)
888 {
889 	struct nfs42_layouterror_data *data = calldata;
890 	struct inode *inode = data->inode;
891 	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
892 
893 	if (!nfs4_sequence_done(task, &data->res.seq_res))
894 		return;
895 
896 	switch (task->tk_status) {
897 	case 0:
898 		return;
899 	case -NFS4ERR_BADHANDLE:
900 	case -ESTALE:
901 		pnfs_destroy_layout(NFS_I(inode));
902 		break;
903 	case -NFS4ERR_EXPIRED:
904 	case -NFS4ERR_ADMIN_REVOKED:
905 	case -NFS4ERR_DELEG_REVOKED:
906 	case -NFS4ERR_STALE_STATEID:
907 	case -NFS4ERR_BAD_STATEID:
908 		spin_lock(&inode->i_lock);
909 		if (pnfs_layout_is_valid(lo) &&
910 		    nfs4_stateid_match(&data->args.errors[0].stateid,
911 					     &lo->plh_stateid)) {
912 			LIST_HEAD(head);
913 
914 			/*
915 			 * Mark the bad layout state as invalid, then retry
916 			 * with the current stateid.
917 			 */
918 			pnfs_mark_layout_stateid_invalid(lo, &head);
919 			spin_unlock(&inode->i_lock);
920 			pnfs_free_lseg_list(&head);
921 			nfs_commit_inode(inode, 0);
922 		} else
923 			spin_unlock(&inode->i_lock);
924 		break;
925 	case -NFS4ERR_OLD_STATEID:
926 		spin_lock(&inode->i_lock);
927 		if (pnfs_layout_is_valid(lo) &&
928 		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
929 					&lo->plh_stateid)) {
930 			/* Do we need to delay before resending? */
931 			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
932 						&data->args.errors[0].stateid))
933 				rpc_delay(task, HZ);
934 			rpc_restart_call_prepare(task);
935 		}
936 		spin_unlock(&inode->i_lock);
937 		break;
938 	case -ENOTSUPP:
939 	case -EOPNOTSUPP:
940 		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
941 	}
942 
943 	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
944 			       task->tk_status);
945 }
946 
947 static void
948 nfs42_layouterror_release(void *calldata)
949 {
950 	struct nfs42_layouterror_data *data = calldata;
951 
952 	nfs42_free_layouterror_data(data);
953 }
954 
955 static const struct rpc_call_ops nfs42_layouterror_ops = {
956 	.rpc_call_prepare = nfs42_layouterror_prepare,
957 	.rpc_call_done = nfs42_layouterror_done,
958 	.rpc_release = nfs42_layouterror_release,
959 };
960 
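/*
 * Report up to NFS42_LAYOUTERROR_MAX I/O errors against @lseg to the
 * server with an asynchronous LAYOUTERROR call.  The layout stateid for
 * each error is filled in at transmit time so it always matches the
 * current layout.
 */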
961 int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
962 		const struct nfs42_layout_error *errors, size_t n)
963 {
964 	struct inode *inode = lseg->pls_layout->plh_inode;
965 	struct nfs42_layouterror_data *data;
966 	struct rpc_task *task;
967 	struct rpc_message msg = {
968 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
969 	};
970 	struct rpc_task_setup task_setup = {
971 		.rpc_message = &msg,
972 		.callback_ops = &nfs42_layouterror_ops,
973 		.flags = RPC_TASK_ASYNC,
974 	};
975 	unsigned int i;
976 
977 	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
978 		return -EOPNOTSUPP;
979 	if (n > NFS42_LAYOUTERROR_MAX)
980 		return -EINVAL;
981 	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
982 	if (!data)
983 		return -ENOMEM;
984 	for (i = 0; i < n; i++) {
985 		data->args.errors[i] = errors[i];
986 		data->args.num_errors++;
987 		data->res.num_errors++;
988 	}
989 	msg.rpc_argp = &data->args;
990 	msg.rpc_resp = &data->res;
991 	task_setup.callback_data = data;
992 	task_setup.rpc_client = NFS_SERVER(inode)->client;
993 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
994 	task = rpc_run_task(&task_setup);
995 	if (IS_ERR(task))
996 		return PTR_ERR(task);
997 	rpc_put_task(task);
998 	return 0;
999 }
1000 EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
1001 
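/*
 * Single CLONE attempt: clone @count bytes from @src_offset in @src_f
 * to @dst_offset in @dst_f using the two files' stateids, then refresh
 * the destination inode from the post-op attributes returned by the
 * server.
 */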
1002 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
1003 		struct file *dst_f, struct nfs_lock_context *src_lock,
1004 		struct nfs_lock_context *dst_lock, loff_t src_offset,
1005 		loff_t dst_offset, loff_t count)
1006 {
1007 	struct inode *src_inode = file_inode(src_f);
1008 	struct inode *dst_inode = file_inode(dst_f);
1009 	struct nfs_server *server = NFS_SERVER(dst_inode);
1010 	struct nfs42_clone_args args = {
1011 		.src_fh = NFS_FH(src_inode),
1012 		.dst_fh = NFS_FH(dst_inode),
1013 		.src_offset = src_offset,
1014 		.dst_offset = dst_offset,
1015 		.count = count,
1016 		.dst_bitmask = server->cache_consistency_bitmask,
1017 	};
1018 	struct nfs42_clone_res res = {
1019 		.server	= server,
1020 	};
1021 	int status;
1022 
1023 	msg->rpc_argp = &args;
1024 	msg->rpc_resp = &res;
1025 
1026 	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
1027 			src_lock, FMODE_READ);
1028 	if (status) {
1029 		if (status == -EAGAIN)
1030 			status = -NFS4ERR_BAD_STATEID;
1031 		return status;
1032 	}
1033 	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
1034 			dst_lock, FMODE_WRITE);
1035 	if (status) {
1036 		if (status == -EAGAIN)
1037 			status = -NFS4ERR_BAD_STATEID;
1038 		return status;
1039 	}
1040 
1041 	res.dst_fattr = nfs_alloc_fattr();
1042 	if (!res.dst_fattr)
1043 		return -ENOMEM;
1044 
1045 	status = nfs4_call_sync(server->client, server, msg,
1046 				&args.seq_args, &res.seq_res, 0);
1047 	if (status == 0)
1048 		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
1049 
1050 	kfree(res.dst_fattr);
1051 	return status;
1052 }
1053 
1054 int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
1055 		     loff_t src_offset, loff_t dst_offset, loff_t count)
1056 {
1057 	struct rpc_message msg = {
1058 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
1059 	};
1060 	struct inode *inode = file_inode(src_f);
1061 	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
1062 	struct nfs_lock_context *src_lock;
1063 	struct nfs_lock_context *dst_lock;
1064 	struct nfs4_exception src_exception = { };
1065 	struct nfs4_exception dst_exception = { };
1066 	int err, err2;
1067 
1068 	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
1069 		return -EOPNOTSUPP;
1070 
1071 	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
1072 	if (IS_ERR(src_lock))
1073 		return PTR_ERR(src_lock);
1074 
1075 	src_exception.inode = file_inode(src_f);
1076 	src_exception.state = src_lock->open_context->state;
1077 
1078 	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
1079 	if (IS_ERR(dst_lock)) {
1080 		err = PTR_ERR(dst_lock);
1081 		goto out_put_src_lock;
1082 	}
1083 
1084 	dst_exception.inode = file_inode(dst_f);
1085 	dst_exception.state = dst_lock->open_context->state;
1086 
1087 	do {
1088 		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
1089 					src_offset, dst_offset, count);
1090 		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
1091 			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
1092 			err = -EOPNOTSUPP;
1093 			break;
1094 		}
1095 
1096 		err2 = nfs4_handle_exception(server, err, &src_exception);
1097 		err = nfs4_handle_exception(server, err, &dst_exception);
1098 		if (!err)
1099 			err = err2;
1100 	} while (src_exception.retry || dst_exception.retry);
1101 
1102 	nfs_put_lock_context(dst_lock);
1103 out_put_src_lock:
1104 	nfs_put_lock_context(src_lock);
1105 	return err;
1106 }
1107 
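/*
 * Extended attribute (RFC 8276) operations.  NFS4XATTR_MAXPAGES bounds
 * the page array used to carry an xattr value on the wire: enough pages
 * to hold XATTR_SIZE_MAX bytes.
 */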
1108 #define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
1109 
1110 static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
1111 {
1112 	struct nfs_server *server = NFS_SERVER(inode);
1113 	struct nfs42_removexattrargs args = {
1114 		.fh = NFS_FH(inode),
1115 		.xattr_name = name,
1116 	};
1117 	struct nfs42_removexattrres res;
1118 	struct rpc_message msg = {
1119 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
1120 		.rpc_argp = &args,
1121 		.rpc_resp = &res,
1122 	};
1123 	int ret;
1124 	unsigned long timestamp = jiffies;
1125 
1126 	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
1127 	    &res.seq_res, 1);
1128 	if (!ret)
1129 		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1130 
1131 	return ret;
1132 }
1133 
1134 static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
1135 				const void *buf, size_t buflen, int flags)
1136 {
1137 	struct nfs_server *server = NFS_SERVER(inode);
1138 	struct page *pages[NFS4XATTR_MAXPAGES];
1139 	struct nfs42_setxattrargs arg = {
1140 		.fh		= NFS_FH(inode),
1141 		.xattr_pages	= pages,
1142 		.xattr_len	= buflen,
1143 		.xattr_name	= name,
1144 		.xattr_flags	= flags,
1145 	};
1146 	struct nfs42_setxattrres res;
1147 	struct rpc_message msg = {
1148 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
1149 		.rpc_argp	= &arg,
1150 		.rpc_resp	= &res,
1151 	};
1152 	int ret, np;
1153 	unsigned long timestamp = jiffies;
1154 
1155 	if (buflen > server->sxasize)
1156 		return -ERANGE;
1157 
1158 	if (buflen > 0) {
1159 		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
1160 		if (np < 0)
1161 			return np;
1162 	} else
1163 		np = 0;
1164 
1165 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1166 	    &res.seq_res, 1);
1167 
1168 	for (; np > 0; np--)
1169 		put_page(pages[np - 1]);
1170 
1171 	if (!ret)
1172 		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1173 
1174 	return ret;
1175 }
1176 
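/*
 * Single GETXATTR attempt.  The reply lands in @pages; on success the
 * value is inserted into the xattr cache and, if the caller supplied a
 * buffer, copied out (or -ERANGE is returned if it does not fit).
 */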
1177 static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
1178 				void *buf, size_t buflen, struct page **pages,
1179 				size_t plen)
1180 {
1181 	struct nfs_server *server = NFS_SERVER(inode);
1182 	struct nfs42_getxattrargs arg = {
1183 		.fh		= NFS_FH(inode),
1184 		.xattr_name	= name,
1185 	};
1186 	struct nfs42_getxattrres res;
1187 	struct rpc_message msg = {
1188 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
1189 		.rpc_argp	= &arg,
1190 		.rpc_resp	= &res,
1191 	};
1192 	ssize_t ret;
1193 
1194 	arg.xattr_len = plen;
1195 	arg.xattr_pages = pages;
1196 
1197 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1198 	    &res.seq_res, 0);
1199 	if (ret < 0)
1200 		return ret;
1201 
1202 	/*
1203 	 * Normally, the caching is done one layer up, but for successful
1204 	 * RPCs, always cache the result here, even if the caller was
1205 	 * just querying the length, or if the reply was too big for
1206 	 * the caller. This avoids a second RPC in the case of the
1207 	 * common query-alloc-retrieve cycle for xattrs.
1208 	 *
1209 	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
1210 	 */
1211 
1212 	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
1213 
1214 	if (buflen) {
1215 		if (res.xattr_len > buflen)
1216 			return -ERANGE;
1217 		_copy_from_pages(buf, pages, 0, res.xattr_len);
1218 	}
1219 
1220 	return res.xattr_len;
1221 }
1222 
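/*
 * Single LISTXATTRS attempt: size the receive buffer from @buflen
 * (capped at the server's advertised maximum), decode the names into
 * @buf, and return the number of bytes copied along with the cookie
 * and EOF flag for the next call.
 */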
1223 static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
1224 				 size_t buflen, u64 *cookiep, bool *eofp)
1225 {
1226 	struct nfs_server *server = NFS_SERVER(inode);
1227 	struct page **pages;
1228 	struct nfs42_listxattrsargs arg = {
1229 		.fh		= NFS_FH(inode),
1230 		.cookie		= *cookiep,
1231 	};
1232 	struct nfs42_listxattrsres res = {
1233 		.eof = false,
1234 		.xattr_buf = buf,
1235 		.xattr_len = buflen,
1236 	};
1237 	struct rpc_message msg = {
1238 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
1239 		.rpc_argp	= &arg,
1240 		.rpc_resp	= &res,
1241 	};
1242 	u32 xdrlen;
1243 	int ret, np, i;
1244 
1245 
1246 	ret = -ENOMEM;
1247 	res.scratch = alloc_page(GFP_KERNEL);
1248 	if (!res.scratch)
1249 		goto out;
1250 
1251 	xdrlen = nfs42_listxattr_xdrsize(buflen);
1252 	if (xdrlen > server->lxasize)
1253 		xdrlen = server->lxasize;
1254 	np = xdrlen / PAGE_SIZE + 1;
1255 
1256 	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
1257 	if (!pages)
1258 		goto out_free_scratch;
1259 	for (i = 0; i < np; i++) {
1260 		pages[i] = alloc_page(GFP_KERNEL);
1261 		if (!pages[i])
1262 			goto out_free_pages;
1263 	}
1264 
1265 	arg.xattr_pages = pages;
1266 	arg.count = xdrlen;
1267 
1268 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1269 	    &res.seq_res, 0);
1270 
1271 	if (ret >= 0) {
1272 		ret = res.copied;
1273 		*cookiep = res.cookie;
1274 		*eofp = res.eof;
1275 	}
1276 
1277 out_free_pages:
1278 	while (--np >= 0) {
1279 		if (pages[np])
1280 			__free_page(pages[np]);
1281 	}
1282 	kfree(pages);
1283 out_free_scratch:
1284 	__free_page(res.scratch);
1285 out:
1286 	return ret;
1287 
1288 }
1289 
1290 ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
1291 			      void *buf, size_t buflen)
1292 {
1293 	struct nfs4_exception exception = { };
1294 	ssize_t err, np, i;
1295 	struct page **pages;
1296 
1297 	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
1298 	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
1299 	if (!pages)
1300 		return -ENOMEM;
1301 
1302 	for (i = 0; i < np; i++) {
1303 		pages[i] = alloc_page(GFP_KERNEL);
1304 		if (!pages[i]) {
1305 			np = i + 1;
1306 			err = -ENOMEM;
1307 			goto out;
1308 		}
1309 	}
1310 
1311 	/*
1312 	 * The GETXATTR op has no length field in the call, and the
1313 	 * xattr data is at the end of the reply.
1314 	 *
1315 	 * There is no downside in using the page-aligned length. It will
1316 	 * allow receiving and caching xattrs that are too large for the
1317 	 * caller but still fit in the page-rounded value.
1318 	 */
1319 	do {
1320 		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
1321 			pages, np * PAGE_SIZE);
1322 		if (err >= 0)
1323 			break;
1324 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
1325 				&exception);
1326 	} while (exception.retry);
1327 
1328 out:
1329 	while (--np >= 0)
1330 		__free_page(pages[np]);
1331 	kfree(pages);
1332 
1333 	return err;
1334 }
1335 
1336 int nfs42_proc_setxattr(struct inode *inode, const char *name,
1337 			      const void *buf, size_t buflen, int flags)
1338 {
1339 	struct nfs4_exception exception = { };
1340 	int err;
1341 
1342 	do {
1343 		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
1344 		if (!err)
1345 			break;
1346 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
1347 				&exception);
1348 	} while (exception.retry);
1349 
1350 	return err;
1351 }
1352 
1353 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
1354 			      size_t buflen, u64 *cookiep, bool *eofp)
1355 {
1356 	struct nfs4_exception exception = { };
1357 	ssize_t err;
1358 
1359 	do {
1360 		err = _nfs42_proc_listxattrs(inode, buf, buflen,
1361 		    cookiep, eofp);
1362 		if (err >= 0)
1363 			break;
1364 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
1365 				&exception);
1366 	} while (exception.retry);
1367 
1368 	return err;
1369 }
1370 
1371 int nfs42_proc_removexattr(struct inode *inode, const char *name)
1372 {
1373 	struct nfs4_exception exception = { };
1374 	int err;
1375 
1376 	do {
1377 		err = _nfs42_proc_removexattr(inode, name);
1378 		if (!err)
1379 			break;
1380 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
1381 				&exception);
1382 	} while (exception.retry);
1383 
1384 	return err;
1385 }
1386