// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

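/*
 * CB_GETATTR: the server is asking for the attributes of a file we hold
 * a write delegation for, so that it can answer stat() requests from
 * other clients without recalling the delegation.  We report only the
 * change attribute, size, ctime and mtime (masked by the requested
 * bitmap), and only while we still hold a write delegation.
 */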
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

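/*
 * CB_RECALL: the server wants the delegation described by the
 * filehandle and stateid back.  The actual return is performed by a
 * helper thread; here we only locate the inode and kick that off.
 */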
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = igrab(lo->plh_inode);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = igrab(lo->plh_inode);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

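/*
 * Prefer the stateid lookup; fall back to the filehandle for the case
 * where no layout we hold matches the stateid any more.
 */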
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
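	/*
	 * E.g. if the seqid we hold is 3, only a recall carrying 4 can be
	 * processed now: 5 or more means we missed an update (have the
	 * server retry with NFS4ERR_DELAY), while 3 or less is stale
	 * (NFS4ERR_OLD_STATEID).
	 */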
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

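/*
 * Bulk recall (RETURN_FSID or RETURN_ALL): destroy every matching
 * layout we hold.  Replying NFS4ERR_NOMATCHING_LAYOUT tells the server
 * we no longer hold any matching layouts; NFS4ERR_DELAY asks it to
 * retry because the destroy could not complete.
 */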
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

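/*
 * CB_NOTIFY_DEVICEID: the server is telling us that one or more
 * deviceids have changed or been deleted.  Drop the affected entries
 * from the local deviceid cache so they get looked up afresh.
 */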
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			/*
			 * No match: the loop cursor now points at the list
			 * head, not a valid nfs_server, and must not be
			 * dereferenced on the next pass.
			 */
			server = NULL;
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  On success the caller advances
 * the slot's sequence number.
 *
 * We don't yet implement a duplicate request cache; instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer
 * guarantees a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
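	/*
	 * (e.g. if slot->seq_nr is U32_MAX, then seq_nr + 1 wraps to 0,
	 *  so a csa_sequenceid of 0 is accepted as the in-order successor)
	 */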
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists,
				  spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
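			/*
			 * nfs4_slot_wait_on_seqid() can sleep, so the
			 * caller's spinlock must be dropped around it
			 * (hence the __releases/__acquires annotations).
			 */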
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

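/*
 * CB_SEQUENCE: validate the session and backchannel slot, detect
 * replays and misordered requests, and check for pending referring
 * calls before letting the rest of the compound proceed.
 */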
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				&tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.  We therefore only
	 * advance the sequence number on this success path.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

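/*
 * CB_RECALL_ANY: the server wants us to give back some number of
 * delegations and/or layouts of the types named in the bitmask,
 * typically because it is running low on resources.  We respond by
 * expiring unused delegations and scheduling layout returns of the
 * requested kinds.
 */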
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

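/*
 * CB_NOTIFY_LOCK: a lock we were waiting for may now be available;
 * wake up any process blocked on it so it can retry immediately.
 */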
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
				struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}

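/*
 * CB_OFFLOAD: completion notification for an asynchronous COPY.  Match
 * the stateid against the pending copies and complete the waiter, or
 * stash the result if the callback beat the COPY reply.
 */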
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
					tmp_copy->stateid.other,
					sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
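	/*
	 * The callback may arrive before the reply to the COPY itself;
	 * park the result on the client so the copy can claim it later.
	 */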
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */
739