1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/ceph/ceph_debug.h>
4 
5 #include <linux/module.h>
6 #include <linux/err.h>
7 #include <linux/highmem.h>
8 #include <linux/mm.h>
9 #include <linux/pagemap.h>
10 #include <linux/slab.h>
11 #include <linux/uaccess.h>
12 #ifdef CONFIG_BLOCK
13 #include <linux/bio.h>
14 #endif
15 
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/osd_client.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/auth.h>
22 #include <linux/ceph/pagelist.h>
23 
24 #define OSD_OPREPLY_FRONT_LEN	512
25 
26 static struct kmem_cache	*ceph_osd_request_cache;
27 
28 static const struct ceph_connection_operations osd_con_ops;
29 
30 /*
31  * Implement client access to a distributed object storage cluster.
32  *
33  * All data objects are stored within a cluster/cloud of OSDs, or
34  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
35  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
36  * remote daemons serving up and coordinating consistent and safe
37  * access to storage.
38  *
39  * Cluster membership and the mapping of data objects onto storage devices
40  * are described by the osd map.
41  *
42  * We keep track of pending OSD requests (read, write), resubmit
43  * requests to different OSDs when the cluster topology/data layout
44  * changes, or retry the affected requests when the communications
45  * channel with an OSD is reset.
46  */
47 
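/*
 * Typical single-op read flow through this file -- an illustrative
 * sketch only, with error handling elided (ceph_osdc_start_request()
 * and ceph_osdc_wait_request() live further down in this file):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 *					 false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */
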
48 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
49 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void link_linger(struct ceph_osd *osd,
51 			struct ceph_osd_linger_request *lreq);
52 static void unlink_linger(struct ceph_osd *osd,
53 			  struct ceph_osd_linger_request *lreq);
54 static void clear_backoffs(struct ceph_osd *osd);
55 
56 #if 1
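/*
 * Best-effort check that @sem is held for write: down_read_trylock()
 * succeeding proves no writer holds @sem, so a failed trylock is taken
 * to mean "write-locked".  Debug-only, for the verify_*() helpers
 * below.
 */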
57 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
58 {
59 	bool wrlocked = true;
60 
61 	if (unlikely(down_read_trylock(sem))) {
62 		wrlocked = false;
63 		up_read(sem);
64 	}
65 
66 	return wrlocked;
67 }
68 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
69 {
70 	WARN_ON(!rwsem_is_locked(&osdc->lock));
71 }
72 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
73 {
74 	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
75 }
76 static inline void verify_osd_locked(struct ceph_osd *osd)
77 {
78 	struct ceph_osd_client *osdc = osd->o_osdc;
79 
80 	WARN_ON(!(mutex_is_locked(&osd->lock) &&
81 		  rwsem_is_locked(&osdc->lock)) &&
82 		!rwsem_is_wrlocked(&osdc->lock));
83 }
84 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
85 {
86 	WARN_ON(!mutex_is_locked(&lreq->lock));
87 }
88 #else
89 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
90 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osd_locked(struct ceph_osd *osd) { }
92 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
93 #endif
94 
95 /*
96  * calculate the mapping of a file extent onto an object, and fill out the
97  * request accordingly.  shorten extent as necessary if it crosses an
98  * object boundary.
99  *
100  * fill osd op in request message.
101  */
102 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
103 			u64 *objnum, u64 *objoff, u64 *objlen)
104 {
105 	u64 orig_len = *plen;
106 	int r;
107 
108 	/* object extent? */
109 	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
110 					  objoff, objlen);
111 	if (r < 0)
112 		return r;
113 	if (*objlen < orig_len) {
114 		*plen = *objlen;
115 		dout(" skipping last %llu, final file extent %llu~%llu\n",
116 		     orig_len - *plen, off, *plen);
117 	}
118 
119 	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
120 
121 	return 0;
122 }
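
/*
 * Worked example (illustrative, assuming the default 4M object size
 * with stripe_count == 1): a file extent 3M~2M crosses the end of
 * object 0, so ceph_calc_file_object_mapping() yields objnum=0,
 * objoff=3M, objlen=1M and *plen is shortened from 2M to 1M; the
 * caller issues the remaining 4M~1M against object 1 separately.
 */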
123 
124 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
125 {
126 	memset(osd_data, 0, sizeof (*osd_data));
127 	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
128 }
129 
130 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
131 			struct page **pages, u64 length, u32 alignment,
132 			bool pages_from_pool, bool own_pages)
133 {
134 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
135 	osd_data->pages = pages;
136 	osd_data->length = length;
137 	osd_data->alignment = alignment;
138 	osd_data->pages_from_pool = pages_from_pool;
139 	osd_data->own_pages = own_pages;
140 }
141 
142 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
143 			struct ceph_pagelist *pagelist)
144 {
145 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
146 	osd_data->pagelist = pagelist;
147 }
148 
149 #ifdef CONFIG_BLOCK
150 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
151 			struct bio *bio, size_t bio_length)
152 {
153 	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
154 	osd_data->bio = bio;
155 	osd_data->bio_length = bio_length;
156 }
157 #endif /* CONFIG_BLOCK */
158 
159 #define osd_req_op_data(oreq, whch, typ, fld)				\
160 ({									\
161 	struct ceph_osd_request *__oreq = (oreq);			\
162 	unsigned int __whch = (whch);					\
163 	BUG_ON(__whch >= __oreq->r_num_ops);				\
164 	&__oreq->r_ops[__whch].typ.fld;					\
165 })
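
/*
 * E.g. osd_req_op_data(req, 0, extent, osd_data) evaluates to
 * &req->r_ops[0].extent.osd_data, after bounds-checking the op index.
 */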
166 
167 static struct ceph_osd_data *
168 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
169 {
170 	BUG_ON(which >= osd_req->r_num_ops);
171 
172 	return &osd_req->r_ops[which].raw_data_in;
173 }
174 
175 struct ceph_osd_data *
176 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
177 			unsigned int which)
178 {
179 	return osd_req_op_data(osd_req, which, extent, osd_data);
180 }
181 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
182 
183 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
184 			unsigned int which, struct page **pages,
185 			u64 length, u32 alignment,
186 			bool pages_from_pool, bool own_pages)
187 {
188 	struct ceph_osd_data *osd_data;
189 
190 	osd_data = osd_req_op_raw_data_in(osd_req, which);
191 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
192 				pages_from_pool, own_pages);
193 }
194 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
195 
196 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
197 			unsigned int which, struct page **pages,
198 			u64 length, u32 alignment,
199 			bool pages_from_pool, bool own_pages)
200 {
201 	struct ceph_osd_data *osd_data;
202 
203 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
204 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
205 				pages_from_pool, own_pages);
206 }
207 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
208 
209 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
210 			unsigned int which, struct ceph_pagelist *pagelist)
211 {
212 	struct ceph_osd_data *osd_data;
213 
214 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
215 	ceph_osd_data_pagelist_init(osd_data, pagelist);
216 }
217 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
218 
219 #ifdef CONFIG_BLOCK
220 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
221 			unsigned int which, struct bio *bio, size_t bio_length)
222 {
223 	struct ceph_osd_data *osd_data;
224 
225 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
226 	ceph_osd_data_bio_init(osd_data, bio, bio_length);
227 }
228 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
229 #endif /* CONFIG_BLOCK */
230 
231 static void osd_req_op_cls_request_info_pagelist(
232 			struct ceph_osd_request *osd_req,
233 			unsigned int which, struct ceph_pagelist *pagelist)
234 {
235 	struct ceph_osd_data *osd_data;
236 
237 	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
238 	ceph_osd_data_pagelist_init(osd_data, pagelist);
239 }
240 
241 void osd_req_op_cls_request_data_pagelist(
242 			struct ceph_osd_request *osd_req,
243 			unsigned int which, struct ceph_pagelist *pagelist)
244 {
245 	struct ceph_osd_data *osd_data;
246 
247 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
248 	ceph_osd_data_pagelist_init(osd_data, pagelist);
249 	osd_req->r_ops[which].cls.indata_len += pagelist->length;
250 	osd_req->r_ops[which].indata_len += pagelist->length;
251 }
252 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
253 
254 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
255 			unsigned int which, struct page **pages, u64 length,
256 			u32 alignment, bool pages_from_pool, bool own_pages)
257 {
258 	struct ceph_osd_data *osd_data;
259 
260 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
261 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
262 				pages_from_pool, own_pages);
263 	osd_req->r_ops[which].cls.indata_len += length;
264 	osd_req->r_ops[which].indata_len += length;
265 }
266 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
267 
268 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
269 			unsigned int which, struct page **pages, u64 length,
270 			u32 alignment, bool pages_from_pool, bool own_pages)
271 {
272 	struct ceph_osd_data *osd_data;
273 
274 	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
275 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
276 				pages_from_pool, own_pages);
277 }
278 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
279 
280 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
281 {
282 	switch (osd_data->type) {
283 	case CEPH_OSD_DATA_TYPE_NONE:
284 		return 0;
285 	case CEPH_OSD_DATA_TYPE_PAGES:
286 		return osd_data->length;
287 	case CEPH_OSD_DATA_TYPE_PAGELIST:
288 		return (u64)osd_data->pagelist->length;
289 #ifdef CONFIG_BLOCK
290 	case CEPH_OSD_DATA_TYPE_BIO:
291 		return (u64)osd_data->bio_length;
292 #endif /* CONFIG_BLOCK */
293 	default:
294 		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
295 		return 0;
296 	}
297 }
298 
299 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
300 {
301 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
302 		int num_pages;
303 
304 		num_pages = calc_pages_for((u64)osd_data->alignment,
305 						(u64)osd_data->length);
306 		ceph_release_page_vector(osd_data->pages, num_pages);
307 	}
308 	ceph_osd_data_init(osd_data);
309 }
310 
311 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
312 			unsigned int which)
313 {
314 	struct ceph_osd_req_op *op;
315 
316 	BUG_ON(which >= osd_req->r_num_ops);
317 	op = &osd_req->r_ops[which];
318 
319 	switch (op->op) {
320 	case CEPH_OSD_OP_READ:
321 	case CEPH_OSD_OP_WRITE:
322 	case CEPH_OSD_OP_WRITEFULL:
323 		ceph_osd_data_release(&op->extent.osd_data);
324 		break;
325 	case CEPH_OSD_OP_CALL:
326 		ceph_osd_data_release(&op->cls.request_info);
327 		ceph_osd_data_release(&op->cls.request_data);
328 		ceph_osd_data_release(&op->cls.response_data);
329 		break;
330 	case CEPH_OSD_OP_SETXATTR:
331 	case CEPH_OSD_OP_CMPXATTR:
332 		ceph_osd_data_release(&op->xattr.osd_data);
333 		break;
334 	case CEPH_OSD_OP_STAT:
335 		ceph_osd_data_release(&op->raw_data_in);
336 		break;
337 	case CEPH_OSD_OP_NOTIFY_ACK:
338 		ceph_osd_data_release(&op->notify_ack.request_data);
339 		break;
340 	case CEPH_OSD_OP_NOTIFY:
341 		ceph_osd_data_release(&op->notify.request_data);
342 		ceph_osd_data_release(&op->notify.response_data);
343 		break;
344 	case CEPH_OSD_OP_LIST_WATCHERS:
345 		ceph_osd_data_release(&op->list_watchers.response_data);
346 		break;
347 	default:
348 		break;
349 	}
350 }
351 
352 /*
353  * Assumes @t is zero-initialized.
354  */
355 static void target_init(struct ceph_osd_request_target *t)
356 {
357 	ceph_oid_init(&t->base_oid);
358 	ceph_oloc_init(&t->base_oloc);
359 	ceph_oid_init(&t->target_oid);
360 	ceph_oloc_init(&t->target_oloc);
361 
362 	ceph_osds_init(&t->acting);
363 	ceph_osds_init(&t->up);
364 	t->size = -1;
365 	t->min_size = -1;
366 
367 	t->osd = CEPH_HOMELESS_OSD;
368 }
369 
370 static void target_copy(struct ceph_osd_request_target *dest,
371 			const struct ceph_osd_request_target *src)
372 {
373 	ceph_oid_copy(&dest->base_oid, &src->base_oid);
374 	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
375 	ceph_oid_copy(&dest->target_oid, &src->target_oid);
376 	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
377 
378 	dest->pgid = src->pgid; /* struct */
379 	dest->spgid = src->spgid; /* struct */
380 	dest->pg_num = src->pg_num;
381 	dest->pg_num_mask = src->pg_num_mask;
382 	ceph_osds_copy(&dest->acting, &src->acting);
383 	ceph_osds_copy(&dest->up, &src->up);
384 	dest->size = src->size;
385 	dest->min_size = src->min_size;
386 	dest->sort_bitwise = src->sort_bitwise;
387 
388 	dest->flags = src->flags;
389 	dest->paused = src->paused;
390 
391 	dest->epoch = src->epoch;
392 	dest->last_force_resend = src->last_force_resend;
393 
394 	dest->osd = src->osd;
395 }
396 
397 static void target_destroy(struct ceph_osd_request_target *t)
398 {
399 	ceph_oid_destroy(&t->base_oid);
400 	ceph_oloc_destroy(&t->base_oloc);
401 	ceph_oid_destroy(&t->target_oid);
402 	ceph_oloc_destroy(&t->target_oloc);
403 }
404 
405 /*
406  * requests
407  */
408 static void request_release_checks(struct ceph_osd_request *req)
409 {
410 	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
411 	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
412 	WARN_ON(!list_empty(&req->r_unsafe_item));
413 	WARN_ON(req->r_osd);
414 }
415 
416 static void ceph_osdc_release_request(struct kref *kref)
417 {
418 	struct ceph_osd_request *req = container_of(kref,
419 					    struct ceph_osd_request, r_kref);
420 	unsigned int which;
421 
422 	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
423 	     req->r_request, req->r_reply);
424 	request_release_checks(req);
425 
426 	if (req->r_request)
427 		ceph_msg_put(req->r_request);
428 	if (req->r_reply)
429 		ceph_msg_put(req->r_reply);
430 
431 	for (which = 0; which < req->r_num_ops; which++)
432 		osd_req_op_data_release(req, which);
433 
434 	target_destroy(&req->r_t);
435 	ceph_put_snap_context(req->r_snapc);
436 
437 	if (req->r_mempool)
438 		mempool_free(req, req->r_osdc->req_mempool);
439 	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
440 		kmem_cache_free(ceph_osd_request_cache, req);
441 	else
442 		kfree(req);
443 }
444 
445 void ceph_osdc_get_request(struct ceph_osd_request *req)
446 {
447 	dout("%s %p (was %d)\n", __func__, req,
448 	     kref_read(&req->r_kref));
449 	kref_get(&req->r_kref);
450 }
451 EXPORT_SYMBOL(ceph_osdc_get_request);
452 
453 void ceph_osdc_put_request(struct ceph_osd_request *req)
454 {
455 	if (req) {
456 		dout("%s %p (was %d)\n", __func__, req,
457 		     kref_read(&req->r_kref));
458 		kref_put(&req->r_kref, ceph_osdc_release_request);
459 	}
460 }
461 EXPORT_SYMBOL(ceph_osdc_put_request);
462 
463 static void request_init(struct ceph_osd_request *req)
464 {
465 	/* req only, each op is zeroed in _osd_req_op_init() */
466 	memset(req, 0, sizeof(*req));
467 
468 	kref_init(&req->r_kref);
469 	init_completion(&req->r_completion);
470 	RB_CLEAR_NODE(&req->r_node);
471 	RB_CLEAR_NODE(&req->r_mc_node);
472 	INIT_LIST_HEAD(&req->r_unsafe_item);
473 
474 	target_init(&req->r_t);
475 }
476 
477 /*
478  * This is ugly, but it allows us to reuse linger registration and ping
479  * requests, keeping the structure of the code around send_linger{_ping}()
480  * reasonable.  Setting up a min_nr=2 mempool for each linger request
481  * and dealing with copying ops (this blasts req only, watch op remains
482  * intact) isn't any better.
483  */
484 static void request_reinit(struct ceph_osd_request *req)
485 {
486 	struct ceph_osd_client *osdc = req->r_osdc;
487 	bool mempool = req->r_mempool;
488 	unsigned int num_ops = req->r_num_ops;
489 	u64 snapid = req->r_snapid;
490 	struct ceph_snap_context *snapc = req->r_snapc;
491 	bool linger = req->r_linger;
492 	struct ceph_msg *request_msg = req->r_request;
493 	struct ceph_msg *reply_msg = req->r_reply;
494 
495 	dout("%s req %p\n", __func__, req);
496 	WARN_ON(kref_read(&req->r_kref) != 1);
497 	request_release_checks(req);
498 
499 	WARN_ON(kref_read(&request_msg->kref) != 1);
500 	WARN_ON(kref_read(&reply_msg->kref) != 1);
501 	target_destroy(&req->r_t);
502 
503 	request_init(req);
504 	req->r_osdc = osdc;
505 	req->r_mempool = mempool;
506 	req->r_num_ops = num_ops;
507 	req->r_snapid = snapid;
508 	req->r_snapc = snapc;
509 	req->r_linger = linger;
510 	req->r_request = request_msg;
511 	req->r_reply = reply_msg;
512 }
513 
514 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
515 					       struct ceph_snap_context *snapc,
516 					       unsigned int num_ops,
517 					       bool use_mempool,
518 					       gfp_t gfp_flags)
519 {
520 	struct ceph_osd_request *req;
521 
522 	if (use_mempool) {
523 		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
524 		req = mempool_alloc(osdc->req_mempool, gfp_flags);
525 	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
526 		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
527 	} else {
528 		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
529 		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
530 			      gfp_flags);
531 	}
532 	if (unlikely(!req))
533 		return NULL;
534 
535 	request_init(req);
536 	req->r_osdc = osdc;
537 	req->r_mempool = use_mempool;
538 	req->r_num_ops = num_ops;
539 	req->r_snapid = CEPH_NOSNAP;
540 	req->r_snapc = ceph_get_snap_context(snapc);
541 
542 	dout("%s req %p\n", __func__, req);
543 	return req;
544 }
545 EXPORT_SYMBOL(ceph_osdc_alloc_request);
546 
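/*
 * pool (8) + preferred (4) + empty key length (4) + namespace length
 * (4) + namespace bytes; must stay in sync with encode_oloc() below.
 */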
547 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
548 {
549 	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
550 }
551 
552 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
553 {
554 	struct ceph_osd_client *osdc = req->r_osdc;
555 	struct ceph_msg *msg;
556 	int msg_size;
557 
558 	WARN_ON(ceph_oid_empty(&req->r_base_oid));
559 	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
560 
561 	/* create request message */
562 	msg_size = CEPH_ENCODING_START_BLK_LEN +
563 			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
564 	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
565 	msg_size += CEPH_ENCODING_START_BLK_LEN +
566 			sizeof(struct ceph_osd_reqid); /* reqid */
567 	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
568 	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
569 	msg_size += CEPH_ENCODING_START_BLK_LEN +
570 			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
571 	msg_size += 4 + req->r_base_oid.name_len; /* oid */
572 	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
573 	msg_size += 8; /* snapid */
574 	msg_size += 8; /* snap_seq */
575 	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
576 	msg_size += 4 + 8; /* retry_attempt, features */
577 
578 	if (req->r_mempool)
579 		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
580 	else
581 		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
582 	if (!msg)
583 		return -ENOMEM;
584 
585 	memset(msg->front.iov_base, 0, msg->front.iov_len);
586 	req->r_request = msg;
587 
588 	/* create reply message */
589 	msg_size = OSD_OPREPLY_FRONT_LEN;
590 	msg_size += req->r_base_oid.name_len;
591 	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
592 
593 	if (req->r_mempool)
594 		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
595 	else
596 		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
597 	if (!msg)
598 		return -ENOMEM;
599 
600 	req->r_reply = msg;
601 
602 	return 0;
603 }
604 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
605 
606 static bool osd_req_opcode_valid(u16 opcode)
607 {
608 	switch (opcode) {
609 #define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
610 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
611 #undef GENERATE_CASE
612 	default:
613 		return false;
614 	}
615 }
616 
617 /*
618  * This is an osd op init function for opcodes that have no data or
619  * other information associated with them.  It also serves as a
620  * common init routine for all the other init functions, below.
621  */
622 static struct ceph_osd_req_op *
623 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
624 		 u16 opcode, u32 flags)
625 {
626 	struct ceph_osd_req_op *op;
627 
628 	BUG_ON(which >= osd_req->r_num_ops);
629 	BUG_ON(!osd_req_opcode_valid(opcode));
630 
631 	op = &osd_req->r_ops[which];
632 	memset(op, 0, sizeof (*op));
633 	op->op = opcode;
634 	op->flags = flags;
635 
636 	return op;
637 }
638 
639 void osd_req_op_init(struct ceph_osd_request *osd_req,
640 		     unsigned int which, u16 opcode, u32 flags)
641 {
642 	(void)_osd_req_op_init(osd_req, which, opcode, flags);
643 }
644 EXPORT_SYMBOL(osd_req_op_init);
645 
646 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
647 				unsigned int which, u16 opcode,
648 				u64 offset, u64 length,
649 				u64 truncate_size, u32 truncate_seq)
650 {
651 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
652 						      opcode, 0);
653 	size_t payload_len = 0;
654 
655 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
656 	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
657 	       opcode != CEPH_OSD_OP_TRUNCATE);
658 
659 	op->extent.offset = offset;
660 	op->extent.length = length;
661 	op->extent.truncate_size = truncate_size;
662 	op->extent.truncate_seq = truncate_seq;
663 	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
664 		payload_len += length;
665 
666 	op->indata_len = payload_len;
667 }
668 EXPORT_SYMBOL(osd_req_op_extent_init);
669 
670 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
671 				unsigned int which, u64 length)
672 {
673 	struct ceph_osd_req_op *op;
674 	u64 previous;
675 
676 	BUG_ON(which >= osd_req->r_num_ops);
677 	op = &osd_req->r_ops[which];
678 	previous = op->extent.length;
679 
680 	if (length == previous)
681 		return;		/* Nothing to do */
682 	BUG_ON(length > previous);
683 
684 	op->extent.length = length;
685 	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
686 		op->indata_len -= previous - length;
687 }
688 EXPORT_SYMBOL(osd_req_op_extent_update);
689 
690 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
691 				unsigned int which, u64 offset_inc)
692 {
693 	struct ceph_osd_req_op *op, *prev_op;
694 
695 	BUG_ON(which + 1 >= osd_req->r_num_ops);
696 
697 	prev_op = &osd_req->r_ops[which];
698 	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
699 	/* dup previous one */
700 	op->indata_len = prev_op->indata_len;
701 	op->outdata_len = prev_op->outdata_len;
702 	op->extent = prev_op->extent;
703 	/* adjust offset */
704 	op->extent.offset += offset_inc;
705 	op->extent.length -= offset_inc;
706 
707 	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
708 		op->indata_len -= offset_inc;
709 }
710 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
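
/*
 * Example (illustrative, req allocated with num_ops >= 2): after
 * osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 8192, ...),
 * osd_req_op_extent_dup_last(req, 0, 4096) makes op 1 a write of
 * offset 4096, length 4096 with indata_len 4096; op 0 itself is left
 * untouched and the caller is expected to adjust it (e.g. via
 * osd_req_op_extent_update()).
 */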
711 
712 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
713 			u16 opcode, const char *class, const char *method)
714 {
715 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
716 						      opcode, 0);
717 	struct ceph_pagelist *pagelist;
718 	size_t payload_len = 0;
719 	size_t size;
720 
721 	BUG_ON(opcode != CEPH_OSD_OP_CALL);
722 
723 	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
724 	BUG_ON(!pagelist);
725 	ceph_pagelist_init(pagelist);
726 
727 	op->cls.class_name = class;
728 	size = strlen(class);
729 	BUG_ON(size > (size_t) U8_MAX);
730 	op->cls.class_len = size;
731 	ceph_pagelist_append(pagelist, class, size);
732 	payload_len += size;
733 
734 	op->cls.method_name = method;
735 	size = strlen(method);
736 	BUG_ON(size > (size_t) U8_MAX);
737 	op->cls.method_len = size;
738 	ceph_pagelist_append(pagelist, method, size);
739 	payload_len += size;
740 
741 	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
742 
743 	op->indata_len = payload_len;
744 }
745 EXPORT_SYMBOL(osd_req_op_cls_init);
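
/*
 * Example (illustrative sketch): invoke an object class method -- the
 * class "foo" and method "bar" here are hypothetical -- passing input
 * pages and collecting the reply into @reply_page:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "foo", "bar");
 *	osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *					  false, false);
 *	osd_req_op_cls_response_data_pages(req, 0, &reply_page,
 *					   PAGE_SIZE, 0, false, false);
 */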
746 
747 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
748 			  u16 opcode, const char *name, const void *value,
749 			  size_t size, u8 cmp_op, u8 cmp_mode)
750 {
751 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
752 						      opcode, 0);
753 	struct ceph_pagelist *pagelist;
754 	size_t payload_len;
755 
756 	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
757 
758 	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
759 	if (!pagelist)
760 		return -ENOMEM;
761 
762 	ceph_pagelist_init(pagelist);
763 
764 	payload_len = strlen(name);
765 	op->xattr.name_len = payload_len;
766 	ceph_pagelist_append(pagelist, name, payload_len);
767 
768 	op->xattr.value_len = size;
769 	ceph_pagelist_append(pagelist, value, size);
770 	payload_len += size;
771 
772 	op->xattr.cmp_op = cmp_op;
773 	op->xattr.cmp_mode = cmp_mode;
774 
775 	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
776 	op->indata_len = payload_len;
777 	return 0;
778 }
779 EXPORT_SYMBOL(osd_req_op_xattr_init);
780 
781 /*
782  * @watch_opcode: CEPH_OSD_WATCH_OP_*
783  */
784 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
785 				  u64 cookie, u8 watch_opcode)
786 {
787 	struct ceph_osd_req_op *op;
788 
789 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
790 	op->watch.cookie = cookie;
791 	op->watch.op = watch_opcode;
792 	op->watch.gen = 0;
793 }
794 
795 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
796 				unsigned int which,
797 				u64 expected_object_size,
798 				u64 expected_write_size)
799 {
800 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
801 						      CEPH_OSD_OP_SETALLOCHINT,
802 						      0);
803 
804 	op->alloc_hint.expected_object_size = expected_object_size;
805 	op->alloc_hint.expected_write_size = expected_write_size;
806 
807 	/*
808 	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
809 	 * not worth a feature bit.  Set FAILOK per-op flag to make
810 	 * sure older osds don't trip over an unsupported opcode.
811 	 */
812 	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
813 }
814 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
815 
816 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
817 				struct ceph_osd_data *osd_data)
818 {
819 	u64 length = ceph_osd_data_length(osd_data);
820 
821 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
822 		BUG_ON(length > (u64) SIZE_MAX);
823 		if (length)
824 			ceph_msg_data_add_pages(msg, osd_data->pages,
825 					length, osd_data->alignment);
826 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
827 		BUG_ON(!length);
828 		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
829 #ifdef CONFIG_BLOCK
830 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
831 		ceph_msg_data_add_bio(msg, osd_data->bio, length);
832 #endif
833 	} else {
834 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
835 	}
836 }
837 
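/*
 * Encode a single op into wire format.  Returns the op's request
 * payload length (indata_len); encode_request_partial() sums these
 * across all ops to compute the request data length.
 */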
838 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
839 			     const struct ceph_osd_req_op *src)
840 {
841 	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
842 		pr_err("unrecognized osd opcode %d\n", src->op);
843 
844 		return 0;
845 	}
846 
847 	switch (src->op) {
848 	case CEPH_OSD_OP_STAT:
849 		break;
850 	case CEPH_OSD_OP_READ:
851 	case CEPH_OSD_OP_WRITE:
852 	case CEPH_OSD_OP_WRITEFULL:
853 	case CEPH_OSD_OP_ZERO:
854 	case CEPH_OSD_OP_TRUNCATE:
855 		dst->extent.offset = cpu_to_le64(src->extent.offset);
856 		dst->extent.length = cpu_to_le64(src->extent.length);
857 		dst->extent.truncate_size =
858 			cpu_to_le64(src->extent.truncate_size);
859 		dst->extent.truncate_seq =
860 			cpu_to_le32(src->extent.truncate_seq);
861 		break;
862 	case CEPH_OSD_OP_CALL:
863 		dst->cls.class_len = src->cls.class_len;
864 		dst->cls.method_len = src->cls.method_len;
865 		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
866 		break;
867 	case CEPH_OSD_OP_WATCH:
868 		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
869 		dst->watch.ver = cpu_to_le64(0);
870 		dst->watch.op = src->watch.op;
871 		dst->watch.gen = cpu_to_le32(src->watch.gen);
872 		break;
873 	case CEPH_OSD_OP_NOTIFY_ACK:
874 		break;
875 	case CEPH_OSD_OP_NOTIFY:
876 		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
877 		break;
878 	case CEPH_OSD_OP_LIST_WATCHERS:
879 		break;
880 	case CEPH_OSD_OP_SETALLOCHINT:
881 		dst->alloc_hint.expected_object_size =
882 		    cpu_to_le64(src->alloc_hint.expected_object_size);
883 		dst->alloc_hint.expected_write_size =
884 		    cpu_to_le64(src->alloc_hint.expected_write_size);
885 		break;
886 	case CEPH_OSD_OP_SETXATTR:
887 	case CEPH_OSD_OP_CMPXATTR:
888 		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
889 		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
890 		dst->xattr.cmp_op = src->xattr.cmp_op;
891 		dst->xattr.cmp_mode = src->xattr.cmp_mode;
892 		break;
893 	case CEPH_OSD_OP_CREATE:
894 	case CEPH_OSD_OP_DELETE:
895 		break;
896 	default:
897 		pr_err("unsupported osd opcode %s\n",
898 			ceph_osd_op_name(src->op));
899 		WARN_ON(1);
900 
901 		return 0;
902 	}
903 
904 	dst->op = cpu_to_le16(src->op);
905 	dst->flags = cpu_to_le32(src->flags);
906 	dst->payload_len = cpu_to_le32(src->indata_len);
907 
908 	return src->indata_len;
909 }
910 
911 /*
912  * build new request AND message, calculate layout, and adjust file
913  * extent as needed.
914  *
915  * if the file was recently truncated, we include information about its
916  * old and new size so that the object can be updated appropriately.  (we
917  * avoid synchronously deleting truncated objects because it's slow.)
918  */
919 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
920 					       struct ceph_file_layout *layout,
921 					       struct ceph_vino vino,
922 					       u64 off, u64 *plen,
923 					       unsigned int which, int num_ops,
924 					       int opcode, int flags,
925 					       struct ceph_snap_context *snapc,
926 					       u32 truncate_seq,
927 					       u64 truncate_size,
928 					       bool use_mempool)
929 {
930 	struct ceph_osd_request *req;
931 	u64 objnum = 0;
932 	u64 objoff = 0;
933 	u64 objlen = 0;
934 	int r;
935 
936 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
937 	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
938 	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
939 
940 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
941 					GFP_NOFS);
942 	if (!req) {
943 		r = -ENOMEM;
944 		goto fail;
945 	}
946 
947 	/* calculate max write size */
948 	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
949 	if (r)
950 		goto fail;
951 
952 	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
953 		osd_req_op_init(req, which, opcode, 0);
954 	} else {
955 		u32 object_size = layout->object_size;
956 		u32 object_base = off - objoff;
957 		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
958 			if (truncate_size <= object_base) {
959 				truncate_size = 0;
960 			} else {
961 				truncate_size -= object_base;
962 				if (truncate_size > object_size)
963 					truncate_size = object_size;
964 			}
965 		}
966 		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
967 				       truncate_size, truncate_seq);
968 	}
969 
970 	req->r_abort_on_full = true;
971 	req->r_flags = flags;
972 	req->r_base_oloc.pool = layout->pool_id;
973 	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
974 	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
975 
976 	req->r_snapid = vino.snap;
977 	if (flags & CEPH_OSD_FLAG_WRITE)
978 		req->r_data_offset = off;
979 
980 	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
981 	if (r)
982 		goto fail;
983 
984 	return req;
985 
986 fail:
987 	ceph_osdc_put_request(req);
988 	return ERR_PTR(r);
989 }
990 EXPORT_SYMBOL(ceph_osdc_new_request);
991 
992 /*
993  * We keep osd requests in an rbtree, sorted by ->r_tid.
994  */
995 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
996 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
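
/*
 * DEFINE_RB_FUNCS() generates insert_request(), lookup_request() and
 * erase_request() (plus the _mc variants) for these tid-keyed trees.
 */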
997 
998 static bool osd_homeless(struct ceph_osd *osd)
999 {
1000 	return osd->o_osd == CEPH_HOMELESS_OSD;
1001 }
1002 
1003 static bool osd_registered(struct ceph_osd *osd)
1004 {
1005 	verify_osdc_locked(osd->o_osdc);
1006 
1007 	return !RB_EMPTY_NODE(&osd->o_node);
1008 }
1009 
1010 /*
1011  * Assumes @osd is zero-initialized.
1012  */
1013 static void osd_init(struct ceph_osd *osd)
1014 {
1015 	refcount_set(&osd->o_ref, 1);
1016 	RB_CLEAR_NODE(&osd->o_node);
1017 	osd->o_requests = RB_ROOT;
1018 	osd->o_linger_requests = RB_ROOT;
1019 	osd->o_backoff_mappings = RB_ROOT;
1020 	osd->o_backoffs_by_id = RB_ROOT;
1021 	INIT_LIST_HEAD(&osd->o_osd_lru);
1022 	INIT_LIST_HEAD(&osd->o_keepalive_item);
1023 	osd->o_incarnation = 1;
1024 	mutex_init(&osd->lock);
1025 }
1026 
1027 static void osd_cleanup(struct ceph_osd *osd)
1028 {
1029 	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1030 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1031 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1032 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
1033 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
1034 	WARN_ON(!list_empty(&osd->o_osd_lru));
1035 	WARN_ON(!list_empty(&osd->o_keepalive_item));
1036 
1037 	if (osd->o_auth.authorizer) {
1038 		WARN_ON(osd_homeless(osd));
1039 		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1040 	}
1041 }
1042 
1043 /*
1044  * Track open sessions with osds.
1045  */
1046 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1047 {
1048 	struct ceph_osd *osd;
1049 
1050 	WARN_ON(onum == CEPH_HOMELESS_OSD);
1051 
1052 	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1053 	osd_init(osd);
1054 	osd->o_osdc = osdc;
1055 	osd->o_osd = onum;
1056 
1057 	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1058 
1059 	return osd;
1060 }
1061 
1062 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1063 {
1064 	if (refcount_inc_not_zero(&osd->o_ref)) {
1065 		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1066 		     refcount_read(&osd->o_ref));
1067 		return osd;
1068 	} else {
1069 		dout("get_osd %p FAIL\n", osd);
1070 		return NULL;
1071 	}
1072 }
1073 
1074 static void put_osd(struct ceph_osd *osd)
1075 {
1076 	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1077 	     refcount_read(&osd->o_ref) - 1);
1078 	if (refcount_dec_and_test(&osd->o_ref)) {
1079 		osd_cleanup(osd);
1080 		kfree(osd);
1081 	}
1082 }
1083 
1084 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1085 
1086 static void __move_osd_to_lru(struct ceph_osd *osd)
1087 {
1088 	struct ceph_osd_client *osdc = osd->o_osdc;
1089 
1090 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1091 	BUG_ON(!list_empty(&osd->o_osd_lru));
1092 
1093 	spin_lock(&osdc->osd_lru_lock);
1094 	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1095 	spin_unlock(&osdc->osd_lru_lock);
1096 
1097 	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1098 }
1099 
1100 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1101 {
1102 	if (RB_EMPTY_ROOT(&osd->o_requests) &&
1103 	    RB_EMPTY_ROOT(&osd->o_linger_requests))
1104 		__move_osd_to_lru(osd);
1105 }
1106 
1107 static void __remove_osd_from_lru(struct ceph_osd *osd)
1108 {
1109 	struct ceph_osd_client *osdc = osd->o_osdc;
1110 
1111 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1112 
1113 	spin_lock(&osdc->osd_lru_lock);
1114 	if (!list_empty(&osd->o_osd_lru))
1115 		list_del_init(&osd->o_osd_lru);
1116 	spin_unlock(&osdc->osd_lru_lock);
1117 }
1118 
1119 /*
1120  * Close the connection and assign any leftover requests to the
1121  * homeless session.
1122  */
1123 static void close_osd(struct ceph_osd *osd)
1124 {
1125 	struct ceph_osd_client *osdc = osd->o_osdc;
1126 	struct rb_node *n;
1127 
1128 	verify_osdc_wrlocked(osdc);
1129 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1130 
1131 	ceph_con_close(&osd->o_con);
1132 
1133 	for (n = rb_first(&osd->o_requests); n; ) {
1134 		struct ceph_osd_request *req =
1135 		    rb_entry(n, struct ceph_osd_request, r_node);
1136 
1137 		n = rb_next(n); /* unlink_request() */
1138 
1139 		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1140 		unlink_request(osd, req);
1141 		link_request(&osdc->homeless_osd, req);
1142 	}
1143 	for (n = rb_first(&osd->o_linger_requests); n; ) {
1144 		struct ceph_osd_linger_request *lreq =
1145 		    rb_entry(n, struct ceph_osd_linger_request, node);
1146 
1147 		n = rb_next(n); /* unlink_linger() */
1148 
1149 		dout(" reassigning lreq %p linger_id %llu\n", lreq,
1150 		     lreq->linger_id);
1151 		unlink_linger(osd, lreq);
1152 		link_linger(&osdc->homeless_osd, lreq);
1153 	}
1154 	clear_backoffs(osd);
1155 
1156 	__remove_osd_from_lru(osd);
1157 	erase_osd(&osdc->osds, osd);
1158 	put_osd(osd);
1159 }
1160 
1161 /*
1162  * reset osd connection
1163  */
1164 static int reopen_osd(struct ceph_osd *osd)
1165 {
1166 	struct ceph_entity_addr *peer_addr;
1167 
1168 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1169 
1170 	if (RB_EMPTY_ROOT(&osd->o_requests) &&
1171 	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1172 		close_osd(osd);
1173 		return -ENODEV;
1174 	}
1175 
1176 	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1177 	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1178 			!ceph_con_opened(&osd->o_con)) {
1179 		struct rb_node *n;
1180 
1181 		dout("osd addr hasn't changed and connection never opened, "
1182 		     "letting msgr retry\n");
1183 		/* touch each r_stamp for handle_timeout()'s benefit */
1184 		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1185 			struct ceph_osd_request *req =
1186 			    rb_entry(n, struct ceph_osd_request, r_node);
1187 			req->r_stamp = jiffies;
1188 		}
1189 
1190 		return -EAGAIN;
1191 	}
1192 
1193 	ceph_con_close(&osd->o_con);
1194 	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1195 	osd->o_incarnation++;
1196 
1197 	return 0;
1198 }
1199 
1200 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1201 					  bool wrlocked)
1202 {
1203 	struct ceph_osd *osd;
1204 
1205 	if (wrlocked)
1206 		verify_osdc_wrlocked(osdc);
1207 	else
1208 		verify_osdc_locked(osdc);
1209 
1210 	if (o != CEPH_HOMELESS_OSD)
1211 		osd = lookup_osd(&osdc->osds, o);
1212 	else
1213 		osd = &osdc->homeless_osd;
1214 	if (!osd) {
1215 		if (!wrlocked)
1216 			return ERR_PTR(-EAGAIN);
1217 
1218 		osd = create_osd(osdc, o);
1219 		insert_osd(&osdc->osds, osd);
1220 		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1221 			      &osdc->osdmap->osd_addr[osd->o_osd]);
1222 	}
1223 
1224 	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1225 	return osd;
1226 }
1227 
1228 /*
1229  * Create request <-> OSD session relation.
1230  *
1231  * @req has to be assigned a tid, @osd may be homeless.
1232  */
1233 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1234 {
1235 	verify_osd_locked(osd);
1236 	WARN_ON(!req->r_tid || req->r_osd);
1237 	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1238 	     req, req->r_tid);
1239 
1240 	if (!osd_homeless(osd))
1241 		__remove_osd_from_lru(osd);
1242 	else
1243 		atomic_inc(&osd->o_osdc->num_homeless);
1244 
1245 	get_osd(osd);
1246 	insert_request(&osd->o_requests, req);
1247 	req->r_osd = osd;
1248 }
1249 
1250 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1251 {
1252 	verify_osd_locked(osd);
1253 	WARN_ON(req->r_osd != osd);
1254 	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1255 	     req, req->r_tid);
1256 
1257 	req->r_osd = NULL;
1258 	erase_request(&osd->o_requests, req);
1259 	put_osd(osd);
1260 
1261 	if (!osd_homeless(osd))
1262 		maybe_move_osd_to_lru(osd);
1263 	else
1264 		atomic_dec(&osd->o_osdc->num_homeless);
1265 }
1266 
1267 static bool __pool_full(struct ceph_pg_pool_info *pi)
1268 {
1269 	return pi->flags & CEPH_POOL_FLAG_FULL;
1270 }
1271 
1272 static bool have_pool_full(struct ceph_osd_client *osdc)
1273 {
1274 	struct rb_node *n;
1275 
1276 	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1277 		struct ceph_pg_pool_info *pi =
1278 		    rb_entry(n, struct ceph_pg_pool_info, node);
1279 
1280 		if (__pool_full(pi))
1281 			return true;
1282 	}
1283 
1284 	return false;
1285 }
1286 
1287 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1288 {
1289 	struct ceph_pg_pool_info *pi;
1290 
1291 	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1292 	if (!pi)
1293 		return false;
1294 
1295 	return __pool_full(pi);
1296 }
1297 
1298 /*
1299  * Returns whether a request should be blocked from being sent
1300  * based on the current osdmap and osd_client settings.
1301  */
1302 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1303 				    const struct ceph_osd_request_target *t,
1304 				    struct ceph_pg_pool_info *pi)
1305 {
1306 	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1307 	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1308 		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1309 		       __pool_full(pi);
1310 
1311 	WARN_ON(pi->id != t->target_oloc.pool);
1312 	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1313 	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1314 	       (osdc->osdmap->epoch < osdc->epoch_barrier);
1315 }
1316 
1317 enum calc_target_result {
1318 	CALC_TARGET_NO_ACTION = 0,
1319 	CALC_TARGET_NEED_RESEND,
1320 	CALC_TARGET_POOL_DNE,
1321 };
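
/*
 * CALC_TARGET_NO_ACTION: the target mapping is unchanged.
 * CALC_TARGET_NEED_RESEND: the mapping changed, re-send the request.
 * CALC_TARGET_POOL_DNE: the pool doesn't exist in the current osdmap.
 */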
1322 
1323 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1324 					   struct ceph_osd_request_target *t,
1325 					   struct ceph_connection *con,
1326 					   bool any_change)
1327 {
1328 	struct ceph_pg_pool_info *pi;
1329 	struct ceph_pg pgid, last_pgid;
1330 	struct ceph_osds up, acting;
1331 	bool force_resend = false;
1332 	bool unpaused = false;
1333 	bool legacy_change;
1334 	bool split = false;
1335 	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1336 	bool recovery_deletes = ceph_osdmap_flag(osdc,
1337 						 CEPH_OSDMAP_RECOVERY_DELETES);
1338 	enum calc_target_result ct_res;
1339 	int ret;
1340 
1341 	t->epoch = osdc->osdmap->epoch;
1342 	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1343 	if (!pi) {
1344 		t->osd = CEPH_HOMELESS_OSD;
1345 		ct_res = CALC_TARGET_POOL_DNE;
1346 		goto out;
1347 	}
1348 
1349 	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1350 		if (t->last_force_resend < pi->last_force_request_resend) {
1351 			t->last_force_resend = pi->last_force_request_resend;
1352 			force_resend = true;
1353 		} else if (t->last_force_resend == 0) {
1354 			force_resend = true;
1355 		}
1356 	}
1357 
1358 	/* apply tiering */
1359 	ceph_oid_copy(&t->target_oid, &t->base_oid);
1360 	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1361 	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1362 		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1363 			t->target_oloc.pool = pi->read_tier;
1364 		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1365 			t->target_oloc.pool = pi->write_tier;
1366 
1367 		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1368 		if (!pi) {
1369 			t->osd = CEPH_HOMELESS_OSD;
1370 			ct_res = CALC_TARGET_POOL_DNE;
1371 			goto out;
1372 		}
1373 	}
1374 
1375 	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
1376 					  &pgid);
1377 	if (ret) {
1378 		WARN_ON(ret != -ENOENT);
1379 		t->osd = CEPH_HOMELESS_OSD;
1380 		ct_res = CALC_TARGET_POOL_DNE;
1381 		goto out;
1382 	}
1383 	last_pgid.pool = pgid.pool;
1384 	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1385 
1386 	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1387 	if (any_change &&
1388 	    ceph_is_new_interval(&t->acting,
1389 				 &acting,
1390 				 &t->up,
1391 				 &up,
1392 				 t->size,
1393 				 pi->size,
1394 				 t->min_size,
1395 				 pi->min_size,
1396 				 t->pg_num,
1397 				 pi->pg_num,
1398 				 t->sort_bitwise,
1399 				 sort_bitwise,
1400 				 t->recovery_deletes,
1401 				 recovery_deletes,
1402 				 &last_pgid))
1403 		force_resend = true;
1404 
1405 	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1406 		t->paused = false;
1407 		unpaused = true;
1408 	}
1409 	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1410 			ceph_osds_changed(&t->acting, &acting, any_change);
1411 	if (t->pg_num)
1412 		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1413 
1414 	if (legacy_change || force_resend || split) {
1415 		t->pgid = pgid; /* struct */
1416 		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1417 		ceph_osds_copy(&t->acting, &acting);
1418 		ceph_osds_copy(&t->up, &up);
1419 		t->size = pi->size;
1420 		t->min_size = pi->min_size;
1421 		t->pg_num = pi->pg_num;
1422 		t->pg_num_mask = pi->pg_num_mask;
1423 		t->sort_bitwise = sort_bitwise;
1424 		t->recovery_deletes = recovery_deletes;
1425 
1426 		t->osd = acting.primary;
1427 	}
1428 
1429 	if (unpaused || legacy_change || force_resend ||
1430 	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1431 					       RESEND_ON_SPLIT)))
1432 		ct_res = CALC_TARGET_NEED_RESEND;
1433 	else
1434 		ct_res = CALC_TARGET_NO_ACTION;
1435 
1436 out:
1437 	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1438 	return ct_res;
1439 }
1440 
1441 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1442 {
1443 	struct ceph_spg_mapping *spg;
1444 
1445 	spg = kmalloc(sizeof(*spg), GFP_NOIO);
1446 	if (!spg)
1447 		return NULL;
1448 
1449 	RB_CLEAR_NODE(&spg->node);
1450 	spg->backoffs = RB_ROOT;
1451 	return spg;
1452 }
1453 
1454 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1455 {
1456 	WARN_ON(!RB_EMPTY_NODE(&spg->node));
1457 	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1458 
1459 	kfree(spg);
1460 }
1461 
1462 /*
1463  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1464  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1465  * defined only within a specific spgid; it does not pass anything to
1466  * children on split, or to another primary.
1467  */
1468 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1469 		 RB_BYPTR, const struct ceph_spg *, node)
1470 
1471 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1472 {
1473 	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1474 }
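
/* MAX sorts above every real object: 2^32 > any 32-bit reversed hash */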
1475 
1476 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1477 				   void **pkey, size_t *pkey_len)
1478 {
1479 	if (hoid->key_len) {
1480 		*pkey = hoid->key;
1481 		*pkey_len = hoid->key_len;
1482 	} else {
1483 		*pkey = hoid->oid;
1484 		*pkey_len = hoid->oid_len;
1485 	}
1486 }
1487 
1488 static int compare_names(const void *name1, size_t name1_len,
1489 			 const void *name2, size_t name2_len)
1490 {
1491 	int ret;
1492 
1493 	ret = memcmp(name1, name2, min(name1_len, name2_len));
1494 	if (!ret) {
1495 		if (name1_len < name2_len)
1496 			ret = -1;
1497 		else if (name1_len > name2_len)
1498 			ret = 1;
1499 	}
1500 	return ret;
1501 }
1502 
1503 static int hoid_compare(const struct ceph_hobject_id *lhs,
1504 			const struct ceph_hobject_id *rhs)
1505 {
1506 	void *effective_key1, *effective_key2;
1507 	size_t effective_key1_len, effective_key2_len;
1508 	int ret;
1509 
1510 	if (lhs->is_max < rhs->is_max)
1511 		return -1;
1512 	if (lhs->is_max > rhs->is_max)
1513 		return 1;
1514 
1515 	if (lhs->pool < rhs->pool)
1516 		return -1;
1517 	if (lhs->pool > rhs->pool)
1518 		return 1;
1519 
1520 	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1521 		return -1;
1522 	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1523 		return 1;
1524 
1525 	ret = compare_names(lhs->nspace, lhs->nspace_len,
1526 			    rhs->nspace, rhs->nspace_len);
1527 	if (ret)
1528 		return ret;
1529 
1530 	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1531 	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1532 	ret = compare_names(effective_key1, effective_key1_len,
1533 			    effective_key2, effective_key2_len);
1534 	if (ret)
1535 		return ret;
1536 
1537 	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1538 	if (ret)
1539 		return ret;
1540 
1541 	if (lhs->snapid < rhs->snapid)
1542 		return -1;
1543 	if (lhs->snapid > rhs->snapid)
1544 		return 1;
1545 
1546 	return 0;
1547 }
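
/*
 * Comparison order above: is_max, pool, bitwise (reversed) hash,
 * namespace, effective key (key if set, else oid), oid, snapid --
 * intended to match the OSD's bitwise hobject_t ordering.
 */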
1548 
1549 /*
1550  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1551  * compat stuff here.
1552  *
1553  * Assumes @hoid is zero-initialized.
1554  */
1555 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1556 {
1557 	u8 struct_v;
1558 	u32 struct_len;
1559 	int ret;
1560 
1561 	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1562 				  &struct_len);
1563 	if (ret)
1564 		return ret;
1565 
1566 	if (struct_v < 4) {
1567 		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1568 		goto e_inval;
1569 	}
1570 
1571 	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1572 						GFP_NOIO);
1573 	if (IS_ERR(hoid->key)) {
1574 		ret = PTR_ERR(hoid->key);
1575 		hoid->key = NULL;
1576 		return ret;
1577 	}
1578 
1579 	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1580 						GFP_NOIO);
1581 	if (IS_ERR(hoid->oid)) {
1582 		ret = PTR_ERR(hoid->oid);
1583 		hoid->oid = NULL;
1584 		return ret;
1585 	}
1586 
1587 	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1588 	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1589 	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1590 
1591 	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1592 						   GFP_NOIO);
1593 	if (IS_ERR(hoid->nspace)) {
1594 		ret = PTR_ERR(hoid->nspace);
1595 		hoid->nspace = NULL;
1596 		return ret;
1597 	}
1598 
1599 	ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1600 
1601 	ceph_hoid_build_hash_cache(hoid);
1602 	return 0;
1603 
1604 e_inval:
1605 	return -EINVAL;
1606 }
1607 
1608 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1609 {
1610 	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1611 	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1612 }
1613 
1614 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1615 {
1616 	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1617 	ceph_encode_string(p, end, hoid->key, hoid->key_len);
1618 	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1619 	ceph_encode_64(p, hoid->snapid);
1620 	ceph_encode_32(p, hoid->hash);
1621 	ceph_encode_8(p, hoid->is_max);
1622 	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1623 	ceph_encode_64(p, hoid->pool);
1624 }
1625 
1626 static void free_hoid(struct ceph_hobject_id *hoid)
1627 {
1628 	if (hoid) {
1629 		kfree(hoid->key);
1630 		kfree(hoid->oid);
1631 		kfree(hoid->nspace);
1632 		kfree(hoid);
1633 	}
1634 }
1635 
1636 static struct ceph_osd_backoff *alloc_backoff(void)
1637 {
1638 	struct ceph_osd_backoff *backoff;
1639 
1640 	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1641 	if (!backoff)
1642 		return NULL;
1643 
1644 	RB_CLEAR_NODE(&backoff->spg_node);
1645 	RB_CLEAR_NODE(&backoff->id_node);
1646 	return backoff;
1647 }
1648 
1649 static void free_backoff(struct ceph_osd_backoff *backoff)
1650 {
1651 	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1652 	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1653 
1654 	free_hoid(backoff->begin);
1655 	free_hoid(backoff->end);
1656 	kfree(backoff);
1657 }
1658 
1659 /*
1660  * Within a specific spgid, backoffs are managed by ->begin hoid.
1661  */
1662 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1663 			RB_BYVAL, spg_node);
1664 
1665 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1666 					    const struct ceph_hobject_id *hoid)
1667 {
1668 	struct rb_node *n = root->rb_node;
1669 
1670 	while (n) {
1671 		struct ceph_osd_backoff *cur =
1672 		    rb_entry(n, struct ceph_osd_backoff, spg_node);
1673 		int cmp;
1674 
1675 		cmp = hoid_compare(hoid, cur->begin);
1676 		if (cmp < 0) {
1677 			n = n->rb_left;
1678 		} else if (cmp > 0) {
1679 			if (hoid_compare(hoid, cur->end) < 0)
1680 				return cur;
1681 
1682 			n = n->rb_right;
1683 		} else {
1684 			return cur;
1685 		}
1686 	}
1687 
1688 	return NULL;
1689 }
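
/*
 * Example (illustrative): with backoffs [a, c) and [e, g) keyed by
 * their ->begin hoids, looking up "b" descends past "a", sees b < c
 * and returns [a, c); looking up "d" falls into the gap between the
 * two ranges and returns NULL.
 */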
1690 
1691 /*
1692  * Each backoff has a unique id within its OSD session.
1693  */
1694 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1695 
1696 static void clear_backoffs(struct ceph_osd *osd)
1697 {
1698 	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1699 		struct ceph_spg_mapping *spg =
1700 		    rb_entry(rb_first(&osd->o_backoff_mappings),
1701 			     struct ceph_spg_mapping, node);
1702 
1703 		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1704 			struct ceph_osd_backoff *backoff =
1705 			    rb_entry(rb_first(&spg->backoffs),
1706 				     struct ceph_osd_backoff, spg_node);
1707 
1708 			erase_backoff(&spg->backoffs, backoff);
1709 			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1710 			free_backoff(backoff);
1711 		}
1712 		erase_spg_mapping(&osd->o_backoff_mappings, spg);
1713 		free_spg_mapping(spg);
1714 	}
1715 }
1716 
1717 /*
1718  * Set up a temporary, non-owning view into @t.
1719  */
1720 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1721 				  const struct ceph_osd_request_target *t)
1722 {
1723 	hoid->key = NULL;
1724 	hoid->key_len = 0;
1725 	hoid->oid = t->target_oid.name;
1726 	hoid->oid_len = t->target_oid.name_len;
1727 	hoid->snapid = CEPH_NOSNAP;
1728 	hoid->hash = t->pgid.seed;
1729 	hoid->is_max = false;
1730 	if (t->target_oloc.pool_ns) {
1731 		hoid->nspace = t->target_oloc.pool_ns->str;
1732 		hoid->nspace_len = t->target_oloc.pool_ns->len;
1733 	} else {
1734 		hoid->nspace = NULL;
1735 		hoid->nspace_len = 0;
1736 	}
1737 	hoid->pool = t->target_oloc.pool;
1738 	ceph_hoid_build_hash_cache(hoid);
1739 }
1740 
1741 static bool should_plug_request(struct ceph_osd_request *req)
1742 {
1743 	struct ceph_osd *osd = req->r_osd;
1744 	struct ceph_spg_mapping *spg;
1745 	struct ceph_osd_backoff *backoff;
1746 	struct ceph_hobject_id hoid;
1747 
1748 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1749 	if (!spg)
1750 		return false;
1751 
1752 	hoid_fill_from_target(&hoid, &req->r_t);
1753 	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1754 	if (!backoff)
1755 		return false;
1756 
1757 	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1758 	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1759 	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1760 	return true;
1761 }
1762 
1763 static void setup_request_data(struct ceph_osd_request *req,
1764 			       struct ceph_msg *msg)
1765 {
1766 	u32 data_len = 0;
1767 	int i;
1768 
1769 	if (!list_empty(&msg->data))
1770 		return;
1771 
1772 	WARN_ON(msg->data_length);
1773 	for (i = 0; i < req->r_num_ops; i++) {
1774 		struct ceph_osd_req_op *op = &req->r_ops[i];
1775 
1776 		switch (op->op) {
1777 		/* request */
1778 		case CEPH_OSD_OP_WRITE:
1779 		case CEPH_OSD_OP_WRITEFULL:
1780 			WARN_ON(op->indata_len != op->extent.length);
1781 			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1782 			break;
1783 		case CEPH_OSD_OP_SETXATTR:
1784 		case CEPH_OSD_OP_CMPXATTR:
1785 			WARN_ON(op->indata_len != op->xattr.name_len +
1786 						  op->xattr.value_len);
1787 			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1788 			break;
1789 		case CEPH_OSD_OP_NOTIFY_ACK:
1790 			ceph_osdc_msg_data_add(msg,
1791 					       &op->notify_ack.request_data);
1792 			break;
1793 
1794 		/* reply */
1795 		case CEPH_OSD_OP_STAT:
1796 			ceph_osdc_msg_data_add(req->r_reply,
1797 					       &op->raw_data_in);
1798 			break;
1799 		case CEPH_OSD_OP_READ:
1800 			ceph_osdc_msg_data_add(req->r_reply,
1801 					       &op->extent.osd_data);
1802 			break;
1803 		case CEPH_OSD_OP_LIST_WATCHERS:
1804 			ceph_osdc_msg_data_add(req->r_reply,
1805 					       &op->list_watchers.response_data);
1806 			break;
1807 
1808 		/* both */
1809 		case CEPH_OSD_OP_CALL:
1810 			WARN_ON(op->indata_len != op->cls.class_len +
1811 						  op->cls.method_len +
1812 						  op->cls.indata_len);
1813 			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1814 			/* optional, can be NONE */
1815 			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1816 			/* optional, can be NONE */
1817 			ceph_osdc_msg_data_add(req->r_reply,
1818 					       &op->cls.response_data);
1819 			break;
1820 		case CEPH_OSD_OP_NOTIFY:
1821 			ceph_osdc_msg_data_add(msg,
1822 					       &op->notify.request_data);
1823 			ceph_osdc_msg_data_add(req->r_reply,
1824 					       &op->notify.response_data);
1825 			break;
1826 		}
1827 
1828 		data_len += op->indata_len;
1829 	}
1830 
1831 	WARN_ON(data_len != msg->data_length);
1832 }
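
/*
 * Where op data lands (sketch mirroring the switch above): outgoing
 * buffers are hung off the request message, incoming buffers off the
 * preallocated reply message.  For a hypothetical write+stat request:
 *
 *	req->r_ops[0].op == CEPH_OSD_OP_WRITE;	// data -> msg
 *	req->r_ops[1].op == CEPH_OSD_OP_STAT;	// data -> req->r_reply
 *	setup_request_data(req, req->r_request);
 *
 * The final WARN_ON cross-checks that the sum of indata_len over all
 * ops matches what was actually attached to the request message.
 */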
1833 
1834 static void encode_pgid(void **p, const struct ceph_pg *pgid)
1835 {
1836 	ceph_encode_8(p, 1);
1837 	ceph_encode_64(p, pgid->pool);
1838 	ceph_encode_32(p, pgid->seed);
1839 	ceph_encode_32(p, -1); /* preferred */
1840 }
1841 
1842 static void encode_spgid(void **p, const struct ceph_spg *spgid)
1843 {
1844 	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
1845 	encode_pgid(p, &spgid->pgid);
1846 	ceph_encode_8(p, spgid->shard);
1847 }
1848 
1849 static void encode_oloc(void **p, void *end,
1850 			const struct ceph_object_locator *oloc)
1851 {
1852 	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
1853 	ceph_encode_64(p, oloc->pool);
1854 	ceph_encode_32(p, -1); /* preferred */
1855 	ceph_encode_32(p, 0);  /* key len */
1856 	if (oloc->pool_ns)
1857 		ceph_encode_string(p, end, oloc->pool_ns->str,
1858 				   oloc->pool_ns->len);
1859 	else
1860 		ceph_encode_32(p, 0);
1861 }
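
/*
 * Resulting object_locator_t wire format (sketch, read off the
 * encoder above -- struct v5, compat v4):
 *
 *	u8  struct_v = 5
 *	u8  struct_compat = 4
 *	u32 struct_len
 *	u64 pool
 *	u32 preferred = -1	(historical, always -1)
 *	u32 key_len = 0		(locator keys are not supported)
 *	u32 nspace_len, nspace bytes	(empty string if no pool_ns)
 */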
1862 
1863 static void encode_request_partial(struct ceph_osd_request *req,
1864 				   struct ceph_msg *msg)
1865 {
1866 	void *p = msg->front.iov_base;
1867 	void *const end = p + msg->front_alloc_len;
1868 	u32 data_len = 0;
1869 	int i;
1870 
1871 	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1872 		/* snapshots aren't writeable */
1873 		WARN_ON(req->r_snapid != CEPH_NOSNAP);
1874 	} else {
1875 		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1876 			req->r_data_offset || req->r_snapc);
1877 	}
1878 
1879 	setup_request_data(req, msg);
1880 
1881 	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
1882 	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
1883 	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1884 	ceph_encode_32(&p, req->r_flags);
1885 
1886 	/* reqid */
1887 	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
1888 	memset(p, 0, sizeof(struct ceph_osd_reqid));
1889 	p += sizeof(struct ceph_osd_reqid);
1890 
1891 	/* trace */
1892 	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
1893 	p += sizeof(struct ceph_blkin_trace_info);
1894 
1895 	ceph_encode_32(&p, 0); /* client_inc, always 0 */
1896 	ceph_encode_timespec(p, &req->r_mtime);
1897 	p += sizeof(struct ceph_timespec);
1898 
1899 	encode_oloc(&p, end, &req->r_t.target_oloc);
1900 	ceph_encode_string(&p, end, req->r_t.target_oid.name,
1901 			   req->r_t.target_oid.name_len);
1902 
1903 	/* ops, can imply data */
1904 	ceph_encode_16(&p, req->r_num_ops);
1905 	for (i = 0; i < req->r_num_ops; i++) {
1906 		data_len += osd_req_encode_op(p, &req->r_ops[i]);
1907 		p += sizeof(struct ceph_osd_op);
1908 	}
1909 
1910 	ceph_encode_64(&p, req->r_snapid); /* snapid */
1911 	if (req->r_snapc) {
1912 		ceph_encode_64(&p, req->r_snapc->seq);
1913 		ceph_encode_32(&p, req->r_snapc->num_snaps);
1914 		for (i = 0; i < req->r_snapc->num_snaps; i++)
1915 			ceph_encode_64(&p, req->r_snapc->snaps[i]);
1916 	} else {
1917 		ceph_encode_64(&p, 0); /* snap_seq */
1918 		ceph_encode_32(&p, 0); /* snaps len */
1919 	}
1920 
1921 	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1922 	BUG_ON(p > end - 8); /* space for features */
1923 
1924 	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
1925 	/* front_len is finalized in encode_request_finish() */
1926 	msg->front.iov_len = p - msg->front.iov_base;
1927 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1928 	msg->hdr.data_len = cpu_to_le32(data_len);
1929 	/*
1930 	 * The header "data_off" is a hint to the receiver allowing it
1931 	 * to align received data into its buffers such that there's no
1932 	 * need to re-copy it before writing it to disk (direct I/O).
1933 	 */
1934 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1935 
1936 	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
1937 	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1938 }
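
/*
 * Front layout produced above (MOSDOp v8, schematic):
 *
 *	spgid | raw pg seed | map epoch | flags | reqid (zeroed) |
 *	trace (zeroed) | client_inc (0) | mtime | oloc | oid |
 *	num_ops + ops[] | snapid | snap seq + snaps[] | retry_attempt
 *
 * It is "partial" because the trailing 8 bytes (peer features) and
 * the final front_len depend on which OSD the message goes to; they
 * are filled in by encode_request_finish() at send time.
 */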
1939 
1940 static void encode_request_finish(struct ceph_msg *msg)
1941 {
1942 	void *p = msg->front.iov_base;
1943 	void *const partial_end = p + msg->front.iov_len;
1944 	void *const end = p + msg->front_alloc_len;
1945 
1946 	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
1947 		/* luminous OSD -- encode features and be done */
1948 		p = partial_end;
1949 		ceph_encode_64(&p, msg->con->peer_features);
1950 	} else {
1951 		struct {
1952 			char spgid[CEPH_ENCODING_START_BLK_LEN +
1953 				   CEPH_PGID_ENCODING_LEN + 1];
1954 			__le32 hash;
1955 			__le32 epoch;
1956 			__le32 flags;
1957 			char reqid[CEPH_ENCODING_START_BLK_LEN +
1958 				   sizeof(struct ceph_osd_reqid)];
1959 			char trace[sizeof(struct ceph_blkin_trace_info)];
1960 			__le32 client_inc;
1961 			struct ceph_timespec mtime;
1962 		} __packed head;
1963 		struct ceph_pg pgid;
1964 		void *oloc, *oid, *tail;
1965 		int oloc_len, oid_len, tail_len;
1966 		int len;
1967 
1968 		/*
1969 		 * Pre-luminous OSD -- reencode v8 into v4 using @head
1970 		 * as a temporary buffer.  Encode the raw PG; the rest
1971 		 * is just a matter of moving oloc, oid and tail blobs
1972 		 * around.
1973 		 */
1974 		memcpy(&head, p, sizeof(head));
1975 		p += sizeof(head);
1976 
1977 		oloc = p;
1978 		p += CEPH_ENCODING_START_BLK_LEN;
1979 		pgid.pool = ceph_decode_64(&p);
1980 		p += 4 + 4; /* preferred, key len */
1981 		len = ceph_decode_32(&p);
1982 		p += len;   /* nspace */
1983 		oloc_len = p - oloc;
1984 
1985 		oid = p;
1986 		len = ceph_decode_32(&p);
1987 		p += len;
1988 		oid_len = p - oid;
1989 
1990 		tail = p;
1991 		tail_len = partial_end - p;
1992 
1993 		p = msg->front.iov_base;
1994 		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
1995 		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
1996 		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
1997 		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
1998 
1999 		/* reassert_version */
2000 		memset(p, 0, sizeof(struct ceph_eversion));
2001 		p += sizeof(struct ceph_eversion);
2002 
2003 		BUG_ON(p >= oloc);
2004 		memmove(p, oloc, oloc_len);
2005 		p += oloc_len;
2006 
2007 		pgid.seed = le32_to_cpu(head.hash);
2008 		encode_pgid(&p, &pgid); /* raw pg */
2009 
2010 		BUG_ON(p >= oid);
2011 		memmove(p, oid, oid_len);
2012 		p += oid_len;
2013 
2014 		/* tail -- ops, snapid, snapc, retry_attempt */
2015 		BUG_ON(p >= tail);
2016 		memmove(p, tail, tail_len);
2017 		p += tail_len;
2018 
2019 		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2020 	}
2021 
2022 	BUG_ON(p > end);
2023 	msg->front.iov_len = p - msg->front.iov_base;
2024 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2025 
2026 	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2027 	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2028 	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2029 	     le16_to_cpu(msg->hdr.version));
2030 }
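
/*
 * The v8 -> v4 re-encode above, schematically:
 *
 *   v8: [spgid hash epoch flags reqid trace client_inc mtime] oloc oid tail
 *   v4: [client_inc epoch flags mtime reassert_version] oloc rawpg oid tail
 *
 * The bracketed v8 prefix is captured into @head, the fixed v4 prefix
 * is then written in place, and the variable-length oloc/oid/tail
 * blobs are shifted down with memmove().  Every move is to a strictly
 * lower address, which the BUG_ON()s assert.
 */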
2031 
2032 /*
2033  * @req has to be assigned a tid and registered.
2034  */
2035 static void send_request(struct ceph_osd_request *req)
2036 {
2037 	struct ceph_osd *osd = req->r_osd;
2038 
2039 	verify_osd_locked(osd);
2040 	WARN_ON(osd->o_osd != req->r_t.osd);
2041 
2042 	/* backoff? */
2043 	if (should_plug_request(req))
2044 		return;
2045 
2046 	/*
2047 	 * We may have a previously queued request message hanging
2048 	 * around.  Cancel it to avoid corrupting the msgr.
2049 	 */
2050 	if (req->r_sent)
2051 		ceph_msg_revoke(req->r_request);
2052 
2053 	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2054 	if (req->r_attempts)
2055 		req->r_flags |= CEPH_OSD_FLAG_RETRY;
2056 	else
2057 		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2058 
2059 	encode_request_partial(req, req->r_request);
2060 
2061 	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2062 	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2063 	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2064 	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2065 	     req->r_attempts);
2066 
2067 	req->r_t.paused = false;
2068 	req->r_stamp = jiffies;
2069 	req->r_attempts++;
2070 
2071 	req->r_sent = osd->o_incarnation;
2072 	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2073 	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2074 }
2075 
2076 static void maybe_request_map(struct ceph_osd_client *osdc)
2077 {
2078 	bool continuous = false;
2079 
2080 	verify_osdc_locked(osdc);
2081 	WARN_ON(!osdc->osdmap->epoch);
2082 
2083 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2084 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2085 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2086 		dout("%s osdc %p continuous\n", __func__, osdc);
2087 		continuous = true;
2088 	} else {
2089 		dout("%s osdc %p onetime\n", __func__, osdc);
2090 	}
2091 
2092 	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2093 			       osdc->osdmap->epoch + 1, continuous))
2094 		ceph_monc_renew_subs(&osdc->client->monc);
2095 }
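
/*
 * Subscription semantics (sketch): we always ask for maps starting at
 * epoch + 1.  A one-time sub delivers just the next map; a continuous
 * sub keeps maps flowing while a pause/full flag is set, since the
 * flag clearing is only observable through a newer map:
 *
 *	ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, epoch + 1, false);
 *		// "send me the next map"
 *	ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, epoch + 1, true);
 *		// "keep them coming"
 *
 * ceph_monc_renew_subs() is only called when the want changed, so
 * repeated calls don't spam the monitor.
 */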
2096 
2097 static void complete_request(struct ceph_osd_request *req, int err);
2098 static void send_map_check(struct ceph_osd_request *req);
2099 
2100 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2101 {
2102 	struct ceph_osd_client *osdc = req->r_osdc;
2103 	struct ceph_osd *osd;
2104 	enum calc_target_result ct_res;
2105 	bool need_send = false;
2106 	bool promoted = false;
2107 	bool need_abort = false;
2108 
2109 	WARN_ON(req->r_tid);
2110 	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2111 
2112 again:
2113 	ct_res = calc_target(osdc, &req->r_t, NULL, false);
2114 	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2115 		goto promote;
2116 
2117 	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2118 	if (IS_ERR(osd)) {
2119 		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2120 		goto promote;
2121 	}
2122 
2123 	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2124 		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2125 		     osdc->epoch_barrier);
2126 		req->r_t.paused = true;
2127 		maybe_request_map(osdc);
2128 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2129 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2130 		dout("req %p pausewr\n", req);
2131 		req->r_t.paused = true;
2132 		maybe_request_map(osdc);
2133 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2134 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2135 		dout("req %p pauserd\n", req);
2136 		req->r_t.paused = true;
2137 		maybe_request_map(osdc);
2138 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2139 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2140 				     CEPH_OSD_FLAG_FULL_FORCE)) &&
2141 		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2142 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
2143 		dout("req %p full/pool_full\n", req);
2144 		pr_warn_ratelimited("FULL or reached pool quota\n");
2145 		req->r_t.paused = true;
2146 		maybe_request_map(osdc);
2147 		if (req->r_abort_on_full)
2148 			need_abort = true;
2149 	} else if (!osd_homeless(osd)) {
2150 		need_send = true;
2151 	} else {
2152 		maybe_request_map(osdc);
2153 	}
2154 
2155 	mutex_lock(&osd->lock);
2156 	/*
2157 	 * Assign the tid atomically with send_request() to protect
2158 	 * multiple writes to the same object from racing with each
2159 	 * other, resulting in out of order ops on the OSDs.
2160 	 */
2161 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
2162 	link_request(osd, req);
2163 	if (need_send)
2164 		send_request(req);
2165 	else if (need_abort)
2166 		complete_request(req, -ENOSPC);
2167 	mutex_unlock(&osd->lock);
2168 
2169 	if (ct_res == CALC_TARGET_POOL_DNE)
2170 		send_map_check(req);
2171 
2172 	if (promoted)
2173 		downgrade_write(&osdc->lock);
2174 	return;
2175 
2176 promote:
2177 	up_read(&osdc->lock);
2178 	down_write(&osdc->lock);
2179 	wrlocked = true;
2180 	promoted = true;
2181 	goto again;
2182 }
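
/*
 * The promote: pattern above (sketch): an rwsem can't be upgraded in
 * place, so the read lock is dropped, the write lock taken, and the
 * target recomputed from scratch -- the osdmap may have changed in
 * the unlocked window:
 *
 *	up_read(&osdc->lock);
 *	down_write(&osdc->lock);	// state may have moved here
 *	wrlocked = true;
 *	goto again;			// redo calc_target() under wrlock
 *
 * On the way out, downgrade_write() demotes the lock back to read
 * mode so the caller's up_read() pairs correctly.
 */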
2183 
2184 static void account_request(struct ceph_osd_request *req)
2185 {
2186 	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2187 	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2188 
2189 	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2190 	atomic_inc(&req->r_osdc->num_requests);
2191 
2192 	req->r_start_stamp = jiffies;
2193 }
2194 
2195 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2196 {
2197 	ceph_osdc_get_request(req);
2198 	account_request(req);
2199 	__submit_request(req, wrlocked);
2200 }
2201 
2202 static void finish_request(struct ceph_osd_request *req)
2203 {
2204 	struct ceph_osd_client *osdc = req->r_osdc;
2205 
2206 	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2207 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2208 
2209 	if (req->r_osd)
2210 		unlink_request(req->r_osd, req);
2211 	atomic_dec(&osdc->num_requests);
2212 
2213 	/*
2214 	 * If an OSD has failed or come back up and a request has been sent
2215 	 * twice, it's possible to get a reply and end up here while the
2216 	 * request message is queued for delivery.  We will ignore the
2217 	 * reply, so not a big deal, but better to try and catch it.
2218 	 */
2219 	ceph_msg_revoke(req->r_request);
2220 	ceph_msg_revoke_incoming(req->r_reply);
2221 }
2222 
2223 static void __complete_request(struct ceph_osd_request *req)
2224 {
2225 	if (req->r_callback) {
2226 		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2227 		     req->r_tid, req->r_callback, req->r_result);
2228 		req->r_callback(req);
2229 	}
2230 }
2231 
2232 /*
2233  * This is open-coded in handle_reply().
2234  */
2235 static void complete_request(struct ceph_osd_request *req, int err)
2236 {
2237 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2238 
2239 	req->r_result = err;
2240 	finish_request(req);
2241 	__complete_request(req);
2242 	complete_all(&req->r_completion);
2243 	ceph_osdc_put_request(req);
2244 }
2245 
2246 static void cancel_map_check(struct ceph_osd_request *req)
2247 {
2248 	struct ceph_osd_client *osdc = req->r_osdc;
2249 	struct ceph_osd_request *lookup_req;
2250 
2251 	verify_osdc_wrlocked(osdc);
2252 
2253 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2254 	if (!lookup_req)
2255 		return;
2256 
2257 	WARN_ON(lookup_req != req);
2258 	erase_request_mc(&osdc->map_checks, req);
2259 	ceph_osdc_put_request(req);
2260 }
2261 
2262 static void cancel_request(struct ceph_osd_request *req)
2263 {
2264 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2265 
2266 	cancel_map_check(req);
2267 	finish_request(req);
2268 	complete_all(&req->r_completion);
2269 	ceph_osdc_put_request(req);
2270 }
2271 
2272 static void abort_request(struct ceph_osd_request *req, int err)
2273 {
2274 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2275 
2276 	cancel_map_check(req);
2277 	complete_request(req, err);
2278 }
2279 
2280 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2281 {
2282 	if (likely(eb > osdc->epoch_barrier)) {
2283 		dout("updating epoch_barrier from %u to %u\n",
2284 				osdc->epoch_barrier, eb);
2285 		osdc->epoch_barrier = eb;
2286 		/* Request map if we're not at the barrier yet */
2287 		if (eb > osdc->osdmap->epoch)
2288 			maybe_request_map(osdc);
2289 	}
2290 }
2291 
2292 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2293 {
2294 	down_read(&osdc->lock);
2295 	if (unlikely(eb > osdc->epoch_barrier)) {
2296 		up_read(&osdc->lock);
2297 		down_write(&osdc->lock);
2298 		update_epoch_barrier(osdc, eb);
2299 		up_write(&osdc->lock);
2300 	} else {
2301 		up_read(&osdc->lock);
2302 	}
2303 }
2304 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2305 
2306 /*
2307  * Drop all pending requests that are stalled waiting on a full condition to
2308  * clear, and complete them with -ENOSPC as the return code.  Set the
2309  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2310  * cancelled.
2311  */
2312 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2313 {
2314 	struct rb_node *n;
2315 	bool victims = false;
2316 
2317 	dout("enter abort_on_full\n");
2318 
2319 	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
2320 		goto out;
2321 
2322 	/* Scan list and see if there is anything to abort */
2323 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2324 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2325 		struct rb_node *m;
2326 
2327 		m = rb_first(&osd->o_requests);
2328 		while (m) {
2329 			struct ceph_osd_request *req = rb_entry(m,
2330 					struct ceph_osd_request, r_node);
2331 			m = rb_next(m);
2332 
2333 			if (req->r_abort_on_full) {
2334 				victims = true;
2335 				break;
2336 			}
2337 		}
2338 		if (victims)
2339 			break;
2340 	}
2341 
2342 	if (!victims)
2343 		goto out;
2344 
2345 	/*
2346 	 * Update the barrier to the current epoch if it's behind that point,
2347 	 * since we know we have some requests to be aborted in the tree.
2348 	 */
2349 	update_epoch_barrier(osdc, osdc->osdmap->epoch);
2350 
2351 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2352 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2353 		struct rb_node *m;
2354 
2355 		m = rb_first(&osd->o_requests);
2356 		while (m) {
2357 			struct ceph_osd_request *req = rb_entry(m,
2358 					struct ceph_osd_request, r_node);
2359 			m = rb_next(m);
2360 
2361 			if (req->r_abort_on_full &&
2362 			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2363 			     pool_full(osdc, req->r_t.target_oloc.pool)))
2364 				abort_request(req, -ENOSPC);
2365 		}
2366 	}
2367 out:
2368 	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
2369 }
2370 
2371 static void check_pool_dne(struct ceph_osd_request *req)
2372 {
2373 	struct ceph_osd_client *osdc = req->r_osdc;
2374 	struct ceph_osdmap *map = osdc->osdmap;
2375 
2376 	verify_osdc_wrlocked(osdc);
2377 	WARN_ON(!map->epoch);
2378 
2379 	if (req->r_attempts) {
2380 		/*
2381 		 * We sent a request earlier, which means that
2382 		 * previously the pool existed, and now it does not
2383 		 * (i.e., it was deleted).
2384 		 */
2385 		req->r_map_dne_bound = map->epoch;
2386 		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2387 		     req->r_tid);
2388 	} else {
2389 		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2390 		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
2391 	}
2392 
2393 	if (req->r_map_dne_bound) {
2394 		if (map->epoch >= req->r_map_dne_bound) {
2395 			/* we had a new enough map */
2396 			pr_info_ratelimited("tid %llu pool does not exist\n",
2397 					    req->r_tid);
2398 			complete_request(req, -ENOENT);
2399 		}
2400 	} else {
2401 		send_map_check(req);
2402 	}
2403 }
2404 
2405 static void map_check_cb(struct ceph_mon_generic_request *greq)
2406 {
2407 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2408 	struct ceph_osd_request *req;
2409 	u64 tid = greq->private_data;
2410 
2411 	WARN_ON(greq->result || !greq->u.newest);
2412 
2413 	down_write(&osdc->lock);
2414 	req = lookup_request_mc(&osdc->map_checks, tid);
2415 	if (!req) {
2416 		dout("%s tid %llu dne\n", __func__, tid);
2417 		goto out_unlock;
2418 	}
2419 
2420 	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2421 	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2422 	if (!req->r_map_dne_bound)
2423 		req->r_map_dne_bound = greq->u.newest;
2424 	erase_request_mc(&osdc->map_checks, req);
2425 	check_pool_dne(req);
2426 
2427 	ceph_osdc_put_request(req);
2428 out_unlock:
2429 	up_write(&osdc->lock);
2430 }
2431 
2432 static void send_map_check(struct ceph_osd_request *req)
2433 {
2434 	struct ceph_osd_client *osdc = req->r_osdc;
2435 	struct ceph_osd_request *lookup_req;
2436 	int ret;
2437 
2438 	verify_osdc_wrlocked(osdc);
2439 
2440 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2441 	if (lookup_req) {
2442 		WARN_ON(lookup_req != req);
2443 		return;
2444 	}
2445 
2446 	ceph_osdc_get_request(req);
2447 	insert_request_mc(&osdc->map_checks, req);
2448 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2449 					  map_check_cb, req->r_tid);
2450 	WARN_ON(ret);
2451 }
2452 
2453 /*
2454  * lingering requests, watch/notify v2 infrastructure
2455  */
2456 static void linger_release(struct kref *kref)
2457 {
2458 	struct ceph_osd_linger_request *lreq =
2459 	    container_of(kref, struct ceph_osd_linger_request, kref);
2460 
2461 	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2462 	     lreq->reg_req, lreq->ping_req);
2463 	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2464 	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2465 	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2466 	WARN_ON(!list_empty(&lreq->scan_item));
2467 	WARN_ON(!list_empty(&lreq->pending_lworks));
2468 	WARN_ON(lreq->osd);
2469 
2470 	if (lreq->reg_req)
2471 		ceph_osdc_put_request(lreq->reg_req);
2472 	if (lreq->ping_req)
2473 		ceph_osdc_put_request(lreq->ping_req);
2474 	target_destroy(&lreq->t);
2475 	kfree(lreq);
2476 }
2477 
2478 static void linger_put(struct ceph_osd_linger_request *lreq)
2479 {
2480 	if (lreq)
2481 		kref_put(&lreq->kref, linger_release);
2482 }
2483 
2484 static struct ceph_osd_linger_request *
2485 linger_get(struct ceph_osd_linger_request *lreq)
2486 {
2487 	kref_get(&lreq->kref);
2488 	return lreq;
2489 }
2490 
2491 static struct ceph_osd_linger_request *
2492 linger_alloc(struct ceph_osd_client *osdc)
2493 {
2494 	struct ceph_osd_linger_request *lreq;
2495 
2496 	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2497 	if (!lreq)
2498 		return NULL;
2499 
2500 	kref_init(&lreq->kref);
2501 	mutex_init(&lreq->lock);
2502 	RB_CLEAR_NODE(&lreq->node);
2503 	RB_CLEAR_NODE(&lreq->osdc_node);
2504 	RB_CLEAR_NODE(&lreq->mc_node);
2505 	INIT_LIST_HEAD(&lreq->scan_item);
2506 	INIT_LIST_HEAD(&lreq->pending_lworks);
2507 	init_completion(&lreq->reg_commit_wait);
2508 	init_completion(&lreq->notify_finish_wait);
2509 
2510 	lreq->osdc = osdc;
2511 	target_init(&lreq->t);
2512 
2513 	dout("%s lreq %p\n", __func__, lreq);
2514 	return lreq;
2515 }
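
/*
 * Linger lifetime (sketch): the reference from linger_alloc() belongs
 * to the creator; every other holder pairs linger_get()/linger_put():
 *
 *	lreq = linger_alloc(osdc);
 *	...
 *	req->r_priv = linger_get(lreq);	// ref held by in-flight req
 *	...
 *	linger_put(lreq);		// in the request's callback
 *
 * linger_release() runs on the final put and asserts that the lreq
 * has already been unlinked from every tree and list.
 */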
2516 
2517 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2518 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2519 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2520 
2521 /*
2522  * Create linger request <-> OSD session relation.
2523  *
2524  * @lreq has to be registered, @osd may be homeless.
2525  */
2526 static void link_linger(struct ceph_osd *osd,
2527 			struct ceph_osd_linger_request *lreq)
2528 {
2529 	verify_osd_locked(osd);
2530 	WARN_ON(!lreq->linger_id || lreq->osd);
2531 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2532 	     osd->o_osd, lreq, lreq->linger_id);
2533 
2534 	if (!osd_homeless(osd))
2535 		__remove_osd_from_lru(osd);
2536 	else
2537 		atomic_inc(&osd->o_osdc->num_homeless);
2538 
2539 	get_osd(osd);
2540 	insert_linger(&osd->o_linger_requests, lreq);
2541 	lreq->osd = osd;
2542 }
2543 
2544 static void unlink_linger(struct ceph_osd *osd,
2545 			  struct ceph_osd_linger_request *lreq)
2546 {
2547 	verify_osd_locked(osd);
2548 	WARN_ON(lreq->osd != osd);
2549 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2550 	     osd->o_osd, lreq, lreq->linger_id);
2551 
2552 	lreq->osd = NULL;
2553 	erase_linger(&osd->o_linger_requests, lreq);
2554 	put_osd(osd);
2555 
2556 	if (!osd_homeless(osd))
2557 		maybe_move_osd_to_lru(osd);
2558 	else
2559 		atomic_dec(&osd->o_osdc->num_homeless);
2560 }
2561 
2562 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2563 {
2564 	verify_osdc_locked(lreq->osdc);
2565 
2566 	return !RB_EMPTY_NODE(&lreq->osdc_node);
2567 }
2568 
2569 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2570 {
2571 	struct ceph_osd_client *osdc = lreq->osdc;
2572 	bool registered;
2573 
2574 	down_read(&osdc->lock);
2575 	registered = __linger_registered(lreq);
2576 	up_read(&osdc->lock);
2577 
2578 	return registered;
2579 }
2580 
2581 static void linger_register(struct ceph_osd_linger_request *lreq)
2582 {
2583 	struct ceph_osd_client *osdc = lreq->osdc;
2584 
2585 	verify_osdc_wrlocked(osdc);
2586 	WARN_ON(lreq->linger_id);
2587 
2588 	linger_get(lreq);
2589 	lreq->linger_id = ++osdc->last_linger_id;
2590 	insert_linger_osdc(&osdc->linger_requests, lreq);
2591 }
2592 
2593 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2594 {
2595 	struct ceph_osd_client *osdc = lreq->osdc;
2596 
2597 	verify_osdc_wrlocked(osdc);
2598 
2599 	erase_linger_osdc(&osdc->linger_requests, lreq);
2600 	linger_put(lreq);
2601 }
2602 
2603 static void cancel_linger_request(struct ceph_osd_request *req)
2604 {
2605 	struct ceph_osd_linger_request *lreq = req->r_priv;
2606 
2607 	WARN_ON(!req->r_linger);
2608 	cancel_request(req);
2609 	linger_put(lreq);
2610 }
2611 
2612 struct linger_work {
2613 	struct work_struct work;
2614 	struct ceph_osd_linger_request *lreq;
2615 	struct list_head pending_item;
2616 	unsigned long queued_stamp;
2617 
2618 	union {
2619 		struct {
2620 			u64 notify_id;
2621 			u64 notifier_id;
2622 			void *payload; /* points into @msg front */
2623 			size_t payload_len;
2624 
2625 			struct ceph_msg *msg; /* for ceph_msg_put() */
2626 		} notify;
2627 		struct {
2628 			int err;
2629 		} error;
2630 	};
2631 };
2632 
2633 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2634 				       work_func_t workfn)
2635 {
2636 	struct linger_work *lwork;
2637 
2638 	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2639 	if (!lwork)
2640 		return NULL;
2641 
2642 	INIT_WORK(&lwork->work, workfn);
2643 	INIT_LIST_HEAD(&lwork->pending_item);
2644 	lwork->lreq = linger_get(lreq);
2645 
2646 	return lwork;
2647 }
2648 
2649 static void lwork_free(struct linger_work *lwork)
2650 {
2651 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2652 
2653 	mutex_lock(&lreq->lock);
2654 	list_del(&lwork->pending_item);
2655 	mutex_unlock(&lreq->lock);
2656 
2657 	linger_put(lreq);
2658 	kfree(lwork);
2659 }
2660 
2661 static void lwork_queue(struct linger_work *lwork)
2662 {
2663 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2664 	struct ceph_osd_client *osdc = lreq->osdc;
2665 
2666 	verify_lreq_locked(lreq);
2667 	WARN_ON(!list_empty(&lwork->pending_item));
2668 
2669 	lwork->queued_stamp = jiffies;
2670 	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2671 	queue_work(osdc->notify_wq, &lwork->work);
2672 }
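
/*
 * lwork flow (sketch): allocate with the work function, fill in the
 * union, queue under lreq->lock; the handler runs on osdc->notify_wq
 * and frees the lwork when it is done:
 *
 *	lwork = lwork_alloc(lreq, do_watch_error);
 *	lwork->error.err = lreq->last_error;
 *	lwork_queue(lwork);		// caller holds lreq->lock
 *	...
 *	lwork_free(lwork);		// at the end of do_watch_error()
 *
 * queued_stamp records when the work was queued and pending_lworks
 * keeps it discoverable, e.g. for diagnosing stalled callbacks.
 */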
2673 
2674 static void do_watch_notify(struct work_struct *w)
2675 {
2676 	struct linger_work *lwork = container_of(w, struct linger_work, work);
2677 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2678 
2679 	if (!linger_registered(lreq)) {
2680 		dout("%s lreq %p not registered\n", __func__, lreq);
2681 		goto out;
2682 	}
2683 
2684 	WARN_ON(!lreq->is_watch);
2685 	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2686 	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2687 	     lwork->notify.payload_len);
2688 	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2689 		  lwork->notify.notifier_id, lwork->notify.payload,
2690 		  lwork->notify.payload_len);
2691 
2692 out:
2693 	ceph_msg_put(lwork->notify.msg);
2694 	lwork_free(lwork);
2695 }
2696 
2697 static void do_watch_error(struct work_struct *w)
2698 {
2699 	struct linger_work *lwork = container_of(w, struct linger_work, work);
2700 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2701 
2702 	if (!linger_registered(lreq)) {
2703 		dout("%s lreq %p not registered\n", __func__, lreq);
2704 		goto out;
2705 	}
2706 
2707 	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2708 	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2709 
2710 out:
2711 	lwork_free(lwork);
2712 }
2713 
2714 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2715 {
2716 	struct linger_work *lwork;
2717 
2718 	lwork = lwork_alloc(lreq, do_watch_error);
2719 	if (!lwork) {
2720 		pr_err("failed to allocate error-lwork\n");
2721 		return;
2722 	}
2723 
2724 	lwork->error.err = lreq->last_error;
2725 	lwork_queue(lwork);
2726 }
2727 
2728 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2729 				       int result)
2730 {
2731 	if (!completion_done(&lreq->reg_commit_wait)) {
2732 		lreq->reg_commit_error = (result <= 0 ? result : 0);
2733 		complete_all(&lreq->reg_commit_wait);
2734 	}
2735 }
2736 
2737 static void linger_commit_cb(struct ceph_osd_request *req)
2738 {
2739 	struct ceph_osd_linger_request *lreq = req->r_priv;
2740 
2741 	mutex_lock(&lreq->lock);
2742 	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2743 	     lreq->linger_id, req->r_result);
2744 	linger_reg_commit_complete(lreq, req->r_result);
2745 	lreq->committed = true;
2746 
2747 	if (!lreq->is_watch) {
2748 		struct ceph_osd_data *osd_data =
2749 		    osd_req_op_data(req, 0, notify, response_data);
2750 		void *p = page_address(osd_data->pages[0]);
2751 
2752 		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2753 			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2754 
2755 		/* make note of the notify_id */
2756 		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2757 			lreq->notify_id = ceph_decode_64(&p);
2758 			dout("lreq %p notify_id %llu\n", lreq,
2759 			     lreq->notify_id);
2760 		} else {
2761 			dout("lreq %p no notify_id\n", lreq);
2762 		}
2763 	}
2764 
2765 	mutex_unlock(&lreq->lock);
2766 	linger_put(lreq);
2767 }
2768 
2769 static int normalize_watch_error(int err)
2770 {
2771 	/*
2772 	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2773 	 * notification and a failure to reconnect because we raced with
2774 	 * the delete appear the same to the user.
2775 	 */
2776 	if (err == -ENOENT)
2777 		err = -ENOTCONN;
2778 
2779 	return err;
2780 }
2781 
2782 static void linger_reconnect_cb(struct ceph_osd_request *req)
2783 {
2784 	struct ceph_osd_linger_request *lreq = req->r_priv;
2785 
2786 	mutex_lock(&lreq->lock);
2787 	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2788 	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
2789 	if (req->r_result < 0) {
2790 		if (!lreq->last_error) {
2791 			lreq->last_error = normalize_watch_error(req->r_result);
2792 			queue_watch_error(lreq);
2793 		}
2794 	}
2795 
2796 	mutex_unlock(&lreq->lock);
2797 	linger_put(lreq);
2798 }
2799 
2800 static void send_linger(struct ceph_osd_linger_request *lreq)
2801 {
2802 	struct ceph_osd_request *req = lreq->reg_req;
2803 	struct ceph_osd_req_op *op = &req->r_ops[0];
2804 
2805 	verify_osdc_wrlocked(req->r_osdc);
2806 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2807 
2808 	if (req->r_osd)
2809 		cancel_linger_request(req);
2810 
2811 	request_reinit(req);
2812 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2813 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2814 	req->r_flags = lreq->t.flags;
2815 	req->r_mtime = lreq->mtime;
2816 
2817 	mutex_lock(&lreq->lock);
2818 	if (lreq->is_watch && lreq->committed) {
2819 		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2820 			op->watch.cookie != lreq->linger_id);
2821 		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2822 		op->watch.gen = ++lreq->register_gen;
2823 		dout("lreq %p reconnect register_gen %u\n", lreq,
2824 		     op->watch.gen);
2825 		req->r_callback = linger_reconnect_cb;
2826 	} else {
2827 		if (!lreq->is_watch)
2828 			lreq->notify_id = 0;
2829 		else
2830 			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2831 		dout("lreq %p register\n", lreq);
2832 		req->r_callback = linger_commit_cb;
2833 	}
2834 	mutex_unlock(&lreq->lock);
2835 
2836 	req->r_priv = linger_get(lreq);
2837 	req->r_linger = true;
2838 
2839 	submit_request(req, true);
2840 }
2841 
2842 static void linger_ping_cb(struct ceph_osd_request *req)
2843 {
2844 	struct ceph_osd_linger_request *lreq = req->r_priv;
2845 
2846 	mutex_lock(&lreq->lock);
2847 	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2848 	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2849 	     lreq->last_error);
2850 	if (lreq->register_gen == req->r_ops[0].watch.gen) {
2851 		if (!req->r_result) {
2852 			lreq->watch_valid_thru = lreq->ping_sent;
2853 		} else if (!lreq->last_error) {
2854 			lreq->last_error = normalize_watch_error(req->r_result);
2855 			queue_watch_error(lreq);
2856 		}
2857 	} else {
2858 		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2859 		     lreq->register_gen, req->r_ops[0].watch.gen);
2860 	}
2861 
2862 	mutex_unlock(&lreq->lock);
2863 	linger_put(lreq);
2864 }
2865 
2866 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2867 {
2868 	struct ceph_osd_client *osdc = lreq->osdc;
2869 	struct ceph_osd_request *req = lreq->ping_req;
2870 	struct ceph_osd_req_op *op = &req->r_ops[0];
2871 
2872 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2873 		dout("%s PAUSERD\n", __func__);
2874 		return;
2875 	}
2876 
2877 	lreq->ping_sent = jiffies;
2878 	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2879 	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
2880 	     lreq->register_gen);
2881 
2882 	if (req->r_osd)
2883 		cancel_linger_request(req);
2884 
2885 	request_reinit(req);
2886 	target_copy(&req->r_t, &lreq->t);
2887 
2888 	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2889 		op->watch.cookie != lreq->linger_id ||
2890 		op->watch.op != CEPH_OSD_WATCH_OP_PING);
2891 	op->watch.gen = lreq->register_gen;
2892 	req->r_callback = linger_ping_cb;
2893 	req->r_priv = linger_get(lreq);
2894 	req->r_linger = true;
2895 
2896 	ceph_osdc_get_request(req);
2897 	account_request(req);
2898 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
2899 	link_request(lreq->osd, req);
2900 	send_request(req);
2901 }
2902 
2903 static void linger_submit(struct ceph_osd_linger_request *lreq)
2904 {
2905 	struct ceph_osd_client *osdc = lreq->osdc;
2906 	struct ceph_osd *osd;
2907 
2908 	calc_target(osdc, &lreq->t, NULL, false);
2909 	osd = lookup_create_osd(osdc, lreq->t.osd, true);
2910 	link_linger(osd, lreq);
2911 
2912 	send_linger(lreq);
2913 }
2914 
2915 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2916 {
2917 	struct ceph_osd_client *osdc = lreq->osdc;
2918 	struct ceph_osd_linger_request *lookup_lreq;
2919 
2920 	verify_osdc_wrlocked(osdc);
2921 
2922 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2923 				       lreq->linger_id);
2924 	if (!lookup_lreq)
2925 		return;
2926 
2927 	WARN_ON(lookup_lreq != lreq);
2928 	erase_linger_mc(&osdc->linger_map_checks, lreq);
2929 	linger_put(lreq);
2930 }
2931 
2932 /*
2933  * @lreq has to be both registered and linked.
2934  */
2935 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2936 {
2937 	if (lreq->is_watch && lreq->ping_req->r_osd)
2938 		cancel_linger_request(lreq->ping_req);
2939 	if (lreq->reg_req->r_osd)
2940 		cancel_linger_request(lreq->reg_req);
2941 	cancel_linger_map_check(lreq);
2942 	unlink_linger(lreq->osd, lreq);
2943 	linger_unregister(lreq);
2944 }
2945 
2946 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2947 {
2948 	struct ceph_osd_client *osdc = lreq->osdc;
2949 
2950 	down_write(&osdc->lock);
2951 	if (__linger_registered(lreq))
2952 		__linger_cancel(lreq);
2953 	up_write(&osdc->lock);
2954 }
2955 
2956 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2957 
2958 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2959 {
2960 	struct ceph_osd_client *osdc = lreq->osdc;
2961 	struct ceph_osdmap *map = osdc->osdmap;
2962 
2963 	verify_osdc_wrlocked(osdc);
2964 	WARN_ON(!map->epoch);
2965 
2966 	if (lreq->register_gen) {
2967 		lreq->map_dne_bound = map->epoch;
2968 		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2969 		     lreq, lreq->linger_id);
2970 	} else {
2971 		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2972 		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2973 		     map->epoch);
2974 	}
2975 
2976 	if (lreq->map_dne_bound) {
2977 		if (map->epoch >= lreq->map_dne_bound) {
2978 			/* we had a new enough map */
2979 			pr_info("linger_id %llu pool does not exist\n",
2980 				lreq->linger_id);
2981 			linger_reg_commit_complete(lreq, -ENOENT);
2982 			__linger_cancel(lreq);
2983 		}
2984 	} else {
2985 		send_linger_map_check(lreq);
2986 	}
2987 }
2988 
2989 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2990 {
2991 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2992 	struct ceph_osd_linger_request *lreq;
2993 	u64 linger_id = greq->private_data;
2994 
2995 	WARN_ON(greq->result || !greq->u.newest);
2996 
2997 	down_write(&osdc->lock);
2998 	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2999 	if (!lreq) {
3000 		dout("%s linger_id %llu dne\n", __func__, linger_id);
3001 		goto out_unlock;
3002 	}
3003 
3004 	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3005 	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3006 	     greq->u.newest);
3007 	if (!lreq->map_dne_bound)
3008 		lreq->map_dne_bound = greq->u.newest;
3009 	erase_linger_mc(&osdc->linger_map_checks, lreq);
3010 	check_linger_pool_dne(lreq);
3011 
3012 	linger_put(lreq);
3013 out_unlock:
3014 	up_write(&osdc->lock);
3015 }
3016 
3017 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3018 {
3019 	struct ceph_osd_client *osdc = lreq->osdc;
3020 	struct ceph_osd_linger_request *lookup_lreq;
3021 	int ret;
3022 
3023 	verify_osdc_wrlocked(osdc);
3024 
3025 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3026 				       lreq->linger_id);
3027 	if (lookup_lreq) {
3028 		WARN_ON(lookup_lreq != lreq);
3029 		return;
3030 	}
3031 
3032 	linger_get(lreq);
3033 	insert_linger_mc(&osdc->linger_map_checks, lreq);
3034 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3035 					  linger_map_check_cb, lreq->linger_id);
3036 	WARN_ON(ret);
3037 }
3038 
3039 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3040 {
3041 	int ret;
3042 
3043 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3044 	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3045 	return ret ?: lreq->reg_commit_error;
3046 }
3047 
3048 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3049 {
3050 	int ret;
3051 
3052 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3053 	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3054 	return ret ?: lreq->notify_finish_error;
3055 }
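
/*
 * The "ret ?: err" idiom above (sketch): an interruption wins over
 * the operation's own status --
 *
 *	ret = wait_for_completion_interruptible(&wait);
 *	return ret ?: op_error;	// -ERESTARTSYS if signalled,
 *				// otherwise the recorded result
 *
 * so callers see the commit / notify-finish error only when the wait
 * actually completed.
 */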
3056 
3057 /*
3058  * Timeout callback, called every N seconds.  When one or more OSD
3059  * requests have been active for more than N seconds, we send a keepalive
3060  * (tag + timestamp) to the OSD to ensure any communications channel
3061  * reset is detected.
3062  */
3063 static void handle_timeout(struct work_struct *work)
3064 {
3065 	struct ceph_osd_client *osdc =
3066 		container_of(work, struct ceph_osd_client, timeout_work.work);
3067 	struct ceph_options *opts = osdc->client->options;
3068 	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3069 	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3070 	LIST_HEAD(slow_osds);
3071 	struct rb_node *n, *p;
3072 
3073 	dout("%s osdc %p\n", __func__, osdc);
3074 	down_write(&osdc->lock);
3075 
3076 	/*
3077 	 * Ping OSDs that are a bit slow.  This ensures that if there
3078 	 * is a break in the TCP connection we will notice and reopen
3079 	 * a connection with that OSD (from the fault callback).
3080 	 */
3081 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3082 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3083 		bool found = false;
3084 
3085 		for (p = rb_first(&osd->o_requests); p; ) {
3086 			struct ceph_osd_request *req =
3087 			    rb_entry(p, struct ceph_osd_request, r_node);
3088 
3089 			p = rb_next(p); /* abort_request() */
3090 
3091 			if (time_before(req->r_stamp, cutoff)) {
3092 				dout(" req %p tid %llu on osd%d is laggy\n",
3093 				     req, req->r_tid, osd->o_osd);
3094 				found = true;
3095 			}
3096 			if (opts->osd_request_timeout &&
3097 			    time_before(req->r_start_stamp, expiry_cutoff)) {
3098 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3099 				       req->r_tid, osd->o_osd);
3100 				abort_request(req, -ETIMEDOUT);
3101 			}
3102 		}
3103 		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3104 			struct ceph_osd_linger_request *lreq =
3105 			    rb_entry(p, struct ceph_osd_linger_request, node);
3106 
3107 			dout(" lreq %p linger_id %llu is served by osd%d\n",
3108 			     lreq, lreq->linger_id, osd->o_osd);
3109 			found = true;
3110 
3111 			mutex_lock(&lreq->lock);
3112 			if (lreq->is_watch && lreq->committed && !lreq->last_error)
3113 				send_linger_ping(lreq);
3114 			mutex_unlock(&lreq->lock);
3115 		}
3116 
3117 		if (found)
3118 			list_move_tail(&osd->o_keepalive_item, &slow_osds);
3119 	}
3120 
3121 	if (opts->osd_request_timeout) {
3122 		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3123 			struct ceph_osd_request *req =
3124 			    rb_entry(p, struct ceph_osd_request, r_node);
3125 
3126 			p = rb_next(p); /* abort_request() */
3127 
3128 			if (time_before(req->r_start_stamp, expiry_cutoff)) {
3129 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3130 				       req->r_tid, osdc->homeless_osd.o_osd);
3131 				abort_request(req, -ETIMEDOUT);
3132 			}
3133 		}
3134 	}
3135 
3136 	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3137 		maybe_request_map(osdc);
3138 
3139 	while (!list_empty(&slow_osds)) {
3140 		struct ceph_osd *osd = list_first_entry(&slow_osds,
3141 							struct ceph_osd,
3142 							o_keepalive_item);
3143 		list_del_init(&osd->o_keepalive_item);
3144 		ceph_con_keepalive(&osd->o_con);
3145 	}
3146 
3147 	up_write(&osdc->lock);
3148 	schedule_delayed_work(&osdc->timeout_work,
3149 			      osdc->client->options->osd_keepalive_timeout);
3150 }
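
/*
 * Cutoff arithmetic (sketch): stamps and cutoffs are in jiffies, so
 * the comparisons are wraparound-safe via time_before():
 *
 *	cutoff = jiffies - keepalive_timeout;
 *	if (time_before(req->r_stamp, cutoff))
 *		// r_stamp older than keepalive_timeout -> OSD is laggy
 *
 * When osd_request_timeout is 0 the expiry_cutoff computed at the top
 * is meaningless, which is why both of its users re-check
 * opts->osd_request_timeout before comparing against it.
 */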
3151 
3152 static void handle_osds_timeout(struct work_struct *work)
3153 {
3154 	struct ceph_osd_client *osdc =
3155 		container_of(work, struct ceph_osd_client,
3156 			     osds_timeout_work.work);
3157 	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3158 	struct ceph_osd *osd, *nosd;
3159 
3160 	dout("%s osdc %p\n", __func__, osdc);
3161 	down_write(&osdc->lock);
3162 	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3163 		if (time_before(jiffies, osd->lru_ttl))
3164 			break;
3165 
3166 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3167 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3168 		close_osd(osd);
3169 	}
3170 
3171 	up_write(&osdc->lock);
3172 	schedule_delayed_work(&osdc->osds_timeout_work,
3173 			      round_jiffies_relative(delay));
3174 }
3175 
3176 static int ceph_oloc_decode(void **p, void *end,
3177 			    struct ceph_object_locator *oloc)
3178 {
3179 	u8 struct_v, struct_cv;
3180 	u32 len;
3181 	void *struct_end;
3182 	int ret = 0;
3183 
3184 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3185 	struct_v = ceph_decode_8(p);
3186 	struct_cv = ceph_decode_8(p);
3187 	if (struct_v < 3) {
3188 		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3189 			struct_v, struct_cv);
3190 		goto e_inval;
3191 	}
3192 	if (struct_cv > 6) {
3193 		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3194 			struct_v, struct_cv);
3195 		goto e_inval;
3196 	}
3197 	len = ceph_decode_32(p);
3198 	ceph_decode_need(p, end, len, e_inval);
3199 	struct_end = *p + len;
3200 
3201 	oloc->pool = ceph_decode_64(p);
3202 	*p += 4; /* skip preferred */
3203 
3204 	len = ceph_decode_32(p);
3205 	if (len > 0) {
3206 		pr_warn("ceph_object_locator::key is set\n");
3207 		goto e_inval;
3208 	}
3209 
3210 	if (struct_v >= 5) {
3211 		bool changed = false;
3212 
3213 		len = ceph_decode_32(p);
3214 		if (len > 0) {
3215 			ceph_decode_need(p, end, len, e_inval);
3216 			if (!oloc->pool_ns ||
3217 			    ceph_compare_string(oloc->pool_ns, *p, len))
3218 				changed = true;
3219 			*p += len;
3220 		} else {
3221 			if (oloc->pool_ns)
3222 				changed = true;
3223 		}
3224 		if (changed) {
3225 			/* redirect changes namespace */
3226 			pr_warn("ceph_object_locator::nspace is changed\n");
3227 			goto e_inval;
3228 		}
3229 	}
3230 
3231 	if (struct_v >= 6) {
3232 		s64 hash = ceph_decode_64(p);
3233 		if (hash != -1) {
3234 			pr_warn("ceph_object_locator::hash is set\n");
3235 			goto e_inval;
3236 		}
3237 	}
3238 
3239 	/* skip the rest */
3240 	*p = struct_end;
3241 out:
3242 	return ret;
3243 
3244 e_inval:
3245 	ret = -EINVAL;
3246 	goto out;
3247 }
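
/*
 * The versioned-decode pattern above (sketch of the standard Ceph
 * encoding convention): every struct is guarded by a (version,
 * compat version, length) header, so unknown trailing fields can be
 * skipped safely:
 *
 *	struct_v  = ceph_decode_8(p);	// what the encoder wrote
 *	struct_cv = ceph_decode_8(p);	// oldest decoder it supports
 *	len	  = ceph_decode_32(p);	// payload length
 *	struct_end = *p + len;
 *	... decode the fields we understand ...
 *	*p = struct_end;		// skip anything newer
 */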
3248 
3249 static int ceph_redirect_decode(void **p, void *end,
3250 				struct ceph_request_redirect *redir)
3251 {
3252 	u8 struct_v, struct_cv;
3253 	u32 len;
3254 	void *struct_end;
3255 	int ret;
3256 
3257 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3258 	struct_v = ceph_decode_8(p);
3259 	struct_cv = ceph_decode_8(p);
3260 	if (struct_cv > 1) {
3261 		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3262 			struct_v, struct_cv);
3263 		goto e_inval;
3264 	}
3265 	len = ceph_decode_32(p);
3266 	ceph_decode_need(p, end, len, e_inval);
3267 	struct_end = *p + len;
3268 
3269 	ret = ceph_oloc_decode(p, end, &redir->oloc);
3270 	if (ret)
3271 		goto out;
3272 
3273 	len = ceph_decode_32(p);
3274 	if (len > 0) {
3275 		pr_warn("ceph_request_redirect::object_name is set\n");
3276 		goto e_inval;
3277 	}
3278 
3279 	len = ceph_decode_32(p);
3280 	*p += len; /* skip osd_instructions */
3281 
3282 	/* skip the rest */
3283 	*p = struct_end;
3284 out:
3285 	return ret;
3286 
3287 e_inval:
3288 	ret = -EINVAL;
3289 	goto out;
3290 }
3291 
3292 struct MOSDOpReply {
3293 	struct ceph_pg pgid;
3294 	u64 flags;
3295 	int result;
3296 	u32 epoch;
3297 	int num_ops;
3298 	u32 outdata_len[CEPH_OSD_MAX_OPS];
3299 	s32 rval[CEPH_OSD_MAX_OPS];
3300 	int retry_attempt;
3301 	struct ceph_eversion replay_version;
3302 	u64 user_version;
3303 	struct ceph_request_redirect redirect;
3304 };
3305 
3306 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3307 {
3308 	void *p = msg->front.iov_base;
3309 	void *const end = p + msg->front.iov_len;
3310 	u16 version = le16_to_cpu(msg->hdr.version);
3311 	struct ceph_eversion bad_replay_version;
3312 	u8 decode_redir;
3313 	u32 len;
3314 	int ret;
3315 	int i;
3316 
3317 	ceph_decode_32_safe(&p, end, len, e_inval);
3318 	ceph_decode_need(&p, end, len, e_inval);
3319 	p += len; /* skip oid */
3320 
3321 	ret = ceph_decode_pgid(&p, end, &m->pgid);
3322 	if (ret)
3323 		return ret;
3324 
3325 	ceph_decode_64_safe(&p, end, m->flags, e_inval);
3326 	ceph_decode_32_safe(&p, end, m->result, e_inval);
3327 	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3328 	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3329 	p += sizeof(bad_replay_version);
3330 	ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3331 
3332 	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3333 	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3334 		goto e_inval;
3335 
3336 	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3337 			 e_inval);
3338 	for (i = 0; i < m->num_ops; i++) {
3339 		struct ceph_osd_op *op = p;
3340 
3341 		m->outdata_len[i] = le32_to_cpu(op->payload_len);
3342 		p += sizeof(*op);
3343 	}
3344 
3345 	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3346 	for (i = 0; i < m->num_ops; i++)
3347 		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3348 
3349 	if (version >= 5) {
3350 		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3351 		memcpy(&m->replay_version, p, sizeof(m->replay_version));
3352 		p += sizeof(m->replay_version);
3353 		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3354 	} else {
3355 		m->replay_version = bad_replay_version; /* struct */
3356 		m->user_version = le64_to_cpu(m->replay_version.version);
3357 	}
3358 
3359 	if (version >= 6) {
3360 		if (version >= 7)
3361 			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3362 		else
3363 			decode_redir = 1;
3364 	} else {
3365 		decode_redir = 0;
3366 	}
3367 
3368 	if (decode_redir) {
3369 		ret = ceph_redirect_decode(&p, end, &m->redirect);
3370 		if (ret)
3371 			return ret;
3372 	} else {
3373 		ceph_oloc_init(&m->redirect.oloc);
3374 	}
3375 
3376 	return 0;
3377 
3378 e_inval:
3379 	return -EINVAL;
3380 }
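
/*
 * Bounds-checked decoding (sketch): the *_safe helpers jump to the
 * supplied label when fewer bytes than needed remain, so a short or
 * corrupt reply degrades to -EINVAL instead of an overrun:
 *
 *	ceph_decode_32_safe(&p, end, len, e_inval);	// the length
 *	ceph_decode_need(&p, end, len, e_inval);	// the payload
 *	p += len;
 *
 * Fixed-size chunks (replay_version etc.) are likewise preceded by a
 * ceph_decode_need() before the memcpy().
 */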
3381 
3382 /*
3383  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3384  * specified.
3385  */
3386 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3387 {
3388 	struct ceph_osd_client *osdc = osd->o_osdc;
3389 	struct ceph_osd_request *req;
3390 	struct MOSDOpReply m;
3391 	u64 tid = le64_to_cpu(msg->hdr.tid);
3392 	u32 data_len = 0;
3393 	int ret;
3394 	int i;
3395 
3396 	dout("%s msg %p tid %llu\n", __func__, msg, tid);
3397 
3398 	down_read(&osdc->lock);
3399 	if (!osd_registered(osd)) {
3400 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
3401 		goto out_unlock_osdc;
3402 	}
3403 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3404 
3405 	mutex_lock(&osd->lock);
3406 	req = lookup_request(&osd->o_requests, tid);
3407 	if (!req) {
3408 		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3409 		goto out_unlock_session;
3410 	}
3411 
3412 	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3413 	ret = decode_MOSDOpReply(msg, &m);
3414 	m.redirect.oloc.pool_ns = NULL;
3415 	if (ret) {
3416 		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3417 		       req->r_tid, ret);
3418 		ceph_msg_dump(msg);
3419 		goto fail_request;
3420 	}
3421 	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3422 	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3423 	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3424 	     le64_to_cpu(m.replay_version.version), m.user_version);
3425 
3426 	if (m.retry_attempt >= 0) {
3427 		if (m.retry_attempt != req->r_attempts - 1) {
3428 			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3429 			     req, req->r_tid, m.retry_attempt,
3430 			     req->r_attempts - 1);
3431 			goto out_unlock_session;
3432 		}
3433 	} else {
3434 		WARN_ON(1); /* MOSDOpReply v4 is assumed */
3435 	}
3436 
3437 	if (!ceph_oloc_empty(&m.redirect.oloc)) {
3438 		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3439 		     m.redirect.oloc.pool);
3440 		unlink_request(osd, req);
3441 		mutex_unlock(&osd->lock);
3442 
3443 		/*
3444 		 * Not ceph_oloc_copy() - changing pool_ns is not
3445 		 * supported.
3446 		 */
3447 		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3448 		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3449 		req->r_tid = 0;
3450 		__submit_request(req, false);
3451 		goto out_unlock_osdc;
3452 	}
3453 
3454 	if (m.num_ops != req->r_num_ops) {
3455 		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3456 		       req->r_num_ops, req->r_tid);
3457 		goto fail_request;
3458 	}
3459 	for (i = 0; i < req->r_num_ops; i++) {
3460 		dout(" req %p tid %llu op %d rval %d len %u\n", req,
3461 		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
3462 		req->r_ops[i].rval = m.rval[i];
3463 		req->r_ops[i].outdata_len = m.outdata_len[i];
3464 		data_len += m.outdata_len[i];
3465 	}
3466 	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3467 		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3468 		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
3469 		goto fail_request;
3470 	}
3471 	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3472 	     req, req->r_tid, m.result, data_len);
3473 
3474 	/*
3475 	 * Since we only ever request ONDISK, we should only ever get
3476 	 * one (type of) reply back.
3477 	 */
3478 	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3479 	req->r_result = m.result ?: data_len;
3480 	finish_request(req);
3481 	mutex_unlock(&osd->lock);
3482 	up_read(&osdc->lock);
3483 
3484 	__complete_request(req);
3485 	complete_all(&req->r_completion);
3486 	ceph_osdc_put_request(req);
3487 	return;
3488 
3489 fail_request:
3490 	complete_request(req, -EIO);
3491 out_unlock_session:
3492 	mutex_unlock(&osd->lock);
3493 out_unlock_osdc:
3494 	up_read(&osdc->lock);
3495 }
3496 
3497 static void set_pool_was_full(struct ceph_osd_client *osdc)
3498 {
3499 	struct rb_node *n;
3500 
3501 	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3502 		struct ceph_pg_pool_info *pi =
3503 		    rb_entry(n, struct ceph_pg_pool_info, node);
3504 
3505 		pi->was_full = __pool_full(pi);
3506 	}
3507 }
3508 
3509 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3510 {
3511 	struct ceph_pg_pool_info *pi;
3512 
3513 	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3514 	if (!pi)
3515 		return false;
3516 
3517 	return pi->was_full && !__pool_full(pi);
3518 }
3519 
3520 static enum calc_target_result
3521 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3522 {
3523 	struct ceph_osd_client *osdc = lreq->osdc;
3524 	enum calc_target_result ct_res;
3525 
3526 	ct_res = calc_target(osdc, &lreq->t, NULL, true);
3527 	if (ct_res == CALC_TARGET_NEED_RESEND) {
3528 		struct ceph_osd *osd;
3529 
3530 		osd = lookup_create_osd(osdc, lreq->t.osd, true);
3531 		if (osd != lreq->osd) {
3532 			unlink_linger(lreq->osd, lreq);
3533 			link_linger(osd, lreq);
3534 		}
3535 	}
3536 
3537 	return ct_res;
3538 }
3539 
3540 /*
3541  * Requeue requests whose mapping to an OSD has changed.
3542  */
3543 static void scan_requests(struct ceph_osd *osd,
3544 			  bool force_resend,
3545 			  bool cleared_full,
3546 			  bool check_pool_cleared_full,
3547 			  struct rb_root *need_resend,
3548 			  struct list_head *need_resend_linger)
3549 {
3550 	struct ceph_osd_client *osdc = osd->o_osdc;
3551 	struct rb_node *n;
3552 	bool force_resend_writes;
3553 
3554 	for (n = rb_first(&osd->o_linger_requests); n; ) {
3555 		struct ceph_osd_linger_request *lreq =
3556 		    rb_entry(n, struct ceph_osd_linger_request, node);
3557 		enum calc_target_result ct_res;
3558 
3559 		n = rb_next(n); /* recalc_linger_target() */
3560 
3561 		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3562 		     lreq->linger_id);
3563 		ct_res = recalc_linger_target(lreq);
3564 		switch (ct_res) {
3565 		case CALC_TARGET_NO_ACTION:
3566 			force_resend_writes = cleared_full ||
3567 			    (check_pool_cleared_full &&
3568 			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3569 			if (!force_resend && !force_resend_writes)
3570 				break;
3571 
3572 			/* fall through */
3573 		case CALC_TARGET_NEED_RESEND:
3574 			cancel_linger_map_check(lreq);
3575 			/*
3576 			 * scan_requests() for the previous epoch(s)
3577 			 * may have already added it to the list, since
3578 			 * it's not unlinked here.
3579 			 */
3580 			if (list_empty(&lreq->scan_item))
3581 				list_add_tail(&lreq->scan_item, need_resend_linger);
3582 			break;
3583 		case CALC_TARGET_POOL_DNE:
3584 			list_del_init(&lreq->scan_item);
3585 			check_linger_pool_dne(lreq);
3586 			break;
3587 		}
3588 	}
3589 
3590 	for (n = rb_first(&osd->o_requests); n; ) {
3591 		struct ceph_osd_request *req =
3592 		    rb_entry(n, struct ceph_osd_request, r_node);
3593 		enum calc_target_result ct_res;
3594 
3595 		n = rb_next(n); /* unlink_request(), check_pool_dne() */
3596 
3597 		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3598 		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3599 				     false);
3600 		switch (ct_res) {
3601 		case CALC_TARGET_NO_ACTION:
3602 			force_resend_writes = cleared_full ||
3603 			    (check_pool_cleared_full &&
3604 			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3605 			if (!force_resend &&
3606 			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3607 			     !force_resend_writes))
3608 				break;
3609 
3610 			/* fall through */
3611 		case CALC_TARGET_NEED_RESEND:
3612 			cancel_map_check(req);
3613 			unlink_request(osd, req);
3614 			insert_request(need_resend, req);
3615 			break;
3616 		case CALC_TARGET_POOL_DNE:
3617 			check_pool_dne(req);
3618 			break;
3619 		}
3620 	}
3621 }
3622 
3623 static int handle_one_map(struct ceph_osd_client *osdc,
3624 			  void *p, void *end, bool incremental,
3625 			  struct rb_root *need_resend,
3626 			  struct list_head *need_resend_linger)
3627 {
3628 	struct ceph_osdmap *newmap;
3629 	struct rb_node *n;
3630 	bool skipped_map = false;
3631 	bool was_full;
3632 
3633 	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3634 	set_pool_was_full(osdc);
3635 
3636 	if (incremental)
3637 		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3638 	else
3639 		newmap = ceph_osdmap_decode(&p, end);
3640 	if (IS_ERR(newmap))
3641 		return PTR_ERR(newmap);
3642 
3643 	if (newmap != osdc->osdmap) {
3644 		/*
3645 		 * Preserve ->was_full before destroying the old map.
3646 		 * For pools that weren't in the old map, ->was_full
3647 		 * should be false.
3648 		 */
3649 		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3650 			struct ceph_pg_pool_info *pi =
3651 			    rb_entry(n, struct ceph_pg_pool_info, node);
3652 			struct ceph_pg_pool_info *old_pi;
3653 
3654 			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3655 			if (old_pi)
3656 				pi->was_full = old_pi->was_full;
3657 			else
3658 				WARN_ON(pi->was_full);
3659 		}
3660 
3661 		if (osdc->osdmap->epoch &&
3662 		    osdc->osdmap->epoch + 1 < newmap->epoch) {
3663 			WARN_ON(incremental);
3664 			skipped_map = true;
3665 		}
3666 
3667 		ceph_osdmap_destroy(osdc->osdmap);
3668 		osdc->osdmap = newmap;
3669 	}
3670 
3671 	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3672 	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3673 		      need_resend, need_resend_linger);
3674 
3675 	for (n = rb_first(&osdc->osds); n; ) {
3676 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3677 
3678 		n = rb_next(n); /* close_osd() */
3679 
3680 		scan_requests(osd, skipped_map, was_full, true, need_resend,
3681 			      need_resend_linger);
3682 		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3683 		    memcmp(&osd->o_con.peer_addr,
3684 			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
3685 			   sizeof(struct ceph_entity_addr)))
3686 			close_osd(osd);
3687 	}
3688 
3689 	return 0;
3690 }
3691 
3692 static void kick_requests(struct ceph_osd_client *osdc,
3693 			  struct rb_root *need_resend,
3694 			  struct list_head *need_resend_linger)
3695 {
3696 	struct ceph_osd_linger_request *lreq, *nlreq;
3697 	enum calc_target_result ct_res;
3698 	struct rb_node *n;
3699 
3700 	/* make sure need_resend targets reflect latest map */
3701 	for (n = rb_first(need_resend); n; ) {
3702 		struct ceph_osd_request *req =
3703 		    rb_entry(n, struct ceph_osd_request, r_node);
3704 
3705 		n = rb_next(n);
3706 
3707 		if (req->r_t.epoch < osdc->osdmap->epoch) {
3708 			ct_res = calc_target(osdc, &req->r_t, NULL, false);
3709 			if (ct_res == CALC_TARGET_POOL_DNE) {
3710 				erase_request(need_resend, req);
3711 				check_pool_dne(req);
3712 			}
3713 		}
3714 	}
3715 
3716 	for (n = rb_first(need_resend); n; ) {
3717 		struct ceph_osd_request *req =
3718 		    rb_entry(n, struct ceph_osd_request, r_node);
3719 		struct ceph_osd *osd;
3720 
3721 		n = rb_next(n);
3722 		erase_request(need_resend, req); /* before link_request() */
3723 
3724 		osd = lookup_create_osd(osdc, req->r_t.osd, true);
3725 		link_request(osd, req);
3726 		if (!req->r_linger) {
3727 			if (!osd_homeless(osd) && !req->r_t.paused)
3728 				send_request(req);
3729 		} else {
3730 			cancel_linger_request(req);
3731 		}
3732 	}
3733 
3734 	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3735 		if (!osd_homeless(lreq->osd))
3736 			send_linger(lreq);
3737 
3738 		list_del_init(&lreq->scan_item);
3739 	}
3740 }
3741 
3742 /*
3743  * Process updated osd map.
3744  *
3745  * The message contains any number of incremental and full maps, normally
3746  * indicating some sort of topology change in the cluster.  Kick requests
3747  * off to different OSDs as needed.
3748  */
3749 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3750 {
3751 	void *p = msg->front.iov_base;
3752 	void *const end = p + msg->front.iov_len;
3753 	u32 nr_maps, maplen;
3754 	u32 epoch;
3755 	struct ceph_fsid fsid;
3756 	struct rb_root need_resend = RB_ROOT;
3757 	LIST_HEAD(need_resend_linger);
3758 	bool handled_incremental = false;
3759 	bool was_pauserd, was_pausewr;
3760 	bool pauserd, pausewr;
3761 	int err;
3762 
3763 	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3764 	down_write(&osdc->lock);
3765 
3766 	/* verify fsid */
3767 	ceph_decode_need(&p, end, sizeof(fsid), bad);
3768 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
3769 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
3770 		goto bad;
3771 
3772 	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3773 	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3774 		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3775 		      have_pool_full(osdc);
3776 
3777 	/* incremental maps */
3778 	ceph_decode_32_safe(&p, end, nr_maps, bad);
3779 	dout(" %d inc maps\n", nr_maps);
3780 	while (nr_maps > 0) {
3781 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3782 		epoch = ceph_decode_32(&p);
3783 		maplen = ceph_decode_32(&p);
3784 		ceph_decode_need(&p, end, maplen, bad);
3785 		if (osdc->osdmap->epoch &&
3786 		    osdc->osdmap->epoch + 1 == epoch) {
3787 			dout("applying incremental map %u len %d\n",
3788 			     epoch, maplen);
3789 			err = handle_one_map(osdc, p, p + maplen, true,
3790 					     &need_resend, &need_resend_linger);
3791 			if (err)
3792 				goto bad;
3793 			handled_incremental = true;
3794 		} else {
3795 			dout("ignoring incremental map %u len %d\n",
3796 			     epoch, maplen);
3797 		}
3798 		p += maplen;
3799 		nr_maps--;
3800 	}
3801 	if (handled_incremental)
3802 		goto done;
3803 
3804 	/* full maps */
3805 	ceph_decode_32_safe(&p, end, nr_maps, bad);
3806 	dout(" %d full maps\n", nr_maps);
3807 	while (nr_maps) {
3808 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3809 		epoch = ceph_decode_32(&p);
3810 		maplen = ceph_decode_32(&p);
3811 		ceph_decode_need(&p, end, maplen, bad);
3812 		if (nr_maps > 1) {
3813 			dout("skipping non-latest full map %u len %d\n",
3814 			     epoch, maplen);
3815 		} else if (osdc->osdmap->epoch >= epoch) {
3816 			dout("skipping full map %u len %d, "
3817 			     "older than our %u\n", epoch, maplen,
3818 			     osdc->osdmap->epoch);
3819 		} else {
3820 			dout("taking full map %u len %d\n", epoch, maplen);
3821 			err = handle_one_map(osdc, p, p + maplen, false,
3822 					     &need_resend, &need_resend_linger);
3823 			if (err)
3824 				goto bad;
3825 		}
3826 		p += maplen;
3827 		nr_maps--;
3828 	}
3829 
3830 done:
3831 	/*
3832 	 * subscribe to subsequent osdmap updates if full to ensure
3833 	 * we find out when we are no longer full and stop returning
3834 	 * ENOSPC.
3835 	 */
3836 	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3837 	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3838 		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3839 		  have_pool_full(osdc);
3840 	if (was_pauserd || was_pausewr || pauserd || pausewr ||
3841 	    osdc->osdmap->epoch < osdc->epoch_barrier)
3842 		maybe_request_map(osdc);
3843 
3844 	kick_requests(osdc, &need_resend, &need_resend_linger);
3845 
3846 	ceph_osdc_abort_on_full(osdc);
3847 	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3848 			  osdc->osdmap->epoch);
3849 	up_write(&osdc->lock);
3850 	wake_up_all(&osdc->client->auth_wq);
3851 	return;
3852 
3853 bad:
3854 	pr_err("osdc handle_map corrupt msg\n");
3855 	ceph_msg_dump(msg);
3856 	up_write(&osdc->lock);
3857 }
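/*
 * For reference, the front of the OSDMAP message decoded above has the
 * following layout (a sketch implied by the decode calls, not an
 * authoritative wire-format definition):
 *
 *	struct ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	struct { u32 epoch; u32 maplen; u8 data[maplen]; } inc[nr_inc_maps];
 *	u32 nr_full_maps;
 *	struct { u32 epoch; u32 maplen; u8 data[maplen]; } full[nr_full_maps];
 */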
3858 
3859 /*
3860  * Resubmit requests pending on the given osd.
3861  */
3862 static void kick_osd_requests(struct ceph_osd *osd)
3863 {
3864 	struct rb_node *n;
3865 
3866 	clear_backoffs(osd);
3867 
3868 	for (n = rb_first(&osd->o_requests); n; ) {
3869 		struct ceph_osd_request *req =
3870 		    rb_entry(n, struct ceph_osd_request, r_node);
3871 
3872 		n = rb_next(n); /* cancel_linger_request() */
3873 
3874 		if (!req->r_linger) {
3875 			if (!req->r_t.paused)
3876 				send_request(req);
3877 		} else {
3878 			cancel_linger_request(req);
3879 		}
3880 	}
3881 	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3882 		struct ceph_osd_linger_request *lreq =
3883 		    rb_entry(n, struct ceph_osd_linger_request, node);
3884 
3885 		send_linger(lreq);
3886 	}
3887 }
3888 
3889 /*
3890  * If the osd connection drops, we need to resubmit all requests.
3891  */
3892 static void osd_fault(struct ceph_connection *con)
3893 {
3894 	struct ceph_osd *osd = con->private;
3895 	struct ceph_osd_client *osdc = osd->o_osdc;
3896 
3897 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3898 
3899 	down_write(&osdc->lock);
3900 	if (!osd_registered(osd)) {
3901 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
3902 		goto out_unlock;
3903 	}
3904 
3905 	if (!reopen_osd(osd))
3906 		kick_osd_requests(osd);
3907 	maybe_request_map(osdc);
3908 
3909 out_unlock:
3910 	up_write(&osdc->lock);
3911 }
3912 
3913 struct MOSDBackoff {
3914 	struct ceph_spg spgid;
3915 	u32 map_epoch;
3916 	u8 op;
3917 	u64 id;
3918 	struct ceph_hobject_id *begin;
3919 	struct ceph_hobject_id *end;
3920 };
3921 
3922 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
3923 {
3924 	void *p = msg->front.iov_base;
3925 	void *const end = p + msg->front.iov_len;
3926 	u8 struct_v;
3927 	u32 struct_len;
3928 	int ret;
3929 
3930 	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
3931 	if (ret)
3932 		return ret;
3933 
3934 	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
3935 	if (ret)
3936 		return ret;
3937 
3938 	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
3939 	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
3940 	ceph_decode_8_safe(&p, end, m->op, e_inval);
3941 	ceph_decode_64_safe(&p, end, m->id, e_inval);
3942 
3943 	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
3944 	if (!m->begin)
3945 		return -ENOMEM;
3946 
3947 	ret = decode_hoid(&p, end, m->begin);
3948 	if (ret) {
3949 		free_hoid(m->begin);
3950 		return ret;
3951 	}
3952 
3953 	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
3954 	if (!m->end) {
3955 		free_hoid(m->begin);
3956 		return -ENOMEM;
3957 	}
3958 
3959 	ret = decode_hoid(&p, end, m->end);
3960 	if (ret) {
3961 		free_hoid(m->begin);
3962 		free_hoid(m->end);
3963 		return ret;
3964 	}
3965 
3966 	return 0;
3967 
3968 e_inval:
3969 	return -EINVAL;
3970 }
3971 
3972 static struct ceph_msg *create_backoff_message(
3973 				const struct ceph_osd_backoff *backoff,
3974 				u32 map_epoch)
3975 {
3976 	struct ceph_msg *msg;
3977 	void *p, *end;
3978 	int msg_size;
3979 
3980 	msg_size = CEPH_ENCODING_START_BLK_LEN +
3981 			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
3982 	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
3983 	msg_size += CEPH_ENCODING_START_BLK_LEN +
3984 			hoid_encoding_size(backoff->begin);
3985 	msg_size += CEPH_ENCODING_START_BLK_LEN +
3986 			hoid_encoding_size(backoff->end);
3987 
3988 	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
3989 	if (!msg)
3990 		return NULL;
3991 
3992 	p = msg->front.iov_base;
3993 	end = p + msg->front_alloc_len;
3994 
3995 	encode_spgid(&p, &backoff->spgid);
3996 	ceph_encode_32(&p, map_epoch);
3997 	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
3998 	ceph_encode_64(&p, backoff->id);
3999 	encode_hoid(&p, end, backoff->begin);
4000 	encode_hoid(&p, end, backoff->end);
4001 	BUG_ON(p != end);
4002 
4003 	msg->front.iov_len = p - msg->front.iov_base;
4004 	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4005 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4006 
4007 	return msg;
4008 }
4009 
4010 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4011 {
4012 	struct ceph_spg_mapping *spg;
4013 	struct ceph_osd_backoff *backoff;
4014 	struct ceph_msg *msg;
4015 
4016 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4017 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4018 
4019 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4020 	if (!spg) {
4021 		spg = alloc_spg_mapping();
4022 		if (!spg) {
4023 			pr_err("%s failed to allocate spg\n", __func__);
4024 			return;
4025 		}
4026 		spg->spgid = m->spgid; /* struct */
4027 		insert_spg_mapping(&osd->o_backoff_mappings, spg);
4028 	}
4029 
4030 	backoff = alloc_backoff();
4031 	if (!backoff) {
4032 		pr_err("%s failed to allocate backoff\n", __func__);
4033 		return;
4034 	}
4035 	backoff->spgid = m->spgid; /* struct */
4036 	backoff->id = m->id;
4037 	backoff->begin = m->begin;
4038 	m->begin = NULL; /* backoff now owns this */
4039 	backoff->end = m->end;
4040 	m->end = NULL;   /* ditto */
4041 
4042 	insert_backoff(&spg->backoffs, backoff);
4043 	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4044 
4045 	/*
4046 	 * Ack with original backoff's epoch so that the OSD can
4047 	 * discard this ack if there was a PG split.
4048 	 */
4049 	msg = create_backoff_message(backoff, m->map_epoch);
4050 	if (!msg) {
4051 		pr_err("%s failed to allocate msg\n", __func__);
4052 		return;
4053 	}
4054 	ceph_con_send(&osd->o_con, msg);
4055 }
4056 
4057 static bool target_contained_by(const struct ceph_osd_request_target *t,
4058 				const struct ceph_hobject_id *begin,
4059 				const struct ceph_hobject_id *end)
4060 {
4061 	struct ceph_hobject_id hoid;
4062 	int cmp;
4063 
4064 	hoid_fill_from_target(&hoid, t);
4065 	cmp = hoid_compare(&hoid, begin);
4066 	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4067 }
4068 
4069 static void handle_backoff_unblock(struct ceph_osd *osd,
4070 				   const struct MOSDBackoff *m)
4071 {
4072 	struct ceph_spg_mapping *spg;
4073 	struct ceph_osd_backoff *backoff;
4074 	struct rb_node *n;
4075 
4076 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4077 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4078 
4079 	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4080 	if (!backoff) {
4081 		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4082 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4083 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4084 		return;
4085 	}
4086 
4087 	if (hoid_compare(backoff->begin, m->begin) &&
4088 	    hoid_compare(backoff->end, m->end)) {
4089 		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4090 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4091 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4092 		/* unblock it anyway... */
4093 	}
4094 
4095 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4096 	BUG_ON(!spg);
4097 
4098 	erase_backoff(&spg->backoffs, backoff);
4099 	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4100 	free_backoff(backoff);
4101 
4102 	if (RB_EMPTY_ROOT(&spg->backoffs)) {
4103 		erase_spg_mapping(&osd->o_backoff_mappings, spg);
4104 		free_spg_mapping(spg);
4105 	}
4106 
4107 	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4108 		struct ceph_osd_request *req =
4109 		    rb_entry(n, struct ceph_osd_request, r_node);
4110 
4111 		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4112 			/*
4113 			 * Match against @m, not @backoff -- the PG may
4114 			 * have split on the OSD.
4115 			 */
4116 			if (target_contained_by(&req->r_t, m->begin, m->end)) {
4117 				/*
4118 				 * If no other installed backoff applies,
4119 				 * resend.
4120 				 */
4121 				send_request(req);
4122 			}
4123 		}
4124 	}
4125 }
4126 
4127 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4128 {
4129 	struct ceph_osd_client *osdc = osd->o_osdc;
4130 	struct MOSDBackoff m;
4131 	int ret;
4132 
4133 	down_read(&osdc->lock);
4134 	if (!osd_registered(osd)) {
4135 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
4136 		up_read(&osdc->lock);
4137 		return;
4138 	}
4139 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4140 
4141 	mutex_lock(&osd->lock);
4142 	ret = decode_MOSDBackoff(msg, &m);
4143 	if (ret) {
4144 		pr_err("failed to decode MOSDBackoff: %d\n", ret);
4145 		ceph_msg_dump(msg);
4146 		goto out_unlock;
4147 	}
4148 
4149 	switch (m.op) {
4150 	case CEPH_OSD_BACKOFF_OP_BLOCK:
4151 		handle_backoff_block(osd, &m);
4152 		break;
4153 	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4154 		handle_backoff_unblock(osd, &m);
4155 		break;
4156 	default:
4157 		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4158 	}
4159 
4160 	free_hoid(m.begin);
4161 	free_hoid(m.end);
4162 
4163 out_unlock:
4164 	mutex_unlock(&osd->lock);
4165 	up_read(&osdc->lock);
4166 }
4167 
4168 /*
4169  * Process osd watch notifications
4170  */
4171 static void handle_watch_notify(struct ceph_osd_client *osdc,
4172 				struct ceph_msg *msg)
4173 {
4174 	void *p = msg->front.iov_base;
4175 	void *const end = p + msg->front.iov_len;
4176 	struct ceph_osd_linger_request *lreq;
4177 	struct linger_work *lwork;
4178 	u8 proto_ver, opcode;
4179 	u64 cookie, notify_id;
4180 	u64 notifier_id = 0;
4181 	s32 return_code = 0;
4182 	void *payload = NULL;
4183 	u32 payload_len = 0;
4184 
4185 	ceph_decode_8_safe(&p, end, proto_ver, bad);
4186 	ceph_decode_8_safe(&p, end, opcode, bad);
4187 	ceph_decode_64_safe(&p, end, cookie, bad);
4188 	p += 8; /* skip ver */
4189 	ceph_decode_64_safe(&p, end, notify_id, bad);
4190 
4191 	if (proto_ver >= 1) {
4192 		ceph_decode_32_safe(&p, end, payload_len, bad);
4193 		ceph_decode_need(&p, end, payload_len, bad);
4194 		payload = p;
4195 		p += payload_len;
4196 	}
4197 
4198 	if (le16_to_cpu(msg->hdr.version) >= 2)
4199 		ceph_decode_32_safe(&p, end, return_code, bad);
4200 
4201 	if (le16_to_cpu(msg->hdr.version) >= 3)
4202 		ceph_decode_64_safe(&p, end, notifier_id, bad);
4203 
4204 	down_read(&osdc->lock);
4205 	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4206 	if (!lreq) {
4207 		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4208 		     cookie);
4209 		goto out_unlock_osdc;
4210 	}
4211 
4212 	mutex_lock(&lreq->lock);
4213 	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4214 	     opcode, cookie, lreq, lreq->is_watch);
4215 	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4216 		if (!lreq->last_error) {
4217 			lreq->last_error = -ENOTCONN;
4218 			queue_watch_error(lreq);
4219 		}
4220 	} else if (!lreq->is_watch) {
4221 		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4222 		if (lreq->notify_id && lreq->notify_id != notify_id) {
4223 			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4224 			     lreq->notify_id, notify_id);
4225 		} else if (!completion_done(&lreq->notify_finish_wait)) {
4226 			struct ceph_msg_data *data =
4227 			    list_first_entry_or_null(&msg->data,
4228 						     struct ceph_msg_data,
4229 						     links);
4230 
4231 			if (data) {
4232 				if (lreq->preply_pages) {
4233 					WARN_ON(data->type !=
4234 							CEPH_MSG_DATA_PAGES);
4235 					*lreq->preply_pages = data->pages;
4236 					*lreq->preply_len = data->length;
4237 				} else {
4238 					ceph_release_page_vector(data->pages,
4239 					       calc_pages_for(0, data->length));
4240 				}
4241 			}
4242 			lreq->notify_finish_error = return_code;
4243 			complete_all(&lreq->notify_finish_wait);
4244 		}
4245 	} else {
4246 		/* CEPH_WATCH_EVENT_NOTIFY */
4247 		lwork = lwork_alloc(lreq, do_watch_notify);
4248 		if (!lwork) {
4249 			pr_err("failed to allocate notify-lwork\n");
4250 			goto out_unlock_lreq;
4251 		}
4252 
4253 		lwork->notify.notify_id = notify_id;
4254 		lwork->notify.notifier_id = notifier_id;
4255 		lwork->notify.payload = payload;
4256 		lwork->notify.payload_len = payload_len;
4257 		lwork->notify.msg = ceph_msg_get(msg);
4258 		lwork_queue(lwork);
4259 	}
4260 
4261 out_unlock_lreq:
4262 	mutex_unlock(&lreq->lock);
4263 out_unlock_osdc:
4264 	up_read(&osdc->lock);
4265 	return;
4266 
4267 bad:
4268 	pr_err("osdc handle_watch_notify corrupt msg\n");
4269 }
4270 
4271 /*
4272  * Register request, send initial attempt.
4273  */
4274 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4275 			    struct ceph_osd_request *req,
4276 			    bool nofail)
4277 {
4278 	down_read(&osdc->lock);
4279 	submit_request(req, false);
4280 	up_read(&osdc->lock);
4281 
4282 	return 0;
4283 }
4284 EXPORT_SYMBOL(ceph_osdc_start_request);
4285 
4286 /*
4287  * Unregister a registered request.  The request is not completed:
4288  * ->r_result isn't set and __complete_request() isn't called.
4289  */
4290 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4291 {
4292 	struct ceph_osd_client *osdc = req->r_osdc;
4293 
4294 	down_write(&osdc->lock);
4295 	if (req->r_osd)
4296 		cancel_request(req);
4297 	up_write(&osdc->lock);
4298 }
4299 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4300 
4301 /*
4302  * @timeout: in jiffies, 0 means "wait forever"
4303  */
4304 static int wait_request_timeout(struct ceph_osd_request *req,
4305 				unsigned long timeout)
4306 {
4307 	long left;
4308 
4309 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4310 	left = wait_for_completion_killable_timeout(&req->r_completion,
4311 						ceph_timeout_jiffies(timeout));
4312 	if (left <= 0) {
4313 		left = left ?: -ETIMEDOUT;
4314 		ceph_osdc_cancel_request(req);
4315 	} else {
4316 		left = req->r_result; /* completed */
4317 	}
4318 
4319 	return left;
4320 }
4321 
4322 /*
4323  * wait for a request to complete
4324  */
4325 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4326 			   struct ceph_osd_request *req)
4327 {
4328 	return wait_request_timeout(req, 0);
4329 }
4330 EXPORT_SYMBOL(ceph_osdc_wait_request);
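/*
 * A minimal synchronous-submission sketch.  Request setup via
 * ceph_osdc_alloc_request()/ceph_osdc_alloc_messages() and error
 * handling are elided -- see e.g. ceph_osdc_notify_ack() below for
 * the full pattern.
 */
#if 0
	ceph_osdc_start_request(osdc, req, false);	/* currently always 0 */
	ret = ceph_osdc_wait_request(osdc, req);	/* r_result, or a wait error */
	ceph_osdc_put_request(req);
#endif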
4331 
4332 /*
4333  * sync - wait for all in-flight write requests to flush.  Only tids up to the tid sampled at entry are waited on, to avoid starvation.
4334  */
4335 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4336 {
4337 	struct rb_node *n, *p;
4338 	u64 last_tid = atomic64_read(&osdc->last_tid);
4339 
4340 again:
4341 	down_read(&osdc->lock);
4342 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4343 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4344 
4345 		mutex_lock(&osd->lock);
4346 		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4347 			struct ceph_osd_request *req =
4348 			    rb_entry(p, struct ceph_osd_request, r_node);
4349 
4350 			if (req->r_tid > last_tid)
4351 				break;
4352 
4353 			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4354 				continue;
4355 
4356 			ceph_osdc_get_request(req);
4357 			mutex_unlock(&osd->lock);
4358 			up_read(&osdc->lock);
4359 			dout("%s waiting on req %p tid %llu last_tid %llu\n",
4360 			     __func__, req, req->r_tid, last_tid);
4361 			wait_for_completion(&req->r_completion);
4362 			ceph_osdc_put_request(req);
4363 			goto again;
4364 		}
4365 
4366 		mutex_unlock(&osd->lock);
4367 	}
4368 
4369 	up_read(&osdc->lock);
4370 	dout("%s done last_tid %llu\n", __func__, last_tid);
4371 }
4372 EXPORT_SYMBOL(ceph_osdc_sync);
4373 
4374 static struct ceph_osd_request *
4375 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4376 {
4377 	struct ceph_osd_request *req;
4378 
4379 	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4380 	if (!req)
4381 		return NULL;
4382 
4383 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4384 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4385 
4386 	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4387 		ceph_osdc_put_request(req);
4388 		return NULL;
4389 	}
4390 
4391 	return req;
4392 }
4393 
4394 /*
4395  * Returns a handle, caller owns a ref.
4396  */
4397 struct ceph_osd_linger_request *
4398 ceph_osdc_watch(struct ceph_osd_client *osdc,
4399 		struct ceph_object_id *oid,
4400 		struct ceph_object_locator *oloc,
4401 		rados_watchcb2_t wcb,
4402 		rados_watcherrcb_t errcb,
4403 		void *data)
4404 {
4405 	struct ceph_osd_linger_request *lreq;
4406 	int ret;
4407 
4408 	lreq = linger_alloc(osdc);
4409 	if (!lreq)
4410 		return ERR_PTR(-ENOMEM);
4411 
4412 	lreq->is_watch = true;
4413 	lreq->wcb = wcb;
4414 	lreq->errcb = errcb;
4415 	lreq->data = data;
4416 	lreq->watch_valid_thru = jiffies;
4417 
4418 	ceph_oid_copy(&lreq->t.base_oid, oid);
4419 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4420 	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4421 	ktime_get_real_ts(&lreq->mtime);
4422 
4423 	lreq->reg_req = alloc_linger_request(lreq);
4424 	if (!lreq->reg_req) {
4425 		ret = -ENOMEM;
4426 		goto err_put_lreq;
4427 	}
4428 
4429 	lreq->ping_req = alloc_linger_request(lreq);
4430 	if (!lreq->ping_req) {
4431 		ret = -ENOMEM;
4432 		goto err_put_lreq;
4433 	}
4434 
4435 	down_write(&osdc->lock);
4436 	linger_register(lreq); /* before osd_req_op_* */
4437 	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4438 			      CEPH_OSD_WATCH_OP_WATCH);
4439 	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4440 			      CEPH_OSD_WATCH_OP_PING);
4441 	linger_submit(lreq);
4442 	up_write(&osdc->lock);
4443 
4444 	ret = linger_reg_commit_wait(lreq);
4445 	if (ret) {
4446 		linger_cancel(lreq);
4447 		goto err_put_lreq;
4448 	}
4449 
4450 	return lreq;
4451 
4452 err_put_lreq:
4453 	linger_put(lreq);
4454 	return ERR_PTR(ret);
4455 }
4456 EXPORT_SYMBOL(ceph_osdc_watch);
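/*
 * Example usage (sketch): establish a watch and tear it down.
 * my_watch_cb()/my_watch_err() are placeholder callbacks with the
 * rados_watchcb2_t/rados_watcherrcb_t prototypes from the libceph
 * headers; oid/oloc identify the watched object.
 */
#if 0
	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
				 my_watch_err, mydata);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... watch established, callbacks may fire ... */

	ceph_osdc_unwatch(osdc, handle);	/* drops the handle's ref */
#endif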
4457 
4458 /*
4459  * Releases a ref.
4460  *
4461  * Times out after mount_timeout to preserve rbd unmap behaviour
4462  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4463  * with mount_timeout").
4464  */
4465 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4466 		      struct ceph_osd_linger_request *lreq)
4467 {
4468 	struct ceph_options *opts = osdc->client->options;
4469 	struct ceph_osd_request *req;
4470 	int ret;
4471 
4472 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4473 	if (!req)
4474 		return -ENOMEM;
4475 
4476 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4477 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4478 	req->r_flags = CEPH_OSD_FLAG_WRITE;
4479 	ktime_get_real_ts(&req->r_mtime);
4480 	osd_req_op_watch_init(req, 0, lreq->linger_id,
4481 			      CEPH_OSD_WATCH_OP_UNWATCH);
4482 
4483 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4484 	if (ret)
4485 		goto out_put_req;
4486 
4487 	ceph_osdc_start_request(osdc, req, false);
4488 	linger_cancel(lreq);
4489 	linger_put(lreq);
4490 	ret = wait_request_timeout(req, opts->mount_timeout);
4491 
4492 out_put_req:
4493 	ceph_osdc_put_request(req);
4494 	return ret;
4495 }
4496 EXPORT_SYMBOL(ceph_osdc_unwatch);
4497 
4498 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4499 				      u64 notify_id, u64 cookie, void *payload,
4500 				      size_t payload_len)
4501 {
4502 	struct ceph_osd_req_op *op;
4503 	struct ceph_pagelist *pl;
4504 	int ret;
4505 
4506 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4507 
4508 	pl = kmalloc(sizeof(*pl), GFP_NOIO);
4509 	if (!pl)
4510 		return -ENOMEM;
4511 
4512 	ceph_pagelist_init(pl);
4513 	ret = ceph_pagelist_encode_64(pl, notify_id);
4514 	ret |= ceph_pagelist_encode_64(pl, cookie);
4515 	if (payload) {
4516 		ret |= ceph_pagelist_encode_32(pl, payload_len);
4517 		ret |= ceph_pagelist_append(pl, payload, payload_len);
4518 	} else {
4519 		ret |= ceph_pagelist_encode_32(pl, 0);
4520 	}
4521 	if (ret) {
4522 		ceph_pagelist_release(pl);
4523 		return -ENOMEM;
4524 	}
4525 
4526 	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4527 	op->indata_len = pl->length;
4528 	return 0;
4529 }
4530 
4531 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4532 			 struct ceph_object_id *oid,
4533 			 struct ceph_object_locator *oloc,
4534 			 u64 notify_id,
4535 			 u64 cookie,
4536 			 void *payload,
4537 			 size_t payload_len)
4538 {
4539 	struct ceph_osd_request *req;
4540 	int ret;
4541 
4542 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4543 	if (!req)
4544 		return -ENOMEM;
4545 
4546 	ceph_oid_copy(&req->r_base_oid, oid);
4547 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4548 	req->r_flags = CEPH_OSD_FLAG_READ;
4549 
4550 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4551 	if (ret)
4552 		goto out_put_req;
4553 
4554 	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4555 					 payload_len);
4556 	if (ret)
4557 		goto out_put_req;
4558 
4559 	ceph_osdc_start_request(osdc, req, false);
4560 	ret = ceph_osdc_wait_request(osdc, req);
4561 
4562 out_put_req:
4563 	ceph_osdc_put_request(req);
4564 	return ret;
4565 }
4566 EXPORT_SYMBOL(ceph_osdc_notify_ack);
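/*
 * Example (sketch): acking a notify from a watch callback so that the
 * notifier is unblocked.  The callback prototype is assumed from the
 * rados_watchcb2_t typedef in the libceph headers; struct my_ctx is a
 * placeholder for the caller's context.
 */
#if 0
static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
			u64 notifier_id, void *data, size_t data_len)
{
	struct my_ctx *ctx = arg;

	/* ... process the payload in data/data_len ... */

	ceph_osdc_notify_ack(ctx->osdc, &ctx->oid, &ctx->oloc,
			     notify_id, cookie, NULL, 0);
}
#endif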
4567 
4568 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4569 				  u64 cookie, u32 prot_ver, u32 timeout,
4570 				  void *payload, size_t payload_len)
4571 {
4572 	struct ceph_osd_req_op *op;
4573 	struct ceph_pagelist *pl;
4574 	int ret;
4575 
4576 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4577 	op->notify.cookie = cookie;
4578 
4579 	pl = kmalloc(sizeof(*pl), GFP_NOIO);
4580 	if (!pl)
4581 		return -ENOMEM;
4582 
4583 	ceph_pagelist_init(pl);
4584 	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4585 	ret |= ceph_pagelist_encode_32(pl, timeout);
4586 	ret |= ceph_pagelist_encode_32(pl, payload_len);
4587 	ret |= ceph_pagelist_append(pl, payload, payload_len);
4588 	if (ret) {
4589 		ceph_pagelist_release(pl);
4590 		return -ENOMEM;
4591 	}
4592 
4593 	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4594 	op->indata_len = pl->length;
4595 	return 0;
4596 }
4597 
4598 /*
4599  * @timeout: in seconds
4600  *
4601  * @preply_{pages,len} are initialized both on success and error.
4602  * The caller is responsible for:
4603  *
4604  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4605  */
4606 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4607 		     struct ceph_object_id *oid,
4608 		     struct ceph_object_locator *oloc,
4609 		     void *payload,
4610 		     size_t payload_len,
4611 		     u32 timeout,
4612 		     struct page ***preply_pages,
4613 		     size_t *preply_len)
4614 {
4615 	struct ceph_osd_linger_request *lreq;
4616 	struct page **pages;
4617 	int ret;
4618 
4619 	WARN_ON(!timeout);
4620 	if (preply_pages) {
4621 		*preply_pages = NULL;
4622 		*preply_len = 0;
4623 	}
4624 
4625 	lreq = linger_alloc(osdc);
4626 	if (!lreq)
4627 		return -ENOMEM;
4628 
4629 	lreq->preply_pages = preply_pages;
4630 	lreq->preply_len = preply_len;
4631 
4632 	ceph_oid_copy(&lreq->t.base_oid, oid);
4633 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4634 	lreq->t.flags = CEPH_OSD_FLAG_READ;
4635 
4636 	lreq->reg_req = alloc_linger_request(lreq);
4637 	if (!lreq->reg_req) {
4638 		ret = -ENOMEM;
4639 		goto out_put_lreq;
4640 	}
4641 
4642 	/* for notify_id */
4643 	pages = ceph_alloc_page_vector(1, GFP_NOIO);
4644 	if (IS_ERR(pages)) {
4645 		ret = PTR_ERR(pages);
4646 		goto out_put_lreq;
4647 	}
4648 
4649 	down_write(&osdc->lock);
4650 	linger_register(lreq); /* before osd_req_op_* */
4651 	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
4652 				     timeout, payload, payload_len);
4653 	if (ret) {
4654 		linger_unregister(lreq);
4655 		up_write(&osdc->lock);
4656 		ceph_release_page_vector(pages, 1);
4657 		goto out_put_lreq;
4658 	}
4659 	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4660 						 response_data),
4661 				 pages, PAGE_SIZE, 0, false, true);
4662 	linger_submit(lreq);
4663 	up_write(&osdc->lock);
4664 
4665 	ret = linger_reg_commit_wait(lreq);
4666 	if (!ret)
4667 		ret = linger_notify_finish_wait(lreq);
4668 	else
4669 		dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4670 
4671 	linger_cancel(lreq);
4672 out_put_lreq:
4673 	linger_put(lreq);
4674 	return ret;
4675 }
4676 EXPORT_SYMBOL(ceph_osdc_notify);
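/*
 * Example usage (sketch), following the reply-page contract documented
 * above; payload/payload_len and oid/oloc are assumed to be set up by
 * the caller.
 */
#if 0
	struct page **reply_pages;
	size_t reply_len;
	int ret;

	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
			       10 /* seconds */, &reply_pages, &reply_len);
	/* reply_pages/reply_len are initialized on success and on error */
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
#endif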
4677 
4678 /*
4679  * Return the number of milliseconds since the watch was last
4680  * confirmed, or an error.  If there is an error, the watch is no
4681  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4682  */
4683 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4684 			  struct ceph_osd_linger_request *lreq)
4685 {
4686 	unsigned long stamp, age;
4687 	int ret;
4688 
4689 	down_read(&osdc->lock);
4690 	mutex_lock(&lreq->lock);
4691 	stamp = lreq->watch_valid_thru;
4692 	if (!list_empty(&lreq->pending_lworks)) {
4693 		struct linger_work *lwork =
4694 		    list_first_entry(&lreq->pending_lworks,
4695 				     struct linger_work,
4696 				     pending_item);
4697 
4698 		if (time_before(lwork->queued_stamp, stamp))
4699 			stamp = lwork->queued_stamp;
4700 	}
4701 	age = jiffies - stamp;
4702 	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4703 	     lreq, lreq->linger_id, age, lreq->last_error);
4704 	/* we are truncating to msecs, so return a safe upper bound */
4705 	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4706 
4707 	mutex_unlock(&lreq->lock);
4708 	up_read(&osdc->lock);
4709 	return ret;
4710 }
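/*
 * Example (sketch): a periodic validity check, e.g. from a keepalive
 * worker.  A negative return means the watch is dead and must be torn
 * down (and possibly re-established) by the caller.
 */
#if 0
	int ret = ceph_osdc_watch_check(osdc, handle);

	if (ret < 0)
		ceph_osdc_unwatch(osdc, handle);	/* watch no longer valid */
	else
		dout("watch confirmed %d ms ago\n", ret);
#endif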
4711 
4712 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4713 {
4714 	u8 struct_v;
4715 	u32 struct_len;
4716 	int ret;
4717 
4718 	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4719 				  &struct_v, &struct_len);
4720 	if (ret)
4721 		return ret;
4722 
4723 	ceph_decode_copy(p, &item->name, sizeof(item->name));
4724 	item->cookie = ceph_decode_64(p);
4725 	*p += 4; /* skip timeout_seconds */
4726 	if (struct_v >= 2) {
4727 		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
4728 		ceph_decode_addr(&item->addr);
4729 	}
4730 
4731 	dout("%s %s%llu cookie %llu addr %s\n", __func__,
4732 	     ENTITY_NAME(item->name), item->cookie,
4733 	     ceph_pr_addr(&item->addr.in_addr));
4734 	return 0;
4735 }
4736 
4737 static int decode_watchers(void **p, void *end,
4738 			   struct ceph_watch_item **watchers,
4739 			   u32 *num_watchers)
4740 {
4741 	u8 struct_v;
4742 	u32 struct_len;
4743 	int i;
4744 	int ret;
4745 
4746 	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4747 				  &struct_v, &struct_len);
4748 	if (ret)
4749 		return ret;
4750 
4751 	*num_watchers = ceph_decode_32(p);
4752 	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4753 	if (!*watchers)
4754 		return -ENOMEM;
4755 
4756 	for (i = 0; i < *num_watchers; i++) {
4757 		ret = decode_watcher(p, end, *watchers + i);
4758 		if (ret) {
4759 			kfree(*watchers);
4760 			return ret;
4761 		}
4762 	}
4763 
4764 	return 0;
4765 }
4766 
4767 /*
4768  * On success, the caller is responsible for:
4769  *
4770  *     kfree(watchers);
4771  */
4772 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4773 			    struct ceph_object_id *oid,
4774 			    struct ceph_object_locator *oloc,
4775 			    struct ceph_watch_item **watchers,
4776 			    u32 *num_watchers)
4777 {
4778 	struct ceph_osd_request *req;
4779 	struct page **pages;
4780 	int ret;
4781 
4782 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4783 	if (!req)
4784 		return -ENOMEM;
4785 
4786 	ceph_oid_copy(&req->r_base_oid, oid);
4787 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4788 	req->r_flags = CEPH_OSD_FLAG_READ;
4789 
4790 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4791 	if (ret)
4792 		goto out_put_req;
4793 
4794 	pages = ceph_alloc_page_vector(1, GFP_NOIO);
4795 	if (IS_ERR(pages)) {
4796 		ret = PTR_ERR(pages);
4797 		goto out_put_req;
4798 	}
4799 
4800 	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4801 	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4802 						 response_data),
4803 				 pages, PAGE_SIZE, 0, false, true);
4804 
4805 	ceph_osdc_start_request(osdc, req, false);
4806 	ret = ceph_osdc_wait_request(osdc, req);
4807 	if (ret >= 0) {
4808 		void *p = page_address(pages[0]);
4809 		void *const end = p + req->r_ops[0].outdata_len;
4810 
4811 		ret = decode_watchers(&p, end, watchers, num_watchers);
4812 	}
4813 
4814 out_put_req:
4815 	ceph_osdc_put_request(req);
4816 	return ret;
4817 }
4818 EXPORT_SYMBOL(ceph_osdc_list_watchers);
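/*
 * Example usage (sketch), honouring the kfree() contract documented
 * above; error handling beyond the call itself is elided.
 */
#if 0
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u32 i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	for (i = 0; i < num_watchers; i++)
		dout("watcher %s%llu cookie %llu\n",
		     ENTITY_NAME(watchers[i].name), watchers[i].cookie);

	kfree(watchers);
#endif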
4819 
4820 /*
4821  * Call all pending notify callbacks - for use after a watch is
4822  * unregistered, to make sure no more callbacks for it will be invoked
4823  * unregistered, to make sure no more callbacks for it will be invoked.
4824 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4825 {
4826 	dout("%s osdc %p\n", __func__, osdc);
4827 	flush_workqueue(osdc->notify_wq);
4828 }
4829 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4830 
4831 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4832 {
4833 	down_read(&osdc->lock);
4834 	maybe_request_map(osdc);
4835 	up_read(&osdc->lock);
4836 }
4837 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4838 
4839 /*
4840  * Execute an OSD class method on an object.
4841  *
4842  * @flags: CEPH_OSD_FLAG_*
4843  * @resp_len: in/out param for reply length
4844  */
4845 int ceph_osdc_call(struct ceph_osd_client *osdc,
4846 		   struct ceph_object_id *oid,
4847 		   struct ceph_object_locator *oloc,
4848 		   const char *class, const char *method,
4849 		   unsigned int flags,
4850 		   struct page *req_page, size_t req_len,
4851 		   struct page *resp_page, size_t *resp_len)
4852 {
4853 	struct ceph_osd_request *req;
4854 	int ret;
4855 
4856 	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4857 		return -E2BIG;
4858 
4859 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4860 	if (!req)
4861 		return -ENOMEM;
4862 
4863 	ceph_oid_copy(&req->r_base_oid, oid);
4864 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4865 	req->r_flags = flags;
4866 
4867 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4868 	if (ret)
4869 		goto out_put_req;
4870 
4871 	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4872 	if (req_page)
4873 		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4874 						  0, false, false);
4875 	if (resp_page)
4876 		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4877 						   *resp_len, 0, false, false);
4878 
4879 	ceph_osdc_start_request(osdc, req, false);
4880 	ret = ceph_osdc_wait_request(osdc, req);
4881 	if (ret >= 0) {
4882 		ret = req->r_ops[0].rval;
4883 		if (resp_page)
4884 			*resp_len = req->r_ops[0].outdata_len;
4885 	}
4886 
4887 out_put_req:
4888 	ceph_osdc_put_request(req);
4889 	return ret;
4890 }
4891 EXPORT_SYMBOL(ceph_osdc_call);
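/*
 * Example (sketch): invoking a hypothetical "myclass"/"mymethod" with a
 * one-page request and a one-page reply.  Class/method names and
 * my_decode_reply() are placeholders; req_len must not exceed
 * PAGE_SIZE.
 */
#if 0
	struct page *req_page, *resp_page;
	size_t resp_len = PAGE_SIZE;
	int ret;

	req_page = alloc_page(GFP_NOIO);
	resp_page = alloc_page(GFP_NOIO);
	/* NULL checks elided; encode the input into req_page (req_len bytes) */

	ret = ceph_osdc_call(osdc, &oid, &oloc, "myclass", "mymethod",
			     CEPH_OSD_FLAG_READ, req_page, req_len,
			     resp_page, &resp_len);
	if (ret >= 0)
		/* ret is the method's rval; resp_len is the reply length */
		my_decode_reply(page_address(resp_page), resp_len);

	__free_page(req_page);
	__free_page(resp_page);
#endif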
4892 
4893 /*
4894  * init, shutdown
4895  */
4896 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4897 {
4898 	int err;
4899 
4900 	dout("init\n");
4901 	osdc->client = client;
4902 	init_rwsem(&osdc->lock);
4903 	osdc->osds = RB_ROOT;
4904 	INIT_LIST_HEAD(&osdc->osd_lru);
4905 	spin_lock_init(&osdc->osd_lru_lock);
4906 	osd_init(&osdc->homeless_osd);
4907 	osdc->homeless_osd.o_osdc = osdc;
4908 	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4909 	osdc->last_linger_id = CEPH_LINGER_ID_START;
4910 	osdc->linger_requests = RB_ROOT;
4911 	osdc->map_checks = RB_ROOT;
4912 	osdc->linger_map_checks = RB_ROOT;
4913 	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4914 	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4915 
4916 	err = -ENOMEM;
4917 	osdc->osdmap = ceph_osdmap_alloc();
4918 	if (!osdc->osdmap)
4919 		goto out;
4920 
4921 	osdc->req_mempool = mempool_create_slab_pool(10,
4922 						     ceph_osd_request_cache);
4923 	if (!osdc->req_mempool)
4924 		goto out_map;
4925 
4926 	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4927 				PAGE_SIZE, 10, true, "osd_op");
4928 	if (err < 0)
4929 		goto out_mempool;
4930 	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4931 				PAGE_SIZE, 10, true, "osd_op_reply");
4932 	if (err < 0)
4933 		goto out_msgpool;
4934 
4935 	err = -ENOMEM;
4936 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4937 	if (!osdc->notify_wq)
4938 		goto out_msgpool_reply;
4939 
4940 	schedule_delayed_work(&osdc->timeout_work,
4941 			      osdc->client->options->osd_keepalive_timeout);
4942 	schedule_delayed_work(&osdc->osds_timeout_work,
4943 	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4944 
4945 	return 0;
4946 
4947 out_msgpool_reply:
4948 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4949 out_msgpool:
4950 	ceph_msgpool_destroy(&osdc->msgpool_op);
4951 out_mempool:
4952 	mempool_destroy(osdc->req_mempool);
4953 out_map:
4954 	ceph_osdmap_destroy(osdc->osdmap);
4955 out:
4956 	return err;
4957 }
4958 
4959 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4960 {
4961 	flush_workqueue(osdc->notify_wq);
4962 	destroy_workqueue(osdc->notify_wq);
4963 	cancel_delayed_work_sync(&osdc->timeout_work);
4964 	cancel_delayed_work_sync(&osdc->osds_timeout_work);
4965 
4966 	down_write(&osdc->lock);
4967 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
4968 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4969 						struct ceph_osd, o_node);
4970 		close_osd(osd);
4971 	}
4972 	up_write(&osdc->lock);
4973 	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4974 	osd_cleanup(&osdc->homeless_osd);
4975 
4976 	WARN_ON(!list_empty(&osdc->osd_lru));
4977 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4978 	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4979 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4980 	WARN_ON(atomic_read(&osdc->num_requests));
4981 	WARN_ON(atomic_read(&osdc->num_homeless));
4982 
4983 	ceph_osdmap_destroy(osdc->osdmap);
4984 	mempool_destroy(osdc->req_mempool);
4985 	ceph_msgpool_destroy(&osdc->msgpool_op);
4986 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4987 }
4988 
4989 /*
4990  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4991  * *plen.  Return number of bytes read, or error.
4992  */
4993 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4994 			struct ceph_vino vino, struct ceph_file_layout *layout,
4995 			u64 off, u64 *plen,
4996 			u32 truncate_seq, u64 truncate_size,
4997 			struct page **pages, int num_pages, int page_align)
4998 {
4999 	struct ceph_osd_request *req;
5000 	int rc = 0;
5001 
5002 	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5003 	     vino.snap, off, *plen);
5004 	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5005 				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5006 				    NULL, truncate_seq, truncate_size,
5007 				    false);
5008 	if (IS_ERR(req))
5009 		return PTR_ERR(req);
5010 
5011 	/* it may be a short read due to an object boundary */
5012 	osd_req_op_extent_osd_data_pages(req, 0,
5013 				pages, *plen, page_align, false, false);
5014 
5015 	dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5016 	     off, *plen, *plen, page_align);
5017 
5018 	rc = ceph_osdc_start_request(osdc, req, false);
5019 	if (!rc)
5020 		rc = ceph_osdc_wait_request(osdc, req);
5021 
5022 	ceph_osdc_put_request(req);
5023 	dout("readpages result %d\n", rc);
5024 	return rc;
5025 }
5026 EXPORT_SYMBOL(ceph_osdc_readpages);
5027 
5028 /*
5029  * do a synchronous write on N pages
5030  */
5031 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5032 			 struct ceph_file_layout *layout,
5033 			 struct ceph_snap_context *snapc,
5034 			 u64 off, u64 len,
5035 			 u32 truncate_seq, u64 truncate_size,
5036 			 struct timespec *mtime,
5037 			 struct page **pages, int num_pages)
5038 {
5039 	struct ceph_osd_request *req;
5040 	int rc = 0;
5041 	int page_align = off & ~PAGE_MASK;
5042 
5043 	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5044 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5045 				    snapc, truncate_seq, truncate_size,
5046 				    true);
5047 	if (IS_ERR(req))
5048 		return PTR_ERR(req);
5049 
5050 	/* it may be a short write due to an object boundary */
5051 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5052 				false, false);
5053 	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5054 
5055 	req->r_mtime = *mtime;
5056 	rc = ceph_osdc_start_request(osdc, req, true);
5057 	if (!rc)
5058 		rc = ceph_osdc_wait_request(osdc, req);
5059 
5060 	ceph_osdc_put_request(req);
5061 	if (rc == 0)
5062 		rc = len;
5063 	dout("writepages result %d\n", rc);
5064 	return rc;
5065 }
5066 EXPORT_SYMBOL(ceph_osdc_writepages);
5067 
5068 int ceph_osdc_setup(void)
5069 {
5070 	size_t size = sizeof(struct ceph_osd_request) +
5071 	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5072 
5073 	BUG_ON(ceph_osd_request_cache);
5074 	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5075 						   0, 0, NULL);
5076 
5077 	return ceph_osd_request_cache ? 0 : -ENOMEM;
5078 }
5079 EXPORT_SYMBOL(ceph_osdc_setup);
5080 
5081 void ceph_osdc_cleanup(void)
5082 {
5083 	BUG_ON(!ceph_osd_request_cache);
5084 	kmem_cache_destroy(ceph_osd_request_cache);
5085 	ceph_osd_request_cache = NULL;
5086 }
5087 EXPORT_SYMBOL(ceph_osdc_cleanup);
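/*
 * ceph_osdc_setup()/ceph_osdc_cleanup() manage the global request slab
 * and are called once each, paired at module init/exit.  Sketch of the
 * pairing (the real callers live in libceph's module code):
 */
#if 0
static int __init my_init(void)
{
	int ret = ceph_osdc_setup();

	if (ret)
		return ret;
	/* ... the rest of module init ... */
	return 0;
}

static void __exit my_exit(void)
{
	ceph_osdc_cleanup();
}
#endif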
5088 
5089 /*
5090  * handle incoming message
5091  */
5092 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5093 {
5094 	struct ceph_osd *osd = con->private;
5095 	struct ceph_osd_client *osdc = osd->o_osdc;
5096 	int type = le16_to_cpu(msg->hdr.type);
5097 
5098 	switch (type) {
5099 	case CEPH_MSG_OSD_MAP:
5100 		ceph_osdc_handle_map(osdc, msg);
5101 		break;
5102 	case CEPH_MSG_OSD_OPREPLY:
5103 		handle_reply(osd, msg);
5104 		break;
5105 	case CEPH_MSG_OSD_BACKOFF:
5106 		handle_backoff(osd, msg);
5107 		break;
5108 	case CEPH_MSG_WATCH_NOTIFY:
5109 		handle_watch_notify(osdc, msg);
5110 		break;
5111 
5112 	default:
5113 		pr_err("received unknown message type %d %s\n", type,
5114 		       ceph_msg_type_name(type));
5115 	}
5116 
5117 	ceph_msg_put(msg);
5118 }
5119 
5120 /*
5121  * Look up and return the message for an incoming reply.  Don't try to
5122  * do anything yet about a data portion larger than what was
5123  * preallocated - for now, just skip the message.
5124  */
5125 static struct ceph_msg *get_reply(struct ceph_connection *con,
5126 				  struct ceph_msg_header *hdr,
5127 				  int *skip)
5128 {
5129 	struct ceph_osd *osd = con->private;
5130 	struct ceph_osd_client *osdc = osd->o_osdc;
5131 	struct ceph_msg *m = NULL;
5132 	struct ceph_osd_request *req;
5133 	int front_len = le32_to_cpu(hdr->front_len);
5134 	int data_len = le32_to_cpu(hdr->data_len);
5135 	u64 tid = le64_to_cpu(hdr->tid);
5136 
5137 	down_read(&osdc->lock);
5138 	if (!osd_registered(osd)) {
5139 		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5140 		*skip = 1;
5141 		goto out_unlock_osdc;
5142 	}
5143 	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5144 
5145 	mutex_lock(&osd->lock);
5146 	req = lookup_request(&osd->o_requests, tid);
5147 	if (!req) {
5148 		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5149 		     osd->o_osd, tid);
5150 		*skip = 1;
5151 		goto out_unlock_session;
5152 	}
5153 
5154 	ceph_msg_revoke_incoming(req->r_reply);
5155 
5156 	if (front_len > req->r_reply->front_alloc_len) {
5157 		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5158 			__func__, osd->o_osd, req->r_tid, front_len,
5159 			req->r_reply->front_alloc_len);
5160 		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5161 				 false);
5162 		if (!m)
5163 			goto out_unlock_session;
5164 		ceph_msg_put(req->r_reply);
5165 		req->r_reply = m;
5166 	}
5167 
5168 	if (data_len > req->r_reply->data_length) {
5169 		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5170 			__func__, osd->o_osd, req->r_tid, data_len,
5171 			req->r_reply->data_length);
5172 		m = NULL;
5173 		*skip = 1;
5174 		goto out_unlock_session;
5175 	}
5176 
5177 	m = ceph_msg_get(req->r_reply);
5178 	dout("get_reply tid %lld %p\n", tid, m);
5179 
5180 out_unlock_session:
5181 	mutex_unlock(&osd->lock);
5182 out_unlock_osdc:
5183 	up_read(&osdc->lock);
5184 	return m;
5185 }
5186 
5187 /*
5188  * TODO: switch to a msg-owned pagelist
5189  */
5190 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5191 {
5192 	struct ceph_msg *m;
5193 	int type = le16_to_cpu(hdr->type);
5194 	u32 front_len = le32_to_cpu(hdr->front_len);
5195 	u32 data_len = le32_to_cpu(hdr->data_len);
5196 
5197 	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
5198 	if (!m)
5199 		return NULL;
5200 
5201 	if (data_len) {
5202 		struct page **pages;
5203 		struct ceph_osd_data osd_data;
5204 
5205 		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5206 					       GFP_NOIO);
5207 		if (IS_ERR(pages)) {
5208 			ceph_msg_put(m);
5209 			return NULL;
5210 		}
5211 
5212 		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
5213 					 false);
5214 		ceph_osdc_msg_data_add(m, &osd_data);
5215 	}
5216 
5217 	return m;
5218 }
5219 
5220 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5221 				  struct ceph_msg_header *hdr,
5222 				  int *skip)
5223 {
5224 	struct ceph_osd *osd = con->private;
5225 	int type = le16_to_cpu(hdr->type);
5226 
5227 	*skip = 0;
5228 	switch (type) {
5229 	case CEPH_MSG_OSD_MAP:
5230 	case CEPH_MSG_OSD_BACKOFF:
5231 	case CEPH_MSG_WATCH_NOTIFY:
5232 		return alloc_msg_with_page_vector(hdr);
5233 	case CEPH_MSG_OSD_OPREPLY:
5234 		return get_reply(con, hdr, skip);
5235 	default:
5236 		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5237 			osd->o_osd, type);
5238 		*skip = 1;
5239 		return NULL;
5240 	}
5241 }
5242 
5243 /*
5244  * Wrappers to take and drop a reference on the containing ceph_osd struct
5245  */
5246 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5247 {
5248 	struct ceph_osd *osd = con->private;
5249 	if (get_osd(osd))
5250 		return con;
5251 	return NULL;
5252 }
5253 
5254 static void put_osd_con(struct ceph_connection *con)
5255 {
5256 	struct ceph_osd *osd = con->private;
5257 	put_osd(osd);
5258 }
5259 
5260 /*
5261  * authentication
5262  */
5263 /*
5264  * Note: returned pointer is the address of a structure that's
5265  * managed separately.  Caller must *not* attempt to free it.
5266  */
5267 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5268 					int *proto, int force_new)
5269 {
5270 	struct ceph_osd *o = con->private;
5271 	struct ceph_osd_client *osdc = o->o_osdc;
5272 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5273 	struct ceph_auth_handshake *auth = &o->o_auth;
5274 
5275 	if (force_new && auth->authorizer) {
5276 		ceph_auth_destroy_authorizer(auth->authorizer);
5277 		auth->authorizer = NULL;
5278 	}
5279 	if (!auth->authorizer) {
5280 		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5281 						      auth);
5282 		if (ret)
5283 			return ERR_PTR(ret);
5284 	} else {
5285 		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5286 						     auth);
5287 		if (ret)
5288 			return ERR_PTR(ret);
5289 	}
5290 	*proto = ac->protocol;
5291 
5292 	return auth;
5293 }
5294 
5295 
5296 static int verify_authorizer_reply(struct ceph_connection *con)
5297 {
5298 	struct ceph_osd *o = con->private;
5299 	struct ceph_osd_client *osdc = o->o_osdc;
5300 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5301 
5302 	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5303 }
5304 
5305 static int invalidate_authorizer(struct ceph_connection *con)
5306 {
5307 	struct ceph_osd *o = con->private;
5308 	struct ceph_osd_client *osdc = o->o_osdc;
5309 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5310 
5311 	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5312 	return ceph_monc_validate_auth(&osdc->client->monc);
5313 }
5314 
5315 static void osd_reencode_message(struct ceph_msg *msg)
5316 {
5317 	int type = le16_to_cpu(msg->hdr.type);
5318 
5319 	if (type == CEPH_MSG_OSD_OP)
5320 		encode_request_finish(msg);
5321 }
5322 
5323 static int osd_sign_message(struct ceph_msg *msg)
5324 {
5325 	struct ceph_osd *o = msg->con->private;
5326 	struct ceph_auth_handshake *auth = &o->o_auth;
5327 
5328 	return ceph_auth_sign_message(auth, msg);
5329 }
5330 
5331 static int osd_check_message_signature(struct ceph_msg *msg)
5332 {
5333 	struct ceph_osd *o = msg->con->private;
5334 	struct ceph_auth_handshake *auth = &o->o_auth;
5335 
5336 	return ceph_auth_check_message_signature(auth, msg);
5337 }
5338 
5339 static const struct ceph_connection_operations osd_con_ops = {
5340 	.get = get_osd_con,
5341 	.put = put_osd_con,
5342 	.dispatch = dispatch,
5343 	.get_authorizer = get_authorizer,
5344 	.verify_authorizer_reply = verify_authorizer_reply,
5345 	.invalidate_authorizer = invalidate_authorizer,
5346 	.alloc_msg = alloc_msg,
5347 	.reencode_message = osd_reencode_message,
5348 	.sign_message = osd_sign_message,
5349 	.check_message_signature = osd_check_message_signature,
5350 	.fault = osd_fault,
5351 };
5352